diff --git a/Cargo.lock b/Cargo.lock index 047df324f3..bcb1de97ee 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -377,6 +377,16 @@ version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e22d1f4b888c298a027c99dc9048015fac177587de20fc30232a057dfbe24a21" +[[package]] +name = "async-attributes" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a3203e79f4dd9bdda415ed03cf14dae5a2bf775c683a00f94e9cd1faf0f596e5" +dependencies = [ + "quote 1.0.28", + "syn 1.0.107", +] + [[package]] name = "async-channel" version = "1.8.0" @@ -428,6 +438,7 @@ dependencies = [ "blocking", "futures-lite", "once_cell", + "tokio", ] [[package]] @@ -466,6 +477,7 @@ version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "62565bb4402e926b29953c785397c6dc0391b7b446e45008b0049eb43cec6f5d" dependencies = [ + "async-attributes", "async-channel", "async-global-executor", "async-io", @@ -9255,6 +9267,7 @@ name = "starcoin-chain" version = "1.13.7" dependencies = [ "anyhow", + "async-std", "bcs-ext", "clap 3.2.23", "proptest", @@ -9271,6 +9284,7 @@ dependencies = [ "starcoin-crypto", "starcoin-dag", "starcoin-executor", + "starcoin-flexidag", "starcoin-genesis", "starcoin-logger", "starcoin-network-rpc-api", @@ -9360,9 +9374,10 @@ dependencies = [ [[package]] name = "starcoin-chain-service" -version = "1.13.7" +version = "1.13.8" dependencies = [ "anyhow", + "async-std", "async-trait", "futures 0.3.26", "rand 0.8.5", @@ -9547,7 +9562,7 @@ dependencies = [ [[package]] name = "starcoin-dag" -version = "1.13.7" +version = "1.13.8" dependencies = [ "anyhow", "bcs-ext", @@ -9715,6 +9730,27 @@ dependencies = [ "tokio-executor 0.2.0-alpha.6", ] +[[package]] +name = "starcoin-flexidag" +version = "1.13.7" +dependencies = [ + "anyhow", + "async-trait", + "bcs-ext", + "futures 0.3.26", + "starcoin-accumulator", + "starcoin-config", + "starcoin-consensus", + "starcoin-crypto", + "starcoin-dag", + "starcoin-logger", + "starcoin-service-registry", + "starcoin-storage", + "starcoin-types", + "thiserror", + "tokio", +] + [[package]] name = "starcoin-framework" version = "11.0.0" @@ -10774,6 +10810,7 @@ dependencies = [ "starcoin-crypto", "starcoin-dag", "starcoin-executor", + "starcoin-flexidag", "starcoin-genesis", "starcoin-logger", "starcoin-metrics", @@ -10801,6 +10838,7 @@ dependencies = [ "sysinfo", "test-helper", "thiserror", + "timeout-join-handler", "tokio", ] diff --git a/Cargo.toml b/Cargo.toml index fd3a95886b..83132d5568 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,7 +1,6 @@ [workspace] resolver = "2" members = [ - "consensus/dag", "benchmarks", "commons/stest", "commons/bcs_ext", @@ -112,10 +111,11 @@ members = [ "cmd/miner_client/api", "cmd/db-exporter", "cmd/genesis-nft-miner", + "flexidag", + "flexidag/dag", ] default-members = [ - "consensus/dag", "benchmarks", "commons/stest", "commons/bcs_ext", @@ -219,6 +219,8 @@ default-members = [ "stratum", "cmd/miner_client/api", "cmd/db-exporter", + "flexidag", + "flexidag/dag", ] [profile.dev] @@ -248,7 +250,7 @@ api-limiter = { path = "commons/api-limiter" } arc-swap = "1.5.1" arrayref = "0.3" ascii = "1.0.0" -async-std = "1.12" +async-std = { version = "1.12", features = ["attributes", "tokio1"] } async-trait = "0.1.53" asynchronous-codec = "0.5" atomic-counter = "1.0.1" @@ -259,6 +261,9 @@ bcs-ext = { path = "commons/bcs_ext" } bech32 = "0.9" bencher = "0.1.5" bitflags = "1.3.2" +faster-hex = "0.6" +indexmap = "1.9.1" +bincode = { version = "1", 
default-features = false } bs58 = "0.3.1" byteorder = "1.3.4" bytes = "1" @@ -500,7 +505,8 @@ starcoin-parallel-executor = { path = "vm/parallel-executor" } starcoin-transaction-benchmarks = { path = "vm/transaction-benchmarks" } starcoin-language-e2e-tests = { path = "vm/e2e-tests" } starcoin-proptest-helpers = { path = "vm/proptest-helpers" } - +starcoin-flexidag = { path = "flexidag" } +starcoin-dag = {path = "flexidag/dag"} syn = { version = "1.0.107", features = [ "full", "extra-traits", @@ -535,7 +541,7 @@ walkdir = "2.3.1" wasm-timer = "0.2" which = "4.1.0" zeroize = "1.3.0" -starcoin-dag = {path = "consensus/dag"} + [profile.release.package] starcoin-service-registry.debug = 1 starcoin-chain.debug = 1 diff --git a/account/src/account_test.rs b/account/src/account_test.rs index 6b657d6405..5e36ea2528 100644 --- a/account/src/account_test.rs +++ b/account/src/account_test.rs @@ -224,7 +224,7 @@ pub fn test_wallet_account() -> Result<()> { ); //println!("verify result is {:?}", sign.verify(&raw_txn, &public_key)?); println!("public key is {:?}", public_key.to_bytes().as_ref()); - println!("hash value is {:?}", &hash_value); + println!("hash value is {:?}", hash_value); println!("key is {:?}", key.derived_address()); println!("address is {:?},result is {:?}", address, result); diff --git a/benchmarks/src/chain.rs b/benchmarks/src/chain.rs index f16fc23c28..ee9760eb0b 100644 --- a/benchmarks/src/chain.rs +++ b/benchmarks/src/chain.rs @@ -9,6 +9,7 @@ use starcoin_chain::BlockChain; use starcoin_chain::{ChainReader, ChainWriter}; use starcoin_config::{temp_dir, ChainNetwork, DataDirPath, RocksdbConfig}; use starcoin_consensus::Consensus; +use starcoin_dag::blockdag::BlockDAG; use starcoin_genesis::Genesis; use starcoin_storage::cache_storage::CacheStorage; use starcoin_storage::db_storage::DBStorage; diff --git a/block-relayer/src/block_relayer.rs b/block-relayer/src/block_relayer.rs index d8d791051c..6f066818b6 100644 --- a/block-relayer/src/block_relayer.rs +++ b/block-relayer/src/block_relayer.rs @@ -203,7 +203,9 @@ impl BlockRelayer { ctx: &mut ServiceContext, ) -> Result<()> { let network = ctx.get_shared::()?; - let block_connector_service = ctx.service_ref::()?.clone(); + let block_connector_service = ctx + .service_ref::>()? 
+ .clone(); let txpool = self.txpool.clone(); let metrics = self.metrics.clone(); let fut = async move { @@ -277,7 +279,7 @@ impl EventHandler for BlockRelayer { fn handle_event(&mut self, event: NewHeadBlock, ctx: &mut ServiceContext) { debug!( "[block-relay] Handle new head block event, block_id: {:?}", - event.0.block().id() + event.executed_block.block().id() ); let network = match ctx.get_shared::() { Ok(network) => network, @@ -286,7 +288,7 @@ impl EventHandler for BlockRelayer { return; } }; - self.broadcast_compact_block(network, event.0); + self.broadcast_compact_block(network, event.executed_block); } } diff --git a/chain/Cargo.toml b/chain/Cargo.toml index a42b10c4e4..88674327d0 100644 --- a/chain/Cargo.toml +++ b/chain/Cargo.toml @@ -24,7 +24,10 @@ starcoin-vm-types = { workspace = true } starcoin-storage = { workspace = true } thiserror = { workspace = true } starcoin-network-rpc-api = { workspace = true } -starcoin-dag = {workspace = true} +async-std = { workspace = true } +starcoin-flexidag ={ workspace = true } +starcoin-dag ={ workspace = true } + [dev-dependencies] proptest = { workspace = true } proptest-derive = { workspace = true } diff --git a/chain/api/Cargo.toml b/chain/api/Cargo.toml index 1648fcdee5..094c6edcb8 100644 --- a/chain/api/Cargo.toml +++ b/chain/api/Cargo.toml @@ -18,7 +18,6 @@ thiserror = { workspace = true } starcoin-network-rpc-api = { workspace = true } starcoin-config = { workspace = true } - [dev-dependencies] [features] diff --git a/chain/api/src/chain.rs b/chain/api/src/chain.rs index 2a2ada21de..8d48e0e324 100644 --- a/chain/api/src/chain.rs +++ b/chain/api/src/chain.rs @@ -2,6 +2,7 @@ // SPDX-License-Identifier: Apache-2 use anyhow::Result; +use starcoin_config::ChainNetworkID; use starcoin_crypto::HashValue; use starcoin_state_api::ChainStateReader; use starcoin_statedb::ChainStateDB; @@ -102,6 +103,7 @@ pub trait ChainReader { ) -> Result>; fn current_tips_hash(&self) -> Result>>; + fn has_dag_block(&self, hash: HashValue) -> Result; } pub trait ChainWriter { diff --git a/chain/api/src/message.rs b/chain/api/src/message.rs index d4144fe9a0..17ae4cda86 100644 --- a/chain/api/src/message.rs +++ b/chain/api/src/message.rs @@ -60,6 +60,9 @@ pub enum ChainRequest { access_path: Option, }, GetBlockInfos(Vec), + GetDagBlockChildren { + block_ids: Vec, + } } impl ServiceRequest for ChainRequest { diff --git a/chain/api/src/service.rs b/chain/api/src/service.rs index 8ba6adce0e..c1c9ba16a2 100644 --- a/chain/api/src/service.rs +++ b/chain/api/src/service.rs @@ -72,6 +72,7 @@ pub trait ReadableChainService { ) -> Result>; fn get_block_infos(&self, ids: Vec) -> Result>>; + fn get_dag_block_children(&self, ids: Vec) -> Result>; } /// Writeable block chain service trait @@ -139,6 +140,7 @@ pub trait ChainAsyncService: ) -> Result>; async fn get_block_infos(&self, hashes: Vec) -> Result>>; + async fn get_dag_block_children(&self, hashes: Vec) -> Result>; } #[async_trait::async_trait] @@ -436,4 +438,15 @@ where bail!("get block_infos error") } } + + async fn get_dag_block_children(&self, hashes: Vec) -> Result> { + let response = self.send(ChainRequest::GetDagBlockChildren { + block_ids: hashes, + }).await??; + if let ChainResponse::HashVec(children) = response { + Ok(children) + } else { + bail!("get dag block children error") + } + } } diff --git a/chain/chain-notify/src/lib.rs b/chain/chain-notify/src/lib.rs index 0cd0a22d6e..2cf26a6db4 100644 --- a/chain/chain-notify/src/lib.rs +++ b/chain/chain-notify/src/lib.rs @@ -52,8 +52,7 @@ impl EventHandler for 
ChainNotifyHandlerService { item: NewHeadBlock, ctx: &mut ServiceContext, ) { - let NewHeadBlock(block_detail) = item; - let block = block_detail.block(); + let block = item.executed_block.block(); // notify header. self.notify_new_block(block, ctx); // notify events diff --git a/chain/mock/src/mock_chain.rs b/chain/mock/src/mock_chain.rs index 85d923d39b..847651c4f5 100644 --- a/chain/mock/src/mock_chain.rs +++ b/chain/mock/src/mock_chain.rs @@ -19,6 +19,7 @@ pub struct MockChain { net: ChainNetwork, head: BlockChain, miner: AccountInfo, + storage: Arc, } impl MockChain { @@ -29,12 +30,12 @@ impl MockChain { let chain = BlockChain::new( net.time_service(), chain_info.head().id(), - storage, + storage.clone(), None, dag, )?; let miner = AccountInfo::random(); - Ok(Self::new_inner(net, chain, miner)) + Ok(Self::new_inner(net, chain, miner, storage)) } pub fn new_with_storage( @@ -47,20 +48,20 @@ impl MockChain { let chain = BlockChain::new( net.time_service(), head_block_hash, - storage, + storage.clone(), None, dag.clone(), )?; - Ok(Self::new_inner(net, chain, miner)) + Ok(Self::new_inner(net, chain, miner, storage)) } - pub fn new_with_chain(net: ChainNetwork, chain: BlockChain) -> Result { + pub fn new_with_chain(net: ChainNetwork, chain: BlockChain, storage: Arc) -> Result { let miner = AccountInfo::random(); - Ok(Self::new_inner(net, chain, miner)) + Ok(Self::new_inner(net, chain, miner, storage)) } - fn new_inner(net: ChainNetwork, head: BlockChain, miner: AccountInfo) -> Self { - Self { net, head, miner } + fn new_inner(net: ChainNetwork, head: BlockChain, miner: AccountInfo, storage: Arc) -> Self { + Self { net, head, miner, storage } } pub fn net(&self) -> &ChainNetwork { @@ -96,9 +97,14 @@ impl MockChain { head: chain, net: self.net.clone(), miner: AccountInfo::random(), + storage: self.storage.clone(), }) } + pub fn get_storage(&self) -> Arc { + self.storage.clone() + } + pub fn select_head(&mut self, new_block: Block) -> Result<()> { //TODO reuse WriteChainService's select_head logic. // new block should be execute and save to storage. 
@@ -128,14 +134,9 @@ impl MockChain { } pub fn produce(&self) -> Result { - let (template, _) = self.head.create_block_template( - *self.miner.address(), - None, - vec![], - vec![], - None, - None, - )?; + let (template, _) = + self.head + .create_block_template(*self.miner.address(), None, vec![], vec![], None, None)?; self.head .consensus() .create_block(template, self.net.time_service().as_ref()) diff --git a/chain/service/Cargo.toml b/chain/service/Cargo.toml index 75fec7a1d1..7249664812 100644 --- a/chain/service/Cargo.toml +++ b/chain/service/Cargo.toml @@ -1,5 +1,6 @@ [dependencies] anyhow = { workspace = true } +async-std = { workspace = true } async-trait = { workspace = true } futures = { workspace = true } rand = { workspace = true } @@ -36,7 +37,7 @@ edition = { workspace = true } license = { workspace = true } name = "starcoin-chain-service" publish = { workspace = true } -version = "1.13.7" +version = "1.13.8" homepage = { workspace = true } repository = { workspace = true } rust-version = { workspace = true } diff --git a/chain/service/src/chain_service.rs b/chain/service/src/chain_service.rs index 9344c1a8f0..477d966cfe 100644 --- a/chain/service/src/chain_service.rs +++ b/chain/service/src/chain_service.rs @@ -11,9 +11,8 @@ use starcoin_config::NodeConfig; use starcoin_crypto::HashValue; use starcoin_dag::blockdag::BlockDAG; use starcoin_logger::prelude::*; - use starcoin_service_registry::{ - ActorService, EventHandler, ServiceContext, ServiceFactory, ServiceHandler, + ActorService, EventHandler, ServiceContext, ServiceFactory, ServiceHandler, ServiceRef, }; use starcoin_storage::{BlockStore, Storage, Store}; use starcoin_types::block::ExecutedBlock; @@ -46,11 +45,11 @@ impl ChainReaderService { ) -> Result { Ok(Self { inner: ChainReaderServiceInner::new( - config.clone(), + config, startup_info, - storage.clone(), + storage, dag, - vm_metrics.clone(), + vm_metrics, )?, }) } @@ -63,11 +62,15 @@ impl ServiceFactory for ChainReaderService { let startup_info = storage .get_startup_info()? .ok_or_else(|| format_err!("StartupInfo should exist at service init."))?; + let dag = ctx.get_shared::()?.clone(); let vm_metrics = ctx.get_shared_opt::()?; - let dag = ctx - .get_shared_opt::()? 
- .expect("dag should be initialized at service init"); - Self::new(config, startup_info, storage, dag, vm_metrics) + Self::new( + config, + startup_info, + storage, + dag, + vm_metrics, + ) } } @@ -85,9 +88,14 @@ impl ActorService for ChainReaderService { impl EventHandler for ChainReaderService { fn handle_event(&mut self, event: NewHeadBlock, _ctx: &mut ServiceContext) { - let new_head = event.0.block().header(); - if let Err(e) = if self.inner.get_main().can_connect(event.0.as_ref()) { - self.inner.update_chain_head(event.0.as_ref().clone()) + let new_head = event.executed_block.block().header().clone(); + if let Err(e) = if self + .inner + .get_main() + .can_connect(event.executed_block.as_ref()) + { + self.inner + .update_chain_head(event.executed_block.as_ref().clone()) } else { self.inner.switch_main(new_head.id()) } { @@ -244,6 +252,9 @@ impl ServiceHandler for ChainReaderService { ChainRequest::GetBlockInfos(ids) => Ok(ChainResponse::BlockInfoVec(Box::new( self.inner.get_block_infos(ids)?, ))), + ChainRequest::GetDagBlockChildren { block_ids } => Ok(ChainResponse::HashVec( + self.inner.get_dag_block_children(block_ids)?, + )), } } } @@ -253,8 +264,8 @@ pub struct ChainReaderServiceInner { startup_info: StartupInfo, main: BlockChain, storage: Arc, - vm_metrics: Option, dag: BlockDAG, + vm_metrics: Option, } impl ChainReaderServiceInner { @@ -383,6 +394,7 @@ impl ReadableChainService for ChainReaderServiceInner { fn main_startup_info(&self) -> StartupInfo { self.startup_info.clone() } + fn main_blocks_by_number( &self, number: Option, @@ -433,6 +445,18 @@ impl ReadableChainService for ChainReaderServiceInner { fn get_block_infos(&self, ids: Vec) -> Result>> { self.storage.get_block_infos(ids) } + + fn get_dag_block_children(&self, ids: Vec) -> Result> { + ids.into_iter().fold(Ok(vec![]), |mut result, id| { + match self.dag.get_children(id) { + anyhow::Result::Ok(children) => { + result.as_mut().map(|r| r.extend(children)); + Ok(result?) 
+ } + Err(e) => Err(e), + } + }) + } } #[cfg(test)] diff --git a/chain/src/chain.rs b/chain/src/chain.rs index c95b929000..20290a2792 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -3,7 +3,7 @@ use crate::verifier::{BlockVerifier, FullVerifier, NoneVerifier}; use anyhow::{bail, ensure, format_err, Ok, Result}; - +use bcs_ext::BCSCodec; use sp_utils::stop_watch::{watch, CHAIN_WATCH_NAME}; use starcoin_accumulator::inmemory::InMemoryAccumulator; use starcoin_accumulator::{ @@ -13,10 +13,12 @@ use starcoin_chain_api::{ verify_block, ChainReader, ChainWriter, ConnectBlockError, EventWithProof, ExcludedTxns, ExecutedBlock, MintedUncleNumber, TransactionInfoWithProof, VerifiedBlock, VerifyBlockField, }; +use starcoin_config::{ChainNetworkID, NodeConfig}; use starcoin_consensus::Consensus; use starcoin_crypto::hash::PlainCryptoHash; use starcoin_crypto::HashValue; use starcoin_dag::blockdag::BlockDAG; +use starcoin_dag::consensusdb::prelude::StoreError; use starcoin_executor::VMMetrics; use starcoin_logger::prelude::*; use starcoin_open_block::OpenedBlock; @@ -41,6 +43,7 @@ use starcoin_vm_types::access_path::AccessPath; use starcoin_vm_types::account_config::genesis_address; use starcoin_vm_types::genesis_config::ConsensusStrategy; use starcoin_vm_types::on_chain_resource::Epoch; +use std::backtrace; use std::cmp::min; use std::iter::Extend; use std::option::Option::{None, Some}; @@ -576,7 +579,17 @@ impl BlockChain { self.storage.save_block_info(block_info.clone())?; self.storage.save_table_infos(txn_table_infos)?; - self.dag.commit(header.to_owned())?; + let result = self.dag.commit(header.to_owned()); + match result { + anyhow::Result::Ok(_) => (), + Err(e) => { + if let Some(StoreError::KeyAlreadyExists(_)) = e.downcast_ref::() { + info!("dag block already exist, ignore"); + } else { + return Err(e); + } + } + } watch(CHAIN_WATCH_NAME, "n26"); Ok(ExecutedBlock { block, block_info }) } @@ -1114,6 +1127,10 @@ impl ChainReader for BlockChain { fn current_tips_hash(&self) -> Result>> { Ok(self.storage.get_dag_state()?.map(|state| state.tips)) } + + fn has_dag_block(&self, hash: HashValue) -> Result { + self.dag.has_dag_block(hash) + } } impl BlockChain { @@ -1291,6 +1308,7 @@ impl ChainWriter for BlockChain { fn connect(&mut self, executed_block: ExecutedBlock) -> Result { if executed_block.block.is_dag() { + info!("connect a dag block, {:?}, number: {:?}", executed_block.block.id(), executed_block.block.header().number()); return self.connect_dag(executed_block); } let (block, block_info) = (executed_block.block(), executed_block.block_info()); diff --git a/chain/src/verifier/mod.rs b/chain/src/verifier/mod.rs index d57dff7702..57f5c3496e 100644 --- a/chain/src/verifier/mod.rs +++ b/chain/src/verifier/mod.rs @@ -2,11 +2,14 @@ // SPDX-License-Identifier: Apache-2.0 use anyhow::{format_err, Result}; +use bcs_ext::BCSCodec; use sp_utils::stop_watch::{watch, CHAIN_WATCH_NAME}; use starcoin_chain_api::{ verify_block, ChainReader, ConnectBlockError, VerifiedBlock, VerifyBlockField, }; use starcoin_consensus::{Consensus, ConsensusVerifyError}; +use starcoin_crypto::hash::PlainCryptoHash; +use starcoin_crypto::HashValue; use starcoin_logger::prelude::debug; use starcoin_types::block::{Block, BlockHeader, LegacyBlockBody, ALLOWED_FUTURE_BLOCKTIME}; use std::{collections::HashSet, str::FromStr}; diff --git a/cmd/db-exporter/src/main.rs b/cmd/db-exporter/src/main.rs index 536cf8a0eb..3b008c8259 100644 --- a/cmd/db-exporter/src/main.rs +++ b/cmd/db-exporter/src/main.rs @@ -20,7 +20,7 @@ use 
starcoin_chain::{ use starcoin_config::{BuiltinNetworkID, ChainNetwork, RocksdbConfig}; use starcoin_consensus::Consensus; use starcoin_crypto::HashValue; -use starcoin_dag::consensusdb::prelude::FlexiDagStorageConfig; +use starcoin_dag::{blockdag::BlockDAG, consensusdb::prelude::FlexiDagStorageConfig}; use starcoin_genesis::Genesis; use starcoin_resource_viewer::{AnnotatedMoveStruct, AnnotatedMoveValue, MoveValueAnnotator}; use starcoin_statedb::{ChainStateDB, ChainStateReader, ChainStateWriter}; @@ -260,7 +260,7 @@ pub struct CheckKeyOptions { /// starcoin node db path. like ~/.starcoin/barnard/starcoindb/db/starcoindb pub db_path: PathBuf, #[clap(long, short = 'n', - possible_values = & ["block", "block_header"],)] + possible_values=&["block", "block_header"],)] pub cf_name: String, #[clap(long, short = 'b')] pub block_hash: HashValue, @@ -351,7 +351,7 @@ pub struct GenBlockTransactionsOptions { pub block_num: Option, #[clap(long, short = 't')] pub trans_num: Option, - #[clap(long, short = 'p', possible_values = & ["CreateAccount", "FixAccount", "EmptyTxn"],)] + #[clap(long, short = 'p', possible_values=&["CreateAccount", "FixAccount", "EmptyTxn"],)] /// txn type pub txn_type: Txntype, } @@ -405,9 +405,9 @@ pub struct ExportResourceOptions { pub block_hash: HashValue, #[clap( - short = 'r', - default_value = "0x1::Account::Balance<0x1::STC::STC>", - parse(try_from_str = parse_struct_tag) + short='r', + default_value = "0x1::Account::Balance<0x1::STC::STC>", + parse(try_from_str=parse_struct_tag) )] /// resource struct tag. resource_type: StructTag, diff --git a/cmd/replay/src/main.rs b/cmd/replay/src/main.rs index 896d0c2f98..0f48acc479 100644 --- a/cmd/replay/src/main.rs +++ b/cmd/replay/src/main.rs @@ -8,6 +8,7 @@ use starcoin_chain::verifier::{BasicVerifier, ConsensusVerifier, FullVerifier, N use starcoin_chain::{BlockChain, ChainReader}; use starcoin_config::RocksdbConfig; use starcoin_config::{BuiltinNetworkID, ChainNetwork}; +use starcoin_dag::blockdag::BlockDAG; use starcoin_genesis::Genesis; use starcoin_storage::cache_storage::CacheStorage; use starcoin_storage::db_storage::DBStorage; diff --git a/commons/stream-task/src/collector.rs b/commons/stream-task/src/collector.rs index 3e597fce95..cd0e317bbd 100644 --- a/commons/stream-task/src/collector.rs +++ b/commons/stream-task/src/collector.rs @@ -15,7 +15,7 @@ use std::sync::atomic::{AtomicU64, Ordering}; use std::sync::Arc; use thiserror::Error; -#[derive(Clone, Copy, Debug)] +#[derive(Clone, Copy, Debug, PartialEq)] pub enum CollectorState { /// Collector is enough, do not feed more item, finish task. 
Enough, diff --git a/config/src/available_port.rs b/config/src/available_port.rs index 588b28ad81..f03bf1af60 100644 --- a/config/src/available_port.rs +++ b/config/src/available_port.rs @@ -57,7 +57,7 @@ fn get_ephemeral_port() -> ::std::io::Result { use std::net::{TcpListener, TcpStream}; // Request a random available port from the OS - let listener = TcpListener::bind(("localhost", 0))?; + let listener = TcpListener::bind(("127.0.0.1", 0))?; let addr = listener.local_addr()?; // Create and accept a connection (which we'll promptly drop) in order to force the port diff --git a/consensus/dag/Cargo.toml b/consensus/dag/Cargo.toml deleted file mode 100644 index c764c2be8f..0000000000 --- a/consensus/dag/Cargo.toml +++ /dev/null @@ -1,51 +0,0 @@ -[dependencies] -anyhow = { workspace = true } -byteorder = { workspace = true } -cryptonight-rs = { workspace = true } -futures = { workspace = true } -hex = { default-features = false, workspace = true } -once_cell = { workspace = true } -proptest = { default-features = false, optional = true, workspace = true } -proptest-derive = { default-features = false, optional = true, workspace = true } -rand = { workspace = true } -rand_core = { default-features = false, workspace = true } -rust-argon2 = { workspace = true } -sha3 = { workspace = true } -starcoin-chain-api = { workspace = true } -starcoin-crypto = { workspace = true } -starcoin-logger = { workspace = true } -starcoin-state-api = { workspace = true } -starcoin-time-service = { workspace = true } -starcoin-types = { workspace = true } -starcoin-vm-types = { workspace = true } -thiserror = { workspace = true } -rocksdb = { workspace = true } -bincode = { version = "1", default-features = false } - -serde = { workspace = true } -starcoin-storage = { workspace = true } -parking_lot = { workspace = true } -itertools = { workspace = true } -starcoin-config = { workspace = true } -bcs-ext = { workspace = true } - -[dev-dependencies] -proptest = { workspace = true } -proptest-derive = { workspace = true } -stest = { workspace = true } -tempfile = { workspace = true } - -[features] -default = [] -fuzzing = ["proptest", "proptest-derive", "starcoin-types/fuzzing"] - -[package] -authors = { workspace = true } -edition = { workspace = true } -license = { workspace = true } -name = "starcoin-dag" -publish = { workspace = true } -version = "1.13.7" -homepage = { workspace = true } -repository = { workspace = true } -rust-version = { workspace = true } diff --git a/consensus/dag/src/blockdag.rs b/consensus/dag/src/blockdag.rs deleted file mode 100644 index 33bc1711f1..0000000000 --- a/consensus/dag/src/blockdag.rs +++ /dev/null @@ -1,257 +0,0 @@ -use super::ghostdag::protocol::GhostdagManager; -use super::reachability::{inquirer, reachability_service::MTReachabilityService}; -use super::types::ghostdata::GhostdagData; -use crate::consensusdb::prelude::{FlexiDagStorageConfig, StoreError}; -use crate::consensusdb::schemadb::GhostdagStoreReader; -use crate::consensusdb::{ - prelude::FlexiDagStorage, - schemadb::{ - DbGhostdagStore, DbHeadersStore, DbReachabilityStore, DbRelationsStore, GhostdagStore, - HeaderStore, ReachabilityStoreReader, RelationsStore, RelationsStoreReader, - }, -}; -use anyhow::{bail, Ok}; -use parking_lot::RwLock; -use starcoin_config::temp_dir; -use starcoin_crypto::{HashValue as Hash, HashValue}; -use starcoin_types::block::BlockHeader; -use starcoin_types::{ - blockhash::{BlockHashes, KType}, - consensus_header::ConsensusHeader, -}; -use std::sync::Arc; - -pub type DbGhostdagManager = 
GhostdagManager< - DbGhostdagStore, - DbRelationsStore, - MTReachabilityService, - DbHeadersStore, ->; - -#[derive(Clone)] -pub struct BlockDAG { - pub storage: FlexiDagStorage, - ghostdag_manager: DbGhostdagManager, -} -const FLEXIDAG_K: KType = 16; -impl BlockDAG { - pub fn new(k: KType, db: FlexiDagStorage) -> Self { - let ghostdag_store = db.ghost_dag_store.clone(); - let header_store = db.header_store.clone(); - let relations_store = db.relations_store.clone(); - let reachability_store = db.reachability_store.clone(); - let reachability_service = - MTReachabilityService::new(Arc::new(RwLock::new(reachability_store))); - let ghostdag_manager = DbGhostdagManager::new( - k, - ghostdag_store.clone(), - relations_store.clone(), - header_store.clone(), - reachability_service, - ); - - Self { - ghostdag_manager, - storage: db, - } - } - - pub fn create_flexidag(db: FlexiDagStorage) -> Self { - Self::new(FLEXIDAG_K, db) - } - - pub fn create_for_testing() -> anyhow::Result { - let dag_storage = - FlexiDagStorage::create_from_path(temp_dir(), FlexiDagStorageConfig::default())?; - Ok(BlockDAG::new(16, dag_storage)) - } - - pub fn init_with_genesis(&self, genesis: BlockHeader) -> anyhow::Result<()> { - let origin = genesis.parent_hash(); - - if self.storage.relations_store.has(origin)? { - return Ok(()); - }; - inquirer::init(&mut self.storage.reachability_store.clone(), origin)?; - self.storage - .relations_store - .insert(origin, BlockHashes::new(vec![]))?; - self.commit(genesis)?; - Ok(()) - } - pub fn ghostdata(&self, parents: &[HashValue]) -> GhostdagData { - self.ghostdag_manager.ghostdag(parents) - } - - pub fn ghostdata_by_hash(&self, hash: HashValue) -> anyhow::Result>> { - match self.storage.ghost_dag_store.get_data(hash) { - Result::Ok(value) => Ok(Some(value)), - Err(StoreError::KeyNotFound(_)) => Ok(None), - Err(e) => Err(e.into()), - } - } - - pub fn commit(&self, header: BlockHeader) -> anyhow::Result<()> { - // Generate ghostdag data - let parents = header.parents(); - let ghostdata = self.ghostdata_by_hash(header.id())?.unwrap_or_else(|| { - Arc::new(if header.is_dag_genesis() { - self.ghostdag_manager.genesis_ghostdag_data(&header) - } else { - self.ghostdag_manager.ghostdag(&parents) - }) - }); - // Store ghostdata - self.storage - .ghost_dag_store - .insert(header.id(), ghostdata.clone())?; - - // Update reachability store - let mut reachability_store = self.storage.reachability_store.clone(); - let mut merge_set = ghostdata - .unordered_mergeset_without_selected_parent() - .filter(|hash| self.storage.reachability_store.has(*hash).unwrap()); - inquirer::add_block( - &mut reachability_store, - header.id(), - ghostdata.selected_parent, - &mut merge_set, - )?; - // store relations - self.storage - .relations_store - .insert(header.id(), BlockHashes::new(parents))?; - // Store header store - let _ = self - .storage - .header_store - .insert(header.id(), Arc::new(header.to_owned()), 0)?; - return Ok(()); - } - - pub fn get_parents(&self, hash: Hash) -> anyhow::Result> { - match self.storage.relations_store.get_parents(hash) { - anyhow::Result::Ok(parents) => anyhow::Result::Ok((*parents).clone()), - Err(error) => { - println!("failed to get parents by hash: {}", error.to_string()); - bail!("failed to get parents by hash: {}", error.to_string()); - } - } - } - - pub fn get_children(&self, hash: Hash) -> anyhow::Result> { - match self.storage.relations_store.get_children(hash) { - anyhow::Result::Ok(children) => anyhow::Result::Ok((*children).clone()), - Err(error) => { - 
println!("failed to get parents by hash: {}", error.to_string()); - bail!("failed to get parents by hash: {}", error.to_string()); - } - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::consensusdb::prelude::FlexiDagStorageConfig; - use starcoin_config::RocksdbConfig; - use starcoin_types::block::{BlockHeader, BlockHeaderBuilder}; - use std::{env, fs}; - - fn build_block_dag(k: KType) -> BlockDAG { - let db_path = env::temp_dir().join("smolstc"); - println!("db path:{}", db_path.to_string_lossy()); - if db_path - .as_path() - .try_exists() - .unwrap_or_else(|_| panic!("Failed to check {db_path:?}")) - { - fs::remove_dir_all(db_path.as_path()).expect("Failed to delete temporary directory"); - } - let config = FlexiDagStorageConfig::create_with_params(1, RocksdbConfig::default()); - let db = FlexiDagStorage::create_from_path(db_path, config) - .expect("Failed to create flexidag storage"); - let dag = BlockDAG::new(k, db); - return dag; - } - - #[test] - fn test_dag_0() { - //let dag = build_block_dag(16); - let dag = BlockDAG::create_for_testing().unwrap(); - let genesis = BlockHeader::dag_genesis_random() - .as_builder() - .with_difficulty(0.into()) - .build(); - - let mut parents_hash = vec![genesis.id()]; - dag.init_with_genesis(genesis.to_owned()).unwrap(); - - for _ in 0..10 { - let header_builder = BlockHeaderBuilder::random(); - let header = header_builder - .with_parents_hash(Some(parents_hash.clone())) - .build(); - parents_hash = vec![header.id()]; - dag.commit(header.to_owned()).unwrap(); - let ghostdata = dag.ghostdata_by_hash(header.id()).unwrap().unwrap(); - println!("{:?},{:?}", header, ghostdata); - } - } - - #[test] - fn test_dag_1() { - let genesis = BlockHeader::dag_genesis_random() - .as_builder() - .with_difficulty(0.into()) - .build(); - let block1 = BlockHeaderBuilder::random() - .with_difficulty(1.into()) - .with_parents_hash(Some(vec![genesis.id()])) - .build(); - let block2 = BlockHeaderBuilder::random() - .with_difficulty(2.into()) - .with_parents_hash(Some(vec![genesis.id()])) - .build(); - let block3_1 = BlockHeaderBuilder::random() - .with_difficulty(1.into()) - .with_parents_hash(Some(vec![genesis.id()])) - .build(); - let block3 = BlockHeaderBuilder::random() - .with_difficulty(3.into()) - .with_parents_hash(Some(vec![block3_1.id()])) - .build(); - let block4 = BlockHeaderBuilder::random() - .with_difficulty(4.into()) - .with_parents_hash(Some(vec![block1.id(), block2.id()])) - .build(); - let block5 = BlockHeaderBuilder::random() - .with_difficulty(4.into()) - .with_parents_hash(Some(vec![block2.id(), block3.id()])) - .build(); - let block6 = BlockHeaderBuilder::random() - .with_difficulty(5.into()) - .with_parents_hash(Some(vec![block4.id(), block5.id()])) - .build(); - let mut latest_id = block6.id(); - let genesis_id = genesis.id(); - let dag = build_block_dag(3); - let expect_selected_parented = vec![block5.id(), block3.id(), block3_1.id(), genesis_id]; - dag.init_with_genesis(genesis).unwrap(); - - dag.commit(block1).unwrap(); - dag.commit(block2).unwrap(); - dag.commit(block3_1).unwrap(); - dag.commit(block3).unwrap(); - dag.commit(block4).unwrap(); - dag.commit(block5).unwrap(); - dag.commit(block6).unwrap(); - let mut count = 0; - while latest_id != genesis_id && count < 4 { - let ghostdata = dag.ghostdata_by_hash(latest_id).unwrap().unwrap(); - latest_id = ghostdata.selected_parent; - assert_eq!(expect_selected_parented[count], latest_id); - count += 1; - } - } -} diff --git a/consensus/dag/src/consensusdb/access.rs 
b/consensus/dag/src/consensusdb/access.rs deleted file mode 100644 index 43cc9d0093..0000000000 --- a/consensus/dag/src/consensusdb/access.rs +++ /dev/null @@ -1,199 +0,0 @@ -use super::{cache::DagCache, db::DBStorage, error::StoreError}; - -use super::prelude::DbWriter; -use super::schema::{KeyCodec, Schema, ValueCodec}; -use itertools::Itertools; -use rocksdb::{Direction, IteratorMode, ReadOptions}; -use starcoin_storage::storage::RawDBStorage; -use std::{ - collections::hash_map::RandomState, error::Error, hash::BuildHasher, marker::PhantomData, - sync::Arc, -}; - -/// A concurrent DB store access with typed caching. -#[derive(Clone)] -pub struct CachedDbAccess { - db: Arc, - - // Cache - cache: DagCache, - - _phantom: PhantomData, -} - -impl CachedDbAccess -where - R: BuildHasher + Default, -{ - pub fn new(db: Arc, cache_size: usize) -> Self { - Self { - db, - cache: DagCache::new_with_capacity(cache_size), - _phantom: Default::default(), - } - } - - pub fn read_from_cache(&self, key: S::Key) -> Option { - self.cache.get(&key) - } - - pub fn has(&self, key: S::Key) -> Result { - Ok(self.cache.contains_key(&key) - || self - .db - .raw_get_pinned_cf(S::COLUMN_FAMILY, key.encode_key().unwrap()) - .map_err(|_| StoreError::CFNotExist(S::COLUMN_FAMILY.to_string()))? - .is_some()) - } - - pub fn read(&self, key: S::Key) -> Result { - if let Some(data) = self.cache.get(&key) { - Ok(data) - } else if let Some(slice) = self - .db - .raw_get_pinned_cf(S::COLUMN_FAMILY, key.encode_key().unwrap()) - .map_err(|_| StoreError::CFNotExist(S::COLUMN_FAMILY.to_string()))? - { - let data = S::Value::decode_value(slice.as_ref()) - .map_err(|o| StoreError::DecodeError(o.to_string()))?; - self.cache.insert(key, data.clone()); - Ok(data) - } else { - Err(StoreError::KeyNotFound("".to_string())) - } - } - - pub fn iterator( - &self, - ) -> Result, S::Value), Box>> + '_, StoreError> - { - let db_iterator = self - .db - .raw_iterator_cf_opt( - S::COLUMN_FAMILY, - IteratorMode::Start, - ReadOptions::default(), - ) - .map_err(|e| StoreError::CFNotExist(e.to_string()))?; - - Ok(db_iterator.map(|iter_result| match iter_result { - Ok((key, data_bytes)) => match S::Value::decode_value(&data_bytes) { - Ok(data) => Ok((key, data)), - Err(e) => Err(e.into()), - }, - Err(e) => Err(e.into()), - })) - } - - pub fn write( - &self, - mut writer: impl DbWriter, - key: S::Key, - data: S::Value, - ) -> Result<(), StoreError> { - writer.put::(&key, &data)?; - self.cache.insert(key, data); - Ok(()) - } - - pub fn write_many( - &self, - mut writer: impl DbWriter, - iter: &mut (impl Iterator + Clone), - ) -> Result<(), StoreError> { - for (key, data) in iter { - writer.put::(&key, &data)?; - self.cache.insert(key, data); - } - Ok(()) - } - - /// Write directly from an iterator and do not cache any data. 
NOTE: this action also clears the cache - pub fn write_many_without_cache( - &self, - mut writer: impl DbWriter, - iter: &mut impl Iterator, - ) -> Result<(), StoreError> { - for (key, data) in iter { - writer.put::(&key, &data)?; - } - // The cache must be cleared in order to avoid invalidated entries - self.cache.remove_all(); - Ok(()) - } - - pub fn delete(&self, mut writer: impl DbWriter, key: S::Key) -> Result<(), StoreError> { - self.cache.remove(&key); - writer.delete::(&key)?; - Ok(()) - } - - pub fn delete_many( - &self, - mut writer: impl DbWriter, - key_iter: &mut (impl Iterator + Clone), - ) -> Result<(), StoreError> { - let key_iter_clone = key_iter.clone(); - self.cache.remove_many(key_iter); - for key in key_iter_clone { - writer.delete::(&key)?; - } - Ok(()) - } - - pub fn delete_all(&self, mut writer: impl DbWriter) -> Result<(), StoreError> { - self.cache.remove_all(); - let keys = self - .db - .raw_iterator_cf_opt( - S::COLUMN_FAMILY, - IteratorMode::Start, - ReadOptions::default(), - ) - .map_err(|e| StoreError::CFNotExist(e.to_string()))? - .map(|iter_result| match iter_result { - Ok((key, _)) => Ok::<_, rocksdb::Error>(key), - Err(e) => Err(e), - }) - .collect_vec(); - for key in keys { - writer.delete::(&S::Key::decode_key(&key?)?)?; - } - Ok(()) - } - - /// A dynamic iterator that can iterate through a specific prefix, and from a certain start point. - //TODO: loop and chain iterators for multi-prefix iterator. - pub fn seek_iterator( - &self, - seek_from: Option, // iter whole range if None - limit: usize, // amount to take. - skip_first: bool, // skips the first value, (useful in conjunction with the seek-key, as to not re-retrieve). - ) -> Result, S::Value), Box>> + '_, StoreError> - { - let read_opts = ReadOptions::default(); - let mut db_iterator = match seek_from { - Some(seek_key) => self.db.raw_iterator_cf_opt( - S::COLUMN_FAMILY, - IteratorMode::From(seek_key.encode_key()?.as_slice(), Direction::Forward), - read_opts, - ), - None => self - .db - .raw_iterator_cf_opt(S::COLUMN_FAMILY, IteratorMode::Start, read_opts), - } - .map_err(|e| StoreError::CFNotExist(e.to_string()))?; - - if skip_first { - db_iterator.next(); - } - - Ok(db_iterator.take(limit).map(move |item| match item { - Ok((key_bytes, value_bytes)) => match S::Value::decode_value(value_bytes.as_ref()) { - Ok(value) => Ok((key_bytes, value)), - Err(err) => Err(err.into()), - }, - Err(err) => Err(err.into()), - })) - } -} diff --git a/consensus/dag/src/consensusdb/cache.rs b/consensus/dag/src/consensusdb/cache.rs deleted file mode 100644 index 51d3dda9b3..0000000000 --- a/consensus/dag/src/consensusdb/cache.rs +++ /dev/null @@ -1,44 +0,0 @@ -use core::hash::Hash; -use starcoin_storage::cache_storage::GCacheStorage; -use std::sync::Arc; - -#[derive(Clone)] -pub struct DagCache { - cache: Arc>, -} - -impl DagCache -where - K: Hash + Eq + Default, - V: Default + Clone, -{ - pub(crate) fn new_with_capacity(size: usize) -> Self { - Self { - cache: Arc::new(GCacheStorage::new_with_capacity(size, None)), - } - } - - pub(crate) fn get(&self, key: &K) -> Option { - self.cache.get_inner(key) - } - - pub(crate) fn contains_key(&self, key: &K) -> bool { - self.get(key).is_some() - } - - pub(crate) fn insert(&self, key: K, data: V) { - self.cache.put_inner(key, data); - } - - pub(crate) fn remove(&self, key: &K) { - self.cache.remove_inner(key); - } - - pub(crate) fn remove_many(&self, key_iter: &mut impl Iterator) { - key_iter.for_each(|k| self.remove(&k)); - } - - pub(crate) fn remove_all(&self) { - 
self.cache.remove_all(); - } -} diff --git a/consensus/dag/src/consensusdb/consensus_ghostdag.rs b/consensus/dag/src/consensusdb/consensus_ghostdag.rs deleted file mode 100644 index cf281906a0..0000000000 --- a/consensus/dag/src/consensusdb/consensus_ghostdag.rs +++ /dev/null @@ -1,512 +0,0 @@ -use super::schema::{KeyCodec, ValueCodec}; -use super::{ - db::DBStorage, - error::StoreError, - prelude::{CachedDbAccess, DirectDbWriter}, - writer::BatchDbWriter, -}; -use crate::define_schema; -use starcoin_types::blockhash::{ - BlockHashMap, BlockHashes, BlockLevel, BlueWorkType, HashKTypeMap, -}; - -use crate::types::{ - ghostdata::{CompactGhostdagData, GhostdagData}, - ordering::SortableBlock, -}; -use itertools::{ - EitherOrBoth::{Both, Left, Right}, - Itertools, -}; -use rocksdb::WriteBatch; -use starcoin_crypto::HashValue as Hash; -use std::{cell::RefCell, cmp, iter::once, sync::Arc}; - -pub trait GhostdagStoreReader { - fn get_blue_score(&self, hash: Hash) -> Result; - fn get_blue_work(&self, hash: Hash) -> Result; - fn get_selected_parent(&self, hash: Hash) -> Result; - fn get_mergeset_blues(&self, hash: Hash) -> Result; - fn get_mergeset_reds(&self, hash: Hash) -> Result; - fn get_blues_anticone_sizes(&self, hash: Hash) -> Result; - - /// Returns full block data for the requested hash - fn get_data(&self, hash: Hash) -> Result, StoreError>; - - fn get_compact_data(&self, hash: Hash) -> Result; - - /// Check if the store contains data for the requested hash - fn has(&self, hash: Hash) -> Result; -} - -pub trait GhostdagStore: GhostdagStoreReader { - /// Insert GHOSTDAG data for block `hash` into the store. Note that GHOSTDAG data - /// is added once and never modified, so no need for specific setters for each element. - /// Additionally, this means writes are semantically "append-only", which is why - /// we can keep the `insert` method non-mutable on self. See "Parallel Processing.md" for an overview. 
- fn insert(&self, hash: Hash, data: Arc) -> Result<(), StoreError>; -} - -pub struct GhostDagDataWrapper(GhostdagData); - -impl From for GhostDagDataWrapper { - fn from(value: GhostdagData) -> Self { - Self(value) - } -} - -impl GhostDagDataWrapper { - /// Returns an iterator to the mergeset in ascending blue work order (tie-breaking by hash) - pub fn ascending_mergeset_without_selected_parent<'a>( - &'a self, - store: &'a (impl GhostdagStoreReader + ?Sized), - ) -> impl Iterator> + '_ { - self.0 - .mergeset_blues - .iter() - .skip(1) // Skip the selected parent - .cloned() - .map(|h| { - store - .get_blue_work(h) - .map(|blue| SortableBlock::new(h, blue)) - }) - .merge_join_by( - self.0 - .mergeset_reds - .iter() - .cloned() - .map(|h| store.get_blue_work(h).map(|red| SortableBlock::new(h, red))), - |a, b| match (a, b) { - (Ok(a), Ok(b)) => a.cmp(b), - (Err(_), Ok(_)) => cmp::Ordering::Less, // select left Err node - (Ok(_), Err(_)) => cmp::Ordering::Greater, // select right Err node - (Err(_), Err(_)) => cmp::Ordering::Equal, // remove both Err nodes - }, - ) - .map(|r| match r { - Left(b) | Right(b) => b, - Both(c, _) => Err(StoreError::DAGDupBlocksError(format!("{c:?}"))), - }) - } - - /// Returns an iterator to the mergeset in descending blue work order (tie-breaking by hash) - pub fn descending_mergeset_without_selected_parent<'a>( - &'a self, - store: &'a (impl GhostdagStoreReader + ?Sized), - ) -> impl Iterator> + '_ { - self.0 - .mergeset_blues - .iter() - .skip(1) // Skip the selected parent - .rev() // Reverse since blues and reds are stored with ascending blue work order - .cloned() - .map(|h| { - store - .get_blue_work(h) - .map(|blue| SortableBlock::new(h, blue)) - }) - .merge_join_by( - self.0 - .mergeset_reds - .iter() - .rev() // Reverse - .cloned() - .map(|h| store.get_blue_work(h).map(|red| SortableBlock::new(h, red))), - |a, b| match (b, a) { - (Ok(b), Ok(a)) => b.cmp(a), - (Err(_), Ok(_)) => cmp::Ordering::Less, // select left Err node - (Ok(_), Err(_)) => cmp::Ordering::Greater, // select right Err node - (Err(_), Err(_)) => cmp::Ordering::Equal, // select both Err nodes - }, // Reverse - ) - .map(|r| match r { - Left(b) | Right(b) => b, - Both(c, _) => Err(StoreError::DAGDupBlocksError(format!("{c:?}"))), - }) - } - - /// Returns an iterator to the mergeset in topological consensus order -- starting with the selected parent, - /// and adding the mergeset in increasing blue work order. Note that this is a topological order even though - /// the selected parent has highest blue work by def -- since the mergeset is in its anticone. 
- pub fn consensus_ordered_mergeset<'a>( - &'a self, - store: &'a (impl GhostdagStoreReader + ?Sized), - ) -> impl Iterator> + '_ { - once(Ok(self.0.selected_parent)).chain( - self.ascending_mergeset_without_selected_parent(store) - .map(|s| s.map(|s| s.hash)), - ) - } - - /// Returns an iterator to the mergeset in topological consensus order without the selected parent - pub fn consensus_ordered_mergeset_without_selected_parent<'a>( - &'a self, - store: &'a (impl GhostdagStoreReader + ?Sized), - ) -> impl Iterator> + '_ { - self.ascending_mergeset_without_selected_parent(store) - .map(|s| s.map(|s| s.hash)) - } -} - -pub(crate) const GHOST_DAG_STORE_CF: &str = "block-ghostdag-data"; -pub(crate) const COMPACT_GHOST_DAG_STORE_CF: &str = "compact-block-ghostdag-data"; - -define_schema!(GhostDag, Hash, Arc, GHOST_DAG_STORE_CF); -define_schema!( - CompactGhostDag, - Hash, - CompactGhostdagData, - COMPACT_GHOST_DAG_STORE_CF -); - -impl KeyCodec for Hash { - fn encode_key(&self) -> Result, StoreError> { - Ok(self.to_vec()) - } - - fn decode_key(data: &[u8]) -> Result { - Hash::from_slice(data).map_err(|e| StoreError::DecodeError(e.to_string())) - } -} -impl ValueCodec for Arc { - fn encode_value(&self) -> Result, StoreError> { - bcs_ext::to_bytes(&self).map_err(|e| StoreError::EncodeError(e.to_string())) - } - - fn decode_value(data: &[u8]) -> Result { - bcs_ext::from_bytes(data).map_err(|e| StoreError::DecodeError(e.to_string())) - } -} - -impl KeyCodec for Hash { - fn encode_key(&self) -> Result, StoreError> { - Ok(self.to_vec()) - } - - fn decode_key(data: &[u8]) -> Result { - Hash::from_slice(data).map_err(|e| StoreError::DecodeError(e.to_string())) - } -} -impl ValueCodec for CompactGhostdagData { - fn encode_value(&self) -> Result, StoreError> { - bcs_ext::to_bytes(&self).map_err(|e| StoreError::EncodeError(e.to_string())) - } - - fn decode_value(data: &[u8]) -> Result { - bcs_ext::from_bytes(data).map_err(|e| StoreError::DecodeError(e.to_string())) - } -} - -/// A DB + cache implementation of `GhostdagStore` trait, with concurrency support. -#[derive(Clone)] -pub struct DbGhostdagStore { - db: Arc, - level: BlockLevel, - access: CachedDbAccess, - compact_access: CachedDbAccess, -} - -impl DbGhostdagStore { - pub fn new(db: Arc, level: BlockLevel, cache_size: usize) -> Self { - Self { - db: Arc::clone(&db), - level, - access: CachedDbAccess::new(db.clone(), cache_size), - compact_access: CachedDbAccess::new(db, cache_size), - } - } - - pub fn clone_with_new_cache(&self, cache_size: usize) -> Self { - Self::new(Arc::clone(&self.db), self.level, cache_size) - } - - pub fn insert_batch( - &self, - batch: &mut WriteBatch, - hash: Hash, - data: &Arc, - ) -> Result<(), StoreError> { - if self.access.has(hash)? 
{ - return Err(StoreError::KeyAlreadyExists(hash.to_string())); - } - self.access - .write(BatchDbWriter::new(batch), hash, data.clone())?; - self.compact_access.write( - BatchDbWriter::new(batch), - hash, - CompactGhostdagData { - blue_score: data.blue_score, - blue_work: data.blue_work, - selected_parent: data.selected_parent, - }, - )?; - Ok(()) - } -} - -impl GhostdagStoreReader for DbGhostdagStore { - fn get_blue_score(&self, hash: Hash) -> Result { - Ok(self.access.read(hash)?.blue_score) - } - - fn get_blue_work(&self, hash: Hash) -> Result { - Ok(self.access.read(hash)?.blue_work) - } - - fn get_selected_parent(&self, hash: Hash) -> Result { - Ok(self.access.read(hash)?.selected_parent) - } - - fn get_mergeset_blues(&self, hash: Hash) -> Result { - Ok(Arc::clone(&self.access.read(hash)?.mergeset_blues)) - } - - fn get_mergeset_reds(&self, hash: Hash) -> Result { - Ok(Arc::clone(&self.access.read(hash)?.mergeset_reds)) - } - - fn get_blues_anticone_sizes(&self, hash: Hash) -> Result { - Ok(Arc::clone(&self.access.read(hash)?.blues_anticone_sizes)) - } - - fn get_data(&self, hash: Hash) -> Result, StoreError> { - self.access.read(hash) - } - - fn get_compact_data(&self, hash: Hash) -> Result { - self.compact_access.read(hash) - } - - fn has(&self, hash: Hash) -> Result { - self.access.has(hash) - } -} - -impl GhostdagStore for DbGhostdagStore { - fn insert(&self, hash: Hash, data: Arc) -> Result<(), StoreError> { - if self.access.has(hash)? { - return Err(StoreError::KeyAlreadyExists(hash.to_string())); - } - self.access - .write(DirectDbWriter::new(&self.db), hash, data.clone())?; - if self.compact_access.has(hash)? { - return Err(StoreError::KeyAlreadyExists(hash.to_string())); - } - self.compact_access.write( - DirectDbWriter::new(&self.db), - hash, - CompactGhostdagData { - blue_score: data.blue_score, - blue_work: data.blue_work, - selected_parent: data.selected_parent, - }, - )?; - Ok(()) - } -} - -/// An in-memory implementation of `GhostdagStore` trait to be used for tests. -/// Uses `RefCell` for interior mutability in order to workaround `insert` -/// being non-mutable. -pub struct MemoryGhostdagStore { - blue_score_map: RefCell>, - blue_work_map: RefCell>, - selected_parent_map: RefCell>, - mergeset_blues_map: RefCell>, - mergeset_reds_map: RefCell>, - blues_anticone_sizes_map: RefCell>, -} - -impl MemoryGhostdagStore { - pub fn new() -> Self { - Self { - blue_score_map: RefCell::new(BlockHashMap::new()), - blue_work_map: RefCell::new(BlockHashMap::new()), - selected_parent_map: RefCell::new(BlockHashMap::new()), - mergeset_blues_map: RefCell::new(BlockHashMap::new()), - mergeset_reds_map: RefCell::new(BlockHashMap::new()), - blues_anticone_sizes_map: RefCell::new(BlockHashMap::new()), - } - } -} - -impl Default for MemoryGhostdagStore { - fn default() -> Self { - Self::new() - } -} - -impl GhostdagStore for MemoryGhostdagStore { - fn insert(&self, hash: Hash, data: Arc) -> Result<(), StoreError> { - if self.has(hash)? 
{ - return Err(StoreError::KeyAlreadyExists(hash.to_string())); - } - self.blue_score_map - .borrow_mut() - .insert(hash, data.blue_score); - self.blue_work_map.borrow_mut().insert(hash, data.blue_work); - self.selected_parent_map - .borrow_mut() - .insert(hash, data.selected_parent); - self.mergeset_blues_map - .borrow_mut() - .insert(hash, data.mergeset_blues.clone()); - self.mergeset_reds_map - .borrow_mut() - .insert(hash, data.mergeset_reds.clone()); - self.blues_anticone_sizes_map - .borrow_mut() - .insert(hash, data.blues_anticone_sizes.clone()); - Ok(()) - } -} - -impl GhostdagStoreReader for MemoryGhostdagStore { - fn get_blue_score(&self, hash: Hash) -> Result { - match self.blue_score_map.borrow().get(&hash) { - Some(blue_score) => Ok(*blue_score), - None => Err(StoreError::KeyNotFound(hash.to_string())), - } - } - - fn get_blue_work(&self, hash: Hash) -> Result { - match self.blue_work_map.borrow().get(&hash) { - Some(blue_work) => Ok(*blue_work), - None => Err(StoreError::KeyNotFound(hash.to_string())), - } - } - - fn get_selected_parent(&self, hash: Hash) -> Result { - match self.selected_parent_map.borrow().get(&hash) { - Some(selected_parent) => Ok(*selected_parent), - None => Err(StoreError::KeyNotFound(hash.to_string())), - } - } - - fn get_mergeset_blues(&self, hash: Hash) -> Result { - match self.mergeset_blues_map.borrow().get(&hash) { - Some(mergeset_blues) => Ok(BlockHashes::clone(mergeset_blues)), - None => Err(StoreError::KeyNotFound(hash.to_string())), - } - } - - fn get_mergeset_reds(&self, hash: Hash) -> Result { - match self.mergeset_reds_map.borrow().get(&hash) { - Some(mergeset_reds) => Ok(BlockHashes::clone(mergeset_reds)), - None => Err(StoreError::KeyNotFound(hash.to_string())), - } - } - - fn get_blues_anticone_sizes(&self, hash: Hash) -> Result { - match self.blues_anticone_sizes_map.borrow().get(&hash) { - Some(sizes) => Ok(HashKTypeMap::clone(sizes)), - None => Err(StoreError::KeyNotFound(hash.to_string())), - } - } - - fn get_data(&self, hash: Hash) -> Result, StoreError> { - if !self.has(hash)? 
{ - return Err(StoreError::KeyNotFound(hash.to_string())); - } - Ok(Arc::new(GhostdagData::new( - self.blue_score_map.borrow()[&hash], - self.blue_work_map.borrow()[&hash], - self.selected_parent_map.borrow()[&hash], - self.mergeset_blues_map.borrow()[&hash].clone(), - self.mergeset_reds_map.borrow()[&hash].clone(), - self.blues_anticone_sizes_map.borrow()[&hash].clone(), - ))) - } - - fn get_compact_data(&self, hash: Hash) -> Result { - Ok(self.get_data(hash)?.to_compact()) - } - - fn has(&self, hash: Hash) -> Result { - Ok(self.blue_score_map.borrow().contains_key(&hash)) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use starcoin_types::blockhash::BlockHashSet; - use std::iter::once; - - #[test] - fn test_mergeset_iterators() { - let store = MemoryGhostdagStore::new(); - - let factory = |w: u64| { - Arc::new(GhostdagData { - blue_score: Default::default(), - blue_work: w.into(), - selected_parent: Default::default(), - mergeset_blues: Default::default(), - mergeset_reds: Default::default(), - blues_anticone_sizes: Default::default(), - }) - }; - - // Blues - store.insert(1.into(), factory(2)).unwrap(); - store.insert(2.into(), factory(7)).unwrap(); - store.insert(3.into(), factory(11)).unwrap(); - - // Reds - store.insert(4.into(), factory(4)).unwrap(); - store.insert(5.into(), factory(9)).unwrap(); - store.insert(6.into(), factory(11)).unwrap(); // Tie-breaking case - - let mut data = GhostdagData::new_with_selected_parent(1.into(), 5); - data.add_blue(2.into(), Default::default(), &Default::default()); - data.add_blue(3.into(), Default::default(), &Default::default()); - - data.add_red(4.into()); - data.add_red(5.into()); - data.add_red(6.into()); - - let wrapper: GhostDagDataWrapper = data.clone().into(); - - let mut expected: Vec = vec![4.into(), 2.into(), 5.into(), 3.into(), 6.into()]; - assert_eq!( - expected, - wrapper - .ascending_mergeset_without_selected_parent(&store) - .filter_map(|b| b.map(|b| b.hash).ok()) - .collect::>() - ); - - itertools::assert_equal( - once(1.into()).chain(expected.iter().cloned()), - wrapper - .consensus_ordered_mergeset(&store) - .filter_map(|b| b.ok()), - ); - - expected.reverse(); - assert_eq!( - expected, - wrapper - .descending_mergeset_without_selected_parent(&store) - .filter_map(|b| b.map(|b| b.hash).ok()) - .collect::>() - ); - - // Use sets since the below functions have no order guarantee - let expected = BlockHashSet::from_iter([4.into(), 2.into(), 5.into(), 3.into(), 6.into()]); - assert_eq!( - expected, - data.unordered_mergeset_without_selected_parent() - .collect::() - ); - - let expected = - BlockHashSet::from_iter([1.into(), 4.into(), 2.into(), 5.into(), 3.into(), 6.into()]); - assert_eq!( - expected, - data.unordered_mergeset().collect::() - ); - } -} diff --git a/consensus/dag/src/consensusdb/consensus_header.rs b/consensus/dag/src/consensusdb/consensus_header.rs deleted file mode 100644 index 11b842be47..0000000000 --- a/consensus/dag/src/consensusdb/consensus_header.rs +++ /dev/null @@ -1,217 +0,0 @@ -use super::schema::{KeyCodec, ValueCodec}; -use super::{ - db::DBStorage, - error::{StoreError, StoreResult}, - prelude::CachedDbAccess, - writer::{BatchDbWriter, DirectDbWriter}, -}; -use crate::define_schema; -use rocksdb::WriteBatch; -use starcoin_crypto::HashValue as Hash; -use starcoin_types::block::BlockHeader; -use starcoin_types::{ - blockhash::BlockLevel, - consensus_header::{CompactHeaderData, HeaderWithBlockLevel}, - U256, -}; -use std::sync::Arc; - -pub trait HeaderStoreReader { - fn get_daa_score(&self, hash: 
Hash) -> Result; - fn get_blue_score(&self, hash: Hash) -> Result; - fn get_timestamp(&self, hash: Hash) -> Result; - fn get_difficulty(&self, hash: Hash) -> Result; - fn get_header(&self, hash: Hash) -> Result, StoreError>; - fn get_header_with_block_level(&self, hash: Hash) -> Result; - fn get_compact_header_data(&self, hash: Hash) -> Result; -} - -pub trait HeaderStore: HeaderStoreReader { - // This is append only - fn insert( - &self, - hash: Hash, - header: Arc, - block_level: BlockLevel, - ) -> Result<(), StoreError>; -} - -pub(crate) const HEADERS_STORE_CF: &str = "headers-store"; -pub(crate) const COMPACT_HEADER_DATA_STORE_CF: &str = "compact-header-data"; - -define_schema!(DagHeader, Hash, HeaderWithBlockLevel, HEADERS_STORE_CF); -define_schema!( - CompactBlockHeader, - Hash, - CompactHeaderData, - COMPACT_HEADER_DATA_STORE_CF -); - -impl KeyCodec for Hash { - fn encode_key(&self) -> Result, StoreError> { - Ok(self.to_vec()) - } - - fn decode_key(data: &[u8]) -> Result { - Hash::from_slice(data).map_err(|e| StoreError::DecodeError(e.to_string())) - } -} -impl ValueCodec for HeaderWithBlockLevel { - fn encode_value(&self) -> Result, StoreError> { - bcs_ext::to_bytes(&self).map_err(|e| StoreError::EncodeError(e.to_string())) - } - - fn decode_value(data: &[u8]) -> Result { - bcs_ext::from_bytes(data).map_err(|e| StoreError::DecodeError(e.to_string())) - } -} -impl KeyCodec for Hash { - fn encode_key(&self) -> Result, StoreError> { - Ok(self.to_vec()) - } - - fn decode_key(data: &[u8]) -> Result { - Hash::from_slice(data).map_err(|e| StoreError::DecodeError(e.to_string())) - } -} -impl ValueCodec for CompactHeaderData { - fn encode_value(&self) -> Result, StoreError> { - bcs_ext::to_bytes(&self).map_err(|e| StoreError::EncodeError(e.to_string())) - } - - fn decode_value(data: &[u8]) -> Result { - bcs_ext::from_bytes(data).map_err(|e| StoreError::DecodeError(e.to_string())) - } -} - -/// A DB + cache implementation of `HeaderStore` trait, with concurrency support. -#[derive(Clone)] -pub struct DbHeadersStore { - db: Arc, - headers_access: CachedDbAccess, - compact_headers_access: CachedDbAccess, -} - -impl DbHeadersStore { - pub fn new(db: Arc, cache_size: usize) -> Self { - Self { - db: Arc::clone(&db), - headers_access: CachedDbAccess::new(db.clone(), cache_size), - compact_headers_access: CachedDbAccess::new(db, cache_size), - } - } - - pub fn clone_with_new_cache(&self, cache_size: usize) -> Self { - Self::new(Arc::clone(&self.db), cache_size) - } - - pub fn has(&self, hash: Hash) -> StoreResult { - self.headers_access.has(hash) - } - - pub fn get_header(&self, hash: Hash) -> Result { - let result = self.headers_access.read(hash)?; - Ok((*result.header).clone()) - } - - pub fn insert_batch( - &self, - batch: &mut WriteBatch, - hash: Hash, - header: Arc, - block_level: BlockLevel, - ) -> Result<(), StoreError> { - if self.headers_access.has(hash)? 
{ - return Err(StoreError::KeyAlreadyExists(hash.to_string())); - } - self.headers_access.write( - BatchDbWriter::new(batch), - hash, - HeaderWithBlockLevel { - header: header.clone(), - block_level, - }, - )?; - self.compact_headers_access.write( - BatchDbWriter::new(batch), - hash, - CompactHeaderData { - timestamp: header.timestamp(), - difficulty: header.difficulty(), - }, - )?; - Ok(()) - } -} - -impl HeaderStoreReader for DbHeadersStore { - fn get_daa_score(&self, _hash: Hash) -> Result { - unimplemented!() - } - - fn get_blue_score(&self, _hash: Hash) -> Result { - unimplemented!() - } - - fn get_timestamp(&self, hash: Hash) -> Result { - if let Some(header_with_block_level) = self.headers_access.read_from_cache(hash) { - return Ok(header_with_block_level.header.timestamp()); - } - Ok(self.compact_headers_access.read(hash)?.timestamp) - } - - fn get_difficulty(&self, hash: Hash) -> Result { - if let Some(header_with_block_level) = self.headers_access.read_from_cache(hash) { - return Ok(header_with_block_level.header.difficulty()); - } - Ok(self.compact_headers_access.read(hash)?.difficulty) - } - - fn get_header(&self, hash: Hash) -> Result, StoreError> { - Ok(self.headers_access.read(hash)?.header) - } - - fn get_header_with_block_level(&self, hash: Hash) -> Result { - self.headers_access.read(hash) - } - - fn get_compact_header_data(&self, hash: Hash) -> Result { - if let Some(header_with_block_level) = self.headers_access.read_from_cache(hash) { - return Ok(CompactHeaderData { - timestamp: header_with_block_level.header.timestamp(), - difficulty: header_with_block_level.header.difficulty(), - }); - } - self.compact_headers_access.read(hash) - } -} - -impl HeaderStore for DbHeadersStore { - fn insert( - &self, - hash: Hash, - header: Arc, - block_level: u8, - ) -> Result<(), StoreError> { - if self.headers_access.has(hash)? { - return Err(StoreError::KeyAlreadyExists(hash.to_string())); - } - self.compact_headers_access.write( - DirectDbWriter::new(&self.db), - hash, - CompactHeaderData { - timestamp: header.timestamp(), - difficulty: header.difficulty(), - }, - )?; - self.headers_access.write( - DirectDbWriter::new(&self.db), - hash, - HeaderWithBlockLevel { - header, - block_level, - }, - )?; - Ok(()) - } -} diff --git a/consensus/dag/src/consensusdb/consensus_reachability.rs b/consensus/dag/src/consensusdb/consensus_reachability.rs deleted file mode 100644 index 8638393536..0000000000 --- a/consensus/dag/src/consensusdb/consensus_reachability.rs +++ /dev/null @@ -1,540 +0,0 @@ -use super::{ - db::DBStorage, - prelude::{BatchDbWriter, CachedDbAccess, CachedDbItem, DirectDbWriter, StoreError}, -}; -use starcoin_crypto::HashValue as Hash; -use starcoin_storage::storage::RawDBStorage; - -use crate::{ - consensusdb::schema::{KeyCodec, ValueCodec}, - define_schema, - types::{interval::Interval, reachability::ReachabilityData}, -}; -use starcoin_types::blockhash::{self, BlockHashMap, BlockHashes}; - -use parking_lot::{RwLockUpgradableReadGuard, RwLockWriteGuard}; -use rocksdb::WriteBatch; -use std::{collections::hash_map::Entry::Vacant, sync::Arc}; - -/// Reader API for `ReachabilityStore`. -pub trait ReachabilityStoreReader { - fn has(&self, hash: Hash) -> Result; - fn get_interval(&self, hash: Hash) -> Result; - fn get_parent(&self, hash: Hash) -> Result; - fn get_children(&self, hash: Hash) -> Result; - fn get_future_covering_set(&self, hash: Hash) -> Result; -} - -/// Write API for `ReachabilityStore`. 
All write functions are deliberately `mut` -/// since reachability writes are not append-only and thus need to be guarded. -pub trait ReachabilityStore: ReachabilityStoreReader { - fn init(&mut self, origin: Hash, capacity: Interval) -> Result<(), StoreError>; - fn insert( - &mut self, - hash: Hash, - parent: Hash, - interval: Interval, - height: u64, - ) -> Result<(), StoreError>; - fn set_interval(&mut self, hash: Hash, interval: Interval) -> Result<(), StoreError>; - fn append_child(&mut self, hash: Hash, child: Hash) -> Result; - fn insert_future_covering_item( - &mut self, - hash: Hash, - fci: Hash, - insertion_index: usize, - ) -> Result<(), StoreError>; - fn get_height(&self, hash: Hash) -> Result; - fn set_reindex_root(&mut self, root: Hash) -> Result<(), StoreError>; - fn get_reindex_root(&self) -> Result; -} - -const REINDEX_ROOT_KEY: &str = "reachability-reindex-root"; -pub(crate) const REACHABILITY_DATA_CF: &str = "reachability-data"; -// TODO: explore perf to see if using fixed-length constants for store prefixes is preferable - -define_schema!( - Reachability, - Hash, - Arc, - REACHABILITY_DATA_CF -); -define_schema!(ReachabilityCache, Vec, Hash, REACHABILITY_DATA_CF); - -impl KeyCodec for Hash { - fn encode_key(&self) -> Result, StoreError> { - Ok(self.to_vec()) - } - - fn decode_key(data: &[u8]) -> Result { - Hash::from_slice(data).map_err(|e| StoreError::DecodeError(e.to_string())) - } -} -impl ValueCodec for Arc { - fn encode_value(&self) -> Result, StoreError> { - bcs_ext::to_bytes(&self).map_err(|e| StoreError::EncodeError(e.to_string())) - } - - fn decode_value(data: &[u8]) -> Result { - bcs_ext::from_bytes(data).map_err(|e| StoreError::DecodeError(e.to_string())) - } -} -impl KeyCodec for Vec { - fn encode_key(&self) -> Result, StoreError> { - Ok(self.to_vec()) - } - - fn decode_key(data: &[u8]) -> Result { - Ok(data.to_vec()) - } -} -impl ValueCodec for Hash { - fn encode_value(&self) -> Result, StoreError> { - Ok(self.to_vec()) - } - - fn decode_value(data: &[u8]) -> Result { - Hash::from_slice(data).map_err(|e| StoreError::DecodeError(e.to_string())) - } -} - -/// A DB + cache implementation of `ReachabilityStore` trait, with concurrent readers support. 
-#[derive(Clone)] -pub struct DbReachabilityStore { - db: Arc, - access: CachedDbAccess, - reindex_root: CachedDbItem, -} - -impl DbReachabilityStore { - pub fn new(db: Arc, cache_size: usize) -> Self { - Self::new_with_prefix_end(db, cache_size) - } - - pub fn new_with_alternative_prefix_end(db: Arc, cache_size: usize) -> Self { - Self::new_with_prefix_end(db, cache_size) - } - - fn new_with_prefix_end(db: Arc, cache_size: usize) -> Self { - Self { - db: Arc::clone(&db), - access: CachedDbAccess::new(Arc::clone(&db), cache_size), - reindex_root: CachedDbItem::new(db, REINDEX_ROOT_KEY.as_bytes().to_vec()), - } - } - - pub fn clone_with_new_cache(&self, cache_size: usize) -> Self { - Self::new_with_prefix_end(Arc::clone(&self.db), cache_size) - } -} - -impl ReachabilityStore for DbReachabilityStore { - fn init(&mut self, origin: Hash, capacity: Interval) -> Result<(), StoreError> { - debug_assert!(!self.access.has(origin)?); - - let data = Arc::new(ReachabilityData::new( - Hash::new(blockhash::NONE), - capacity, - 0, - )); - let mut batch = WriteBatch::default(); - self.access - .write(BatchDbWriter::new(&mut batch), origin, data)?; - self.reindex_root - .write(BatchDbWriter::new(&mut batch), &origin)?; - self.db - .raw_write_batch(batch) - .map_err(|e| StoreError::DBIoError(e.to_string()))?; - - Ok(()) - } - - fn insert( - &mut self, - hash: Hash, - parent: Hash, - interval: Interval, - height: u64, - ) -> Result<(), StoreError> { - if self.access.has(hash)? { - return Err(StoreError::KeyAlreadyExists(hash.to_string())); - } - let data = Arc::new(ReachabilityData::new(parent, interval, height)); - self.access - .write(DirectDbWriter::new(&self.db), hash, data)?; - Ok(()) - } - - fn set_interval(&mut self, hash: Hash, interval: Interval) -> Result<(), StoreError> { - let mut data = self.access.read(hash)?; - Arc::make_mut(&mut data).interval = interval; - self.access - .write(DirectDbWriter::new(&self.db), hash, data)?; - Ok(()) - } - - fn append_child(&mut self, hash: Hash, child: Hash) -> Result { - let mut data = self.access.read(hash)?; - let height = data.height; - let mut_data = Arc::make_mut(&mut data); - Arc::make_mut(&mut mut_data.children).push(child); - self.access - .write(DirectDbWriter::new(&self.db), hash, data)?; - Ok(height) - } - - fn insert_future_covering_item( - &mut self, - hash: Hash, - fci: Hash, - insertion_index: usize, - ) -> Result<(), StoreError> { - let mut data = self.access.read(hash)?; - let mut_data = Arc::make_mut(&mut data); - Arc::make_mut(&mut mut_data.future_covering_set).insert(insertion_index, fci); - self.access - .write(DirectDbWriter::new(&self.db), hash, data)?; - Ok(()) - } - - fn get_height(&self, hash: Hash) -> Result { - Ok(self.access.read(hash)?.height) - } - - fn set_reindex_root(&mut self, root: Hash) -> Result<(), StoreError> { - self.reindex_root - .write(DirectDbWriter::new(&self.db), &root) - } - - fn get_reindex_root(&self) -> Result { - self.reindex_root.read() - } -} - -impl ReachabilityStoreReader for DbReachabilityStore { - fn has(&self, hash: Hash) -> Result { - self.access.has(hash) - } - - fn get_interval(&self, hash: Hash) -> Result { - Ok(self.access.read(hash)?.interval) - } - - fn get_parent(&self, hash: Hash) -> Result { - Ok(self.access.read(hash)?.parent) - } - - fn get_children(&self, hash: Hash) -> Result { - Ok(Arc::clone(&self.access.read(hash)?.children)) - } - - fn get_future_covering_set(&self, hash: Hash) -> Result { - Ok(Arc::clone(&self.access.read(hash)?.future_covering_set)) - } -} - -pub struct 
StagingReachabilityStore<'a> { - store_read: RwLockUpgradableReadGuard<'a, DbReachabilityStore>, - staging_writes: BlockHashMap, - staging_reindex_root: Option, -} - -impl<'a> StagingReachabilityStore<'a> { - pub fn new(store_read: RwLockUpgradableReadGuard<'a, DbReachabilityStore>) -> Self { - Self { - store_read, - staging_writes: BlockHashMap::new(), - staging_reindex_root: None, - } - } - - pub fn commit( - self, - batch: &mut WriteBatch, - ) -> Result, StoreError> { - let mut store_write = RwLockUpgradableReadGuard::upgrade(self.store_read); - for (k, v) in self.staging_writes { - let data = Arc::new(v); - store_write - .access - .write(BatchDbWriter::new(batch), k, data)? - } - if let Some(root) = self.staging_reindex_root { - store_write - .reindex_root - .write(BatchDbWriter::new(batch), &root)?; - } - Ok(store_write) - } -} - -impl ReachabilityStore for StagingReachabilityStore<'_> { - fn init(&mut self, origin: Hash, capacity: Interval) -> Result<(), StoreError> { - self.insert(origin, Hash::new(blockhash::NONE), capacity, 0)?; - self.set_reindex_root(origin)?; - Ok(()) - } - - fn insert( - &mut self, - hash: Hash, - parent: Hash, - interval: Interval, - height: u64, - ) -> Result<(), StoreError> { - if self.store_read.has(hash)? { - return Err(StoreError::KeyAlreadyExists(hash.to_string())); - } - if let Vacant(e) = self.staging_writes.entry(hash) { - e.insert(ReachabilityData::new(parent, interval, height)); - Ok(()) - } else { - Err(StoreError::KeyAlreadyExists(hash.to_string())) - } - } - - fn set_interval(&mut self, hash: Hash, interval: Interval) -> Result<(), StoreError> { - if let Some(data) = self.staging_writes.get_mut(&hash) { - data.interval = interval; - return Ok(()); - } - - let mut data = (*self.store_read.access.read(hash)?).clone(); - data.interval = interval; - self.staging_writes.insert(hash, data); - - Ok(()) - } - - fn append_child(&mut self, hash: Hash, child: Hash) -> Result { - if let Some(data) = self.staging_writes.get_mut(&hash) { - Arc::make_mut(&mut data.children).push(child); - return Ok(data.height); - } - - let mut data = (*self.store_read.access.read(hash)?).clone(); - let height = data.height; - Arc::make_mut(&mut data.children).push(child); - self.staging_writes.insert(hash, data); - - Ok(height) - } - - fn insert_future_covering_item( - &mut self, - hash: Hash, - fci: Hash, - insertion_index: usize, - ) -> Result<(), StoreError> { - if let Some(data) = self.staging_writes.get_mut(&hash) { - Arc::make_mut(&mut data.future_covering_set).insert(insertion_index, fci); - return Ok(()); - } - - let mut data = (*self.store_read.access.read(hash)?).clone(); - Arc::make_mut(&mut data.future_covering_set).insert(insertion_index, fci); - self.staging_writes.insert(hash, data); - - Ok(()) - } - - fn get_height(&self, hash: Hash) -> Result { - if let Some(data) = self.staging_writes.get(&hash) { - Ok(data.height) - } else { - Ok(self.store_read.access.read(hash)?.height) - } - } - - fn set_reindex_root(&mut self, root: Hash) -> Result<(), StoreError> { - self.staging_reindex_root = Some(root); - Ok(()) - } - - fn get_reindex_root(&self) -> Result { - if let Some(root) = self.staging_reindex_root { - Ok(root) - } else { - Ok(self.store_read.get_reindex_root()?) - } - } -} - -impl ReachabilityStoreReader for StagingReachabilityStore<'_> { - fn has(&self, hash: Hash) -> Result { - Ok(self.staging_writes.contains_key(&hash) || self.store_read.access.has(hash)?) 
- } - - fn get_interval(&self, hash: Hash) -> Result { - if let Some(data) = self.staging_writes.get(&hash) { - Ok(data.interval) - } else { - Ok(self.store_read.access.read(hash)?.interval) - } - } - - fn get_parent(&self, hash: Hash) -> Result { - if let Some(data) = self.staging_writes.get(&hash) { - Ok(data.parent) - } else { - Ok(self.store_read.access.read(hash)?.parent) - } - } - - fn get_children(&self, hash: Hash) -> Result { - if let Some(data) = self.staging_writes.get(&hash) { - Ok(BlockHashes::clone(&data.children)) - } else { - Ok(BlockHashes::clone( - &self.store_read.access.read(hash)?.children, - )) - } - } - - fn get_future_covering_set(&self, hash: Hash) -> Result { - if let Some(data) = self.staging_writes.get(&hash) { - Ok(BlockHashes::clone(&data.future_covering_set)) - } else { - Ok(BlockHashes::clone( - &self.store_read.access.read(hash)?.future_covering_set, - )) - } - } -} - -pub struct MemoryReachabilityStore { - map: BlockHashMap, - reindex_root: Option, -} - -impl Default for MemoryReachabilityStore { - fn default() -> Self { - Self::new() - } -} - -impl MemoryReachabilityStore { - pub fn new() -> Self { - Self { - map: BlockHashMap::new(), - reindex_root: None, - } - } - - fn get_data_mut(&mut self, hash: Hash) -> Result<&mut ReachabilityData, StoreError> { - match self.map.get_mut(&hash) { - Some(data) => Ok(data), - None => Err(StoreError::KeyNotFound(hash.to_string())), - } - } - - fn get_data(&self, hash: Hash) -> Result<&ReachabilityData, StoreError> { - match self.map.get(&hash) { - Some(data) => Ok(data), - None => Err(StoreError::KeyNotFound(hash.to_string())), - } - } -} - -impl ReachabilityStore for MemoryReachabilityStore { - fn init(&mut self, origin: Hash, capacity: Interval) -> Result<(), StoreError> { - self.insert(origin, Hash::new(blockhash::NONE), capacity, 0)?; - self.set_reindex_root(origin)?; - Ok(()) - } - - fn insert( - &mut self, - hash: Hash, - parent: Hash, - interval: Interval, - height: u64, - ) -> Result<(), StoreError> { - if let Vacant(e) = self.map.entry(hash) { - e.insert(ReachabilityData::new(parent, interval, height)); - Ok(()) - } else { - Err(StoreError::KeyAlreadyExists(hash.to_string())) - } - } - - fn set_interval(&mut self, hash: Hash, interval: Interval) -> Result<(), StoreError> { - let data = self.get_data_mut(hash)?; - data.interval = interval; - Ok(()) - } - - fn append_child(&mut self, hash: Hash, child: Hash) -> Result { - let data = self.get_data_mut(hash)?; - Arc::make_mut(&mut data.children).push(child); - Ok(data.height) - } - - fn insert_future_covering_item( - &mut self, - hash: Hash, - fci: Hash, - insertion_index: usize, - ) -> Result<(), StoreError> { - let data = self.get_data_mut(hash)?; - Arc::make_mut(&mut data.future_covering_set).insert(insertion_index, fci); - Ok(()) - } - - fn get_height(&self, hash: Hash) -> Result { - Ok(self.get_data(hash)?.height) - } - - fn set_reindex_root(&mut self, root: Hash) -> Result<(), StoreError> { - self.reindex_root = Some(root); - Ok(()) - } - - fn get_reindex_root(&self) -> Result { - match self.reindex_root { - Some(root) => Ok(root), - None => Err(StoreError::KeyNotFound(REINDEX_ROOT_KEY.to_string())), - } - } -} - -impl ReachabilityStoreReader for MemoryReachabilityStore { - fn has(&self, hash: Hash) -> Result { - Ok(self.map.contains_key(&hash)) - } - - fn get_interval(&self, hash: Hash) -> Result { - Ok(self.get_data(hash)?.interval) - } - - fn get_parent(&self, hash: Hash) -> Result { - Ok(self.get_data(hash)?.parent) - } - - fn get_children(&self, hash: 
Hash) -> Result { - Ok(Arc::clone(&self.get_data(hash)?.children)) - } - - fn get_future_covering_set(&self, hash: Hash) -> Result { - Ok(Arc::clone(&self.get_data(hash)?.future_covering_set)) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_store_basics() { - let mut store: Box = Box::new(MemoryReachabilityStore::new()); - let (hash, parent) = (7.into(), 15.into()); - let interval = Interval::maximal(); - store.insert(hash, parent, interval, 5).unwrap(); - let height = store.append_child(hash, 31.into()).unwrap(); - assert_eq!(height, 5); - let children = store.get_children(hash).unwrap(); - println!("{children:?}"); - store.get_interval(7.into()).unwrap(); - println!("{children:?}"); - } -} diff --git a/consensus/dag/src/consensusdb/consensus_relations.rs b/consensus/dag/src/consensusdb/consensus_relations.rs deleted file mode 100644 index d54f2bd50d..0000000000 --- a/consensus/dag/src/consensusdb/consensus_relations.rs +++ /dev/null @@ -1,240 +0,0 @@ -use super::schema::{KeyCodec, ValueCodec}; -use super::{ - db::DBStorage, - prelude::{BatchDbWriter, CachedDbAccess, DirectDbWriter, StoreError}, -}; -use crate::define_schema; -use rocksdb::WriteBatch; -use starcoin_crypto::HashValue as Hash; -use starcoin_types::blockhash::{BlockHashes, BlockLevel}; -use std::sync::Arc; - -/// Reader API for `RelationsStore`. -pub trait RelationsStoreReader { - fn get_parents(&self, hash: Hash) -> Result; - fn get_children(&self, hash: Hash) -> Result; - fn has(&self, hash: Hash) -> Result; -} - -/// Write API for `RelationsStore`. The insert function is deliberately `mut` -/// since it modifies the children arrays for previously added parents which is -/// non-append-only and thus needs to be guarded. -pub trait RelationsStore: RelationsStoreReader { - /// Inserts `parents` into a new store entry for `hash`, and for each `parent ∈ parents` adds `hash` to `parent.children` - fn insert(&self, hash: Hash, parents: BlockHashes) -> Result<(), StoreError>; -} - -pub(crate) const PARENTS_CF: &str = "block-parents"; -pub(crate) const CHILDREN_CF: &str = "block-children"; - -define_schema!(RelationParent, Hash, Arc>, PARENTS_CF); -define_schema!(RelationChildren, Hash, Arc>, CHILDREN_CF); - -impl KeyCodec for Hash { - fn encode_key(&self) -> Result, StoreError> { - Ok(self.to_vec()) - } - - fn decode_key(data: &[u8]) -> Result { - Hash::from_slice(data).map_err(|e| StoreError::DecodeError(e.to_string())) - } -} -impl ValueCodec for Arc> { - fn encode_value(&self) -> Result, StoreError> { - bcs_ext::to_bytes(self).map_err(|e| StoreError::EncodeError(e.to_string())) - } - - fn decode_value(data: &[u8]) -> Result { - bcs_ext::from_bytes(data).map_err(|e| StoreError::DecodeError(e.to_string())) - } -} -impl KeyCodec for Hash { - fn encode_key(&self) -> Result, StoreError> { - Ok(self.to_vec()) - } - - fn decode_key(data: &[u8]) -> Result { - Hash::from_slice(data).map_err(|e| StoreError::DecodeError(e.to_string())) - } -} - -impl ValueCodec for Arc> { - fn encode_value(&self) -> Result, StoreError> { - bcs_ext::to_bytes(self).map_err(|e| StoreError::EncodeError(e.to_string())) - } - - fn decode_value(data: &[u8]) -> Result { - bcs_ext::from_bytes(data).map_err(|e| StoreError::DecodeError(e.to_string())) - } -} - -/// A DB + cache implementation of `RelationsStore` trait, with concurrent readers support. 
-#[derive(Clone)] -pub struct DbRelationsStore { - db: Arc, - level: BlockLevel, - parents_access: CachedDbAccess, - children_access: CachedDbAccess, -} - -impl DbRelationsStore { - pub fn new(db: Arc, level: BlockLevel, cache_size: usize) -> Self { - Self { - db: Arc::clone(&db), - level, - parents_access: CachedDbAccess::new(Arc::clone(&db), cache_size), - children_access: CachedDbAccess::new(db, cache_size), - } - } - - pub fn clone_with_new_cache(&self, cache_size: usize) -> Self { - Self::new(Arc::clone(&self.db), self.level, cache_size) - } - - pub fn insert_batch( - &mut self, - batch: &mut WriteBatch, - hash: Hash, - parents: BlockHashes, - ) -> Result<(), StoreError> { - if self.has(hash)? { - return Err(StoreError::KeyAlreadyExists(hash.to_string())); - } - - // Insert a new entry for `hash` - self.parents_access - .write(BatchDbWriter::new(batch), hash, parents.clone())?; - - // The new hash has no children yet - self.children_access.write( - BatchDbWriter::new(batch), - hash, - BlockHashes::new(Vec::new()), - )?; - - // Update `children` for each parent - for parent in parents.iter().cloned() { - let mut children = (*self.get_children(parent)?).clone(); - children.push(hash); - self.children_access.write( - BatchDbWriter::new(batch), - parent, - BlockHashes::new(children), - )?; - } - - Ok(()) - } -} - -impl RelationsStoreReader for DbRelationsStore { - fn get_parents(&self, hash: Hash) -> Result { - self.parents_access.read(hash) - } - - fn get_children(&self, hash: Hash) -> Result { - self.children_access.read(hash) - } - - fn has(&self, hash: Hash) -> Result { - if self.parents_access.has(hash)? { - debug_assert!(self.children_access.has(hash)?); - Ok(true) - } else { - Ok(false) - } - } -} - -impl RelationsStore for DbRelationsStore { - /// See `insert_batch` as well - /// TODO: use one function with DbWriter for both this function and insert_batch - fn insert(&self, hash: Hash, parents: BlockHashes) -> Result<(), StoreError> { - if self.has(hash)? 
{ - return Err(StoreError::KeyAlreadyExists(hash.to_string())); - } - - // Insert a new entry for `hash` - self.parents_access - .write(DirectDbWriter::new(&self.db), hash, parents.clone())?; - - // The new hash has no children yet - self.children_access.write( - DirectDbWriter::new(&self.db), - hash, - BlockHashes::new(Vec::new()), - )?; - - // Update `children` for each parent - for parent in parents.iter().cloned() { - let mut children = (*self.get_children(parent)?).clone(); - children.push(hash); - self.children_access.write( - DirectDbWriter::new(&self.db), - parent, - BlockHashes::new(children), - )?; - } - - Ok(()) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::consensusdb::prelude::{FlexiDagStorage, FlexiDagStorageConfig}; - - #[test] - fn test_db_relations_store() { - let db_tempdir = tempfile::tempdir().unwrap(); - let config = FlexiDagStorageConfig::new(); - - let db = FlexiDagStorage::create_from_path(db_tempdir.path(), config) - .expect("failed to create flexidag storage"); - test_relations_store(db.relations_store); - } - - fn test_relations_store(store: T) { - let parents = [ - (1, vec![]), - (2, vec![1]), - (3, vec![1]), - (4, vec![2, 3]), - (5, vec![1, 4]), - ]; - for (i, vec) in parents.iter().cloned() { - store - .insert( - i.into(), - BlockHashes::new(vec.iter().copied().map(Hash::from).collect()), - ) - .unwrap(); - } - - let expected_children = [ - (1, vec![2, 3, 5]), - (2, vec![4]), - (3, vec![4]), - (4, vec![5]), - (5, vec![]), - ]; - for (i, vec) in expected_children { - assert!(store - .get_children(i.into()) - .unwrap() - .iter() - .copied() - .eq(vec.iter().copied().map(Hash::from))); - } - - for (i, vec) in parents { - assert!(store - .get_parents(i.into()) - .unwrap() - .iter() - .copied() - .eq(vec.iter().copied().map(Hash::from))); - } - } -} diff --git a/consensus/dag/src/consensusdb/db.rs b/consensus/dag/src/consensusdb/db.rs deleted file mode 100644 index 9babc7e70c..0000000000 --- a/consensus/dag/src/consensusdb/db.rs +++ /dev/null @@ -1,93 +0,0 @@ -use super::{ - error::StoreError, - schemadb::{ - DbGhostdagStore, DbHeadersStore, DbReachabilityStore, DbRelationsStore, CHILDREN_CF, - COMPACT_GHOST_DAG_STORE_CF, COMPACT_HEADER_DATA_STORE_CF, GHOST_DAG_STORE_CF, - HEADERS_STORE_CF, PARENTS_CF, REACHABILITY_DATA_CF, - }, -}; -use starcoin_config::{RocksdbConfig, StorageConfig}; -pub(crate) use starcoin_storage::db_storage::DBStorage; -use std::{path::Path, sync::Arc}; - -#[derive(Clone)] -pub struct FlexiDagStorage { - pub ghost_dag_store: DbGhostdagStore, - pub header_store: DbHeadersStore, - pub reachability_store: DbReachabilityStore, - pub relations_store: DbRelationsStore, -} - -#[derive(Clone)] -pub struct FlexiDagStorageConfig { - pub cache_size: usize, - pub rocksdb_config: RocksdbConfig, -} -impl Default for FlexiDagStorageConfig { - fn default() -> Self { - Self { - cache_size: 1, - rocksdb_config: Default::default(), - } - } -} -impl FlexiDagStorageConfig { - pub fn new() -> Self { - FlexiDagStorageConfig::default() - } - - pub fn create_with_params(cache_size: usize, rocksdb_config: RocksdbConfig) -> Self { - Self { - cache_size, - rocksdb_config, - } - } -} - -impl From for FlexiDagStorageConfig { - fn from(value: StorageConfig) -> Self { - Self { - cache_size: value.cache_size(), - rocksdb_config: value.rocksdb_config(), - } - } -} - -impl FlexiDagStorage { - /// Creates or loads an existing storage from the provided directory path. 
- pub fn create_from_path>( - db_path: P, - config: FlexiDagStorageConfig, - ) -> Result { - let db = Arc::new( - DBStorage::open_with_cfs( - db_path, - vec![ - // consensus headers - HEADERS_STORE_CF, - COMPACT_HEADER_DATA_STORE_CF, - // consensus relations - PARENTS_CF, - CHILDREN_CF, - // consensus reachability - REACHABILITY_DATA_CF, - // consensus ghostdag - GHOST_DAG_STORE_CF, - COMPACT_GHOST_DAG_STORE_CF, - ], - false, - config.rocksdb_config, - None, - ) - .map_err(|e| StoreError::DBIoError(e.to_string()))?, - ); - - Ok(Self { - ghost_dag_store: DbGhostdagStore::new(db.clone(), 1, config.cache_size), - - header_store: DbHeadersStore::new(db.clone(), config.cache_size), - reachability_store: DbReachabilityStore::new(db.clone(), config.cache_size), - relations_store: DbRelationsStore::new(db, 1, config.cache_size), - }) - } -} diff --git a/consensus/dag/src/consensusdb/error.rs b/consensus/dag/src/consensusdb/error.rs deleted file mode 100644 index ff2c199c93..0000000000 --- a/consensus/dag/src/consensusdb/error.rs +++ /dev/null @@ -1,58 +0,0 @@ -use thiserror::Error; - -#[derive(Error, Debug)] -pub enum StoreError { - #[error("key {0} not found in store")] - KeyNotFound(String), - - #[error("key {0} already exists in store")] - KeyAlreadyExists(String), - - #[error("column family {0} not exist in db")] - CFNotExist(String), - - #[error("IO error {0}")] - DBIoError(String), - - #[error("rocksdb error {0}")] - DbError(#[from] rocksdb::Error), - - #[error("encode error {0}")] - EncodeError(String), - - #[error("decode error {0}")] - DecodeError(String), - - #[error("ghostdag {0} duplicate blocks")] - DAGDupBlocksError(String), -} - -pub type StoreResult = std::result::Result; - -pub trait StoreResultExtensions { - fn unwrap_option(self) -> Option; -} - -impl StoreResultExtensions for StoreResult { - fn unwrap_option(self) -> Option { - match self { - Ok(value) => Some(value), - Err(StoreError::KeyNotFound(_)) => None, - Err(err) => panic!("Unexpected store error: {err:?}"), - } - } -} - -pub trait StoreResultEmptyTuple { - fn unwrap_and_ignore_key_already_exists(self); -} - -impl StoreResultEmptyTuple for StoreResult<()> { - fn unwrap_and_ignore_key_already_exists(self) { - match self { - Ok(_) => (), - Err(StoreError::KeyAlreadyExists(_)) => (), - Err(err) => panic!("Unexpected store error: {err:?}"), - } - } -} diff --git a/consensus/dag/src/consensusdb/item.rs b/consensus/dag/src/consensusdb/item.rs deleted file mode 100644 index 0d27b9c347..0000000000 --- a/consensus/dag/src/consensusdb/item.rs +++ /dev/null @@ -1,81 +0,0 @@ -use super::prelude::DbWriter; -use super::schema::{KeyCodec, Schema, ValueCodec}; -use super::{db::DBStorage, error::StoreError}; -use parking_lot::RwLock; -use starcoin_storage::storage::RawDBStorage; -use std::sync::Arc; - -/// A cached DB item with concurrency support -#[derive(Clone)] -pub struct CachedDbItem { - db: Arc, - key: S::Key, - cached_item: Arc>>, -} - -impl CachedDbItem { - pub fn new(db: Arc, key: S::Key) -> Self { - Self { - db, - key, - cached_item: Arc::new(RwLock::new(None)), - } - } - - pub fn read(&self) -> Result { - if let Some(item) = self.cached_item.read().clone() { - return Ok(item); - } - if let Some(slice) = self - .db - .raw_get_pinned_cf(S::COLUMN_FAMILY, &self.key.encode_key()?) - .map_err(|_| StoreError::CFNotExist(S::COLUMN_FAMILY.to_string()))? 
- { - let item = S::Value::decode_value(&slice)?; - *self.cached_item.write() = Some(item.clone()); - Ok(item) - } else { - Err(StoreError::KeyNotFound( - String::from_utf8(self.key.encode_key()?) - .unwrap_or(("unrecoverable key string").to_string()), - )) - } - } - - pub fn write(&mut self, mut writer: impl DbWriter, item: &S::Value) -> Result<(), StoreError> { - *self.cached_item.write() = Some(item.clone()); - writer.put::(&self.key, item)?; - Ok(()) - } - - pub fn remove(&mut self, mut writer: impl DbWriter) -> Result<(), StoreError> -where { - *self.cached_item.write() = None; - writer.delete::(&self.key)?; - Ok(()) - } - - pub fn update(&mut self, mut writer: impl DbWriter, op: F) -> Result - where - F: Fn(S::Value) -> S::Value, - { - let mut guard = self.cached_item.write(); - let mut item = if let Some(item) = guard.take() { - item - } else if let Some(slice) = self - .db - .raw_get_pinned_cf(S::COLUMN_FAMILY, &self.key.encode_key()?) - .map_err(|_| StoreError::CFNotExist(S::COLUMN_FAMILY.to_string()))? - { - let item = S::Value::decode_value(&slice)?; - item - } else { - return Err(StoreError::KeyNotFound("".to_string())); - }; - - item = op(item); // Apply the update op - *guard = Some(item.clone()); - writer.put::(&self.key, &item)?; - Ok(item) - } -} diff --git a/consensus/dag/src/consensusdb/mod.rs b/consensus/dag/src/consensusdb/mod.rs deleted file mode 100644 index 5aaa7c6ef2..0000000000 --- a/consensus/dag/src/consensusdb/mod.rs +++ /dev/null @@ -1,31 +0,0 @@ -mod access; -mod cache; -mod consensus_ghostdag; -mod consensus_header; -mod consensus_reachability; -pub mod consensus_relations; -mod db; -mod error; -mod item; -pub mod schema; -mod writer; - -pub mod prelude { - use super::{db, error}; - - pub use super::{ - access::CachedDbAccess, - cache::DagCache, - item::CachedDbItem, - writer::{BatchDbWriter, DbWriter, DirectDbWriter}, - }; - pub use db::{FlexiDagStorage, FlexiDagStorageConfig}; - pub use error::{StoreError, StoreResult, StoreResultEmptyTuple, StoreResultExtensions}; -} - -pub mod schemadb { - pub use super::{ - consensus_ghostdag::*, consensus_header::*, consensus_reachability::*, - consensus_relations::*, - }; -} diff --git a/consensus/dag/src/consensusdb/schema.rs b/consensus/dag/src/consensusdb/schema.rs deleted file mode 100644 index 502ee9c8c7..0000000000 --- a/consensus/dag/src/consensusdb/schema.rs +++ /dev/null @@ -1,40 +0,0 @@ -use super::error::StoreError; -use core::hash::Hash; -use std::fmt::Debug; -use std::result::Result; - -pub trait KeyCodec: Clone + Sized + Debug + Send + Sync { - /// Converts `self` to bytes to be stored in DB. - fn encode_key(&self) -> Result, StoreError>; - /// Converts bytes fetched from DB to `Self`. - fn decode_key(data: &[u8]) -> Result; -} - -pub trait ValueCodec: Clone + Sized + Debug + Send + Sync { - /// Converts `self` to bytes to be stored in DB. - fn encode_value(&self) -> Result, StoreError>; - /// Converts bytes fetched from DB to `Self`. - fn decode_value(data: &[u8]) -> Result; -} - -pub trait Schema: Debug + Send + Sync + 'static { - const COLUMN_FAMILY: &'static str; - - type Key: KeyCodec + Hash + Eq + Default; - type Value: ValueCodec + Default + Clone; -} - -#[macro_export] -macro_rules! 
define_schema { - ($schema_type: ident, $key_type: ty, $value_type: ty, $cf_name: expr) => { - #[derive(Clone, Debug)] - pub(crate) struct $schema_type; - - impl $crate::consensusdb::schema::Schema for $schema_type { - type Key = $key_type; - type Value = $value_type; - - const COLUMN_FAMILY: &'static str = $cf_name; - } - }; -} diff --git a/consensus/dag/src/consensusdb/writer.rs b/consensus/dag/src/consensusdb/writer.rs deleted file mode 100644 index 717d7d7e1c..0000000000 --- a/consensus/dag/src/consensusdb/writer.rs +++ /dev/null @@ -1,75 +0,0 @@ -use rocksdb::WriteBatch; -use starcoin_storage::storage::InnerStore; - -use super::schema::{KeyCodec, Schema, ValueCodec}; -use super::{db::DBStorage, error::StoreError}; - -/// Abstraction over direct/batched DB writing -pub trait DbWriter { - fn put(&mut self, key: &S::Key, value: &S::Value) -> Result<(), StoreError>; - fn delete(&mut self, key: &S::Key) -> Result<(), StoreError>; -} - -pub struct DirectDbWriter<'a> { - db: &'a DBStorage, -} - -impl<'a> DirectDbWriter<'a> { - pub fn new(db: &'a DBStorage) -> Self { - Self { db } - } -} - -impl DbWriter for DirectDbWriter<'_> { - fn put(&mut self, key: &S::Key, value: &S::Value) -> Result<(), StoreError> { - let bin_key = key.encode_key()?; - let bin_data = value.encode_value()?; - self.db - .put(S::COLUMN_FAMILY, bin_key, bin_data) - .map_err(|e| StoreError::DBIoError(e.to_string())) - } - - fn delete(&mut self, key: &S::Key) -> Result<(), StoreError> { - let key = key.encode_key()?; - self.db - .remove(S::COLUMN_FAMILY, key) - .map_err(|e| StoreError::DBIoError(e.to_string())) - } -} - -pub struct BatchDbWriter<'a> { - batch: &'a mut WriteBatch, -} - -impl<'a> BatchDbWriter<'a> { - pub fn new(batch: &'a mut WriteBatch) -> Self { - Self { batch } - } -} - -impl DbWriter for BatchDbWriter<'_> { - fn put(&mut self, key: &S::Key, value: &S::Value) -> Result<(), StoreError> { - let key = key.encode_key()?; - let value = value.encode_value()?; - self.batch.put(key, value); - Ok(()) - } - - fn delete(&mut self, key: &S::Key) -> Result<(), StoreError> { - let key = key.encode_key()?; - self.batch.delete(key); - Ok(()) - } -} - -impl DbWriter for &mut T { - #[inline] - fn put(&mut self, key: &S::Key, value: &S::Value) -> Result<(), StoreError> { - (*self).put::(key, value) - } - - #[inline] - fn delete(&mut self, key: &S::Key) -> Result<(), StoreError> { - (*self).delete::(key) - } -} diff --git a/consensus/dag/src/ghostdag/mergeset.rs b/consensus/dag/src/ghostdag/mergeset.rs deleted file mode 100644 index 5edd288b3a..0000000000 --- a/consensus/dag/src/ghostdag/mergeset.rs +++ /dev/null @@ -1,71 +0,0 @@ -use super::protocol::GhostdagManager; -use crate::consensusdb::schemadb::{GhostdagStoreReader, HeaderStoreReader, RelationsStoreReader}; -use crate::reachability::reachability_service::ReachabilityService; -use starcoin_crypto::HashValue as Hash; -use starcoin_types::blockhash::BlockHashSet; -use std::collections::VecDeque; - -impl< - T: GhostdagStoreReader, - S: RelationsStoreReader, - U: ReachabilityService, - V: HeaderStoreReader, - > GhostdagManager -{ - pub fn ordered_mergeset_without_selected_parent( - &self, - selected_parent: Hash, - parents: &[Hash], - ) -> Vec { - self.sort_blocks(self.unordered_mergeset_without_selected_parent(selected_parent, parents)) - } - - pub fn unordered_mergeset_without_selected_parent( - &self, - selected_parent: Hash, - parents: &[Hash], - ) -> BlockHashSet { - let mut queue: VecDeque<_> = parents - .iter() - .copied() - .filter(|p| p != &selected_parent) - 
.collect(); - let mut mergeset: BlockHashSet = queue.iter().copied().collect(); - let mut selected_parent_past = BlockHashSet::new(); - - while let Some(current) = queue.pop_front() { - let current_parents = self - .relations_store - .get_parents(current) - .unwrap_or_else(|err| { - println!("WUT"); - panic!("{err:?}"); - }); - - // For each parent of the current block we check whether it is in the past of the selected parent. If not, - // we add it to the resulting merge-set and queue it for further processing. - for parent in current_parents.iter() { - if mergeset.contains(parent) { - continue; - } - - if selected_parent_past.contains(parent) { - continue; - } - - if self - .reachability_service - .is_dag_ancestor_of(*parent, selected_parent) - { - selected_parent_past.insert(*parent); - continue; - } - - mergeset.insert(*parent); - queue.push_back(*parent); - } - } - - mergeset - } -} diff --git a/consensus/dag/src/ghostdag/mod.rs b/consensus/dag/src/ghostdag/mod.rs deleted file mode 100644 index 51a2c8fc82..0000000000 --- a/consensus/dag/src/ghostdag/mod.rs +++ /dev/null @@ -1,4 +0,0 @@ -pub mod mergeset; -pub mod protocol; - -mod util; diff --git a/consensus/dag/src/ghostdag/protocol.rs b/consensus/dag/src/ghostdag/protocol.rs deleted file mode 100644 index 089d56ce06..0000000000 --- a/consensus/dag/src/ghostdag/protocol.rs +++ /dev/null @@ -1,322 +0,0 @@ -use super::util::Refs; -use crate::consensusdb::schemadb::{GhostdagStoreReader, HeaderStoreReader, RelationsStoreReader}; -use crate::reachability::reachability_service::ReachabilityService; -use crate::types::{ghostdata::GhostdagData, ordering::*}; -use starcoin_crypto::HashValue as Hash; -use starcoin_types::block::BlockHeader; -use starcoin_types::blockhash::{BlockHashMap, BlockHashes, BlueWorkType, HashKTypeMap, KType}; -use std::sync::Arc; - -#[derive(Clone)] -pub struct GhostdagManager< - T: GhostdagStoreReader, - S: RelationsStoreReader, - U: ReachabilityService, - V: HeaderStoreReader, -> { - pub(super) k: KType, - pub(super) ghostdag_store: T, - pub(super) relations_store: S, - pub(super) headers_store: V, - pub(super) reachability_service: U, -} - -impl< - T: GhostdagStoreReader, - S: RelationsStoreReader, - U: ReachabilityService, - V: HeaderStoreReader, - > GhostdagManager -{ - pub fn new( - k: KType, - ghostdag_store: T, - relations_store: S, - headers_store: V, - reachability_service: U, - ) -> Self { - Self { - k, - ghostdag_store, - relations_store, - reachability_service, - headers_store, - } - } - - pub fn genesis_ghostdag_data(&self, genesis: &BlockHeader) -> GhostdagData { - GhostdagData::new( - 0, - genesis.difficulty(), - genesis.parent_hash(), - BlockHashes::new(vec![]), - BlockHashes::new(Vec::new()), - HashKTypeMap::new(BlockHashMap::new()), - ) - } - - pub fn origin_ghostdag_data(&self) -> Arc { - Arc::new(GhostdagData::new( - 0, - Default::default(), - 0.into(), - BlockHashes::new(Vec::new()), - BlockHashes::new(Vec::new()), - HashKTypeMap::new(BlockHashMap::new()), - )) - } - - pub fn find_selected_parent(&self, parents: impl IntoIterator) -> Hash { - parents - .into_iter() - .map(|parent| SortableBlock { - hash: parent, - blue_work: self.ghostdag_store.get_blue_work(parent).unwrap(), - }) - .max() - .unwrap() - .hash - } - - /// Runs the GHOSTDAG protocol and calculates the block GhostdagData by the given parents. 
- /// The function calculates mergeset blues by iterating over the blocks in - /// the anticone of the new block selected parent (which is the parent with the - /// highest blue work) and adds any block to the blue set if by adding - /// it these conditions will not be violated: - /// - /// 1) |anticone-of-candidate-block ∩ blue-set-of-new-block| ≤ K - /// - /// 2) For every blue block in blue-set-of-new-block: - /// |(anticone-of-blue-block ∩ blue-set-new-block) ∪ {candidate-block}| ≤ K. - /// We validate this condition by maintaining a map blues_anticone_sizes for - /// each block which holds all the blue anticone sizes that were affected by - /// the new added blue blocks. - /// So to find out what is |anticone-of-blue ∩ blue-set-of-new-block| we just iterate in - /// the selected parent chain of the new block until we find an existing entry in - /// blues_anticone_sizes. - /// - /// For further details see the article https://eprint.iacr.org/2018/104.pdf - pub fn ghostdag(&self, parents: &[Hash]) -> GhostdagData { - assert!( - !parents.is_empty(), - "genesis must be added via a call to init" - ); - // Run the GHOSTDAG parent selection algorithm - let selected_parent = self.find_selected_parent(&mut parents.iter().copied()); - // Initialize new GHOSTDAG block data with the selected parent - let mut new_block_data = GhostdagData::new_with_selected_parent(selected_parent, self.k); - // Get the mergeset in consensus-agreed topological order (topological here means forward in time from blocks to children) - let ordered_mergeset = - self.ordered_mergeset_without_selected_parent(selected_parent, parents); - - for blue_candidate in ordered_mergeset.iter().cloned() { - let coloring = self.check_blue_candidate(&new_block_data, blue_candidate); - - if let ColoringOutput::Blue(blue_anticone_size, blues_anticone_sizes) = coloring { - // No k-cluster violation found, we can now set the candidate block as blue - new_block_data.add_blue(blue_candidate, blue_anticone_size, &blues_anticone_sizes); - } else { - new_block_data.add_red(blue_candidate); - } - } - - let blue_score = self - .ghostdag_store - .get_blue_score(selected_parent) - .unwrap() - .checked_add(new_block_data.mergeset_blues.len() as u64) - .unwrap(); - - let added_blue_work: BlueWorkType = new_block_data - .mergeset_blues - .iter() - .cloned() - .map(|hash| self.headers_store.get_difficulty(hash).unwrap_or(0.into())) - .sum(); - - let blue_work = self - .ghostdag_store - .get_blue_work(selected_parent) - .unwrap() - .checked_add(added_blue_work) - .unwrap(); - - new_block_data.finalize_score_and_work(blue_score, blue_work); - - new_block_data - } - - fn check_blue_candidate_with_chain_block( - &self, - new_block_data: &GhostdagData, - chain_block: &ChainBlock, - blue_candidate: Hash, - candidate_blues_anticone_sizes: &mut BlockHashMap, - candidate_blue_anticone_size: &mut KType, - ) -> ColoringState { - // If blue_candidate is in the future of chain_block, it means - // that all remaining blues are in the past of chain_block and thus - // in the past of blue_candidate. In this case we know for sure that - // the anticone of blue_candidate will not exceed K, and we can mark - // it as blue. - // - // The new block is always in the future of blue_candidate, so there's - // no point in checking it. - - // We check if chain_block is not the new block by checking if it has a hash. 
- if let Some(hash) = chain_block.hash { - if self - .reachability_service - .is_dag_ancestor_of(hash, blue_candidate) - { - return ColoringState::Blue; - } - } - - for &block in chain_block.data.mergeset_blues.iter() { - // Skip blocks that exist in the past of blue_candidate. - if self - .reachability_service - .is_dag_ancestor_of(block, blue_candidate) - { - continue; - } - - candidate_blues_anticone_sizes - .insert(block, self.blue_anticone_size(block, new_block_data)); - - *candidate_blue_anticone_size = (*candidate_blue_anticone_size).checked_add(1).unwrap(); - if *candidate_blue_anticone_size > self.k { - // k-cluster violation: The candidate's blue anticone exceeded k - return ColoringState::Red; - } - - if *candidate_blues_anticone_sizes.get(&block).unwrap() == self.k { - // k-cluster violation: A block in candidate's blue anticone already - // has k blue blocks in its own anticone - return ColoringState::Red; - } - - // This is a sanity check that validates that a blue - // block's blue anticone is not already larger than K. - assert!( - *candidate_blues_anticone_sizes.get(&block).unwrap() <= self.k, - "found blue anticone larger than K" - ); - } - - ColoringState::Pending - } - - /// Returns the blue anticone size of `block` from the worldview of `context`. - /// Expects `block` to be in the blue set of `context` - fn blue_anticone_size(&self, block: Hash, context: &GhostdagData) -> KType { - let mut current_blues_anticone_sizes = HashKTypeMap::clone(&context.blues_anticone_sizes); - let mut current_selected_parent = context.selected_parent; - loop { - if let Some(size) = current_blues_anticone_sizes.get(&block) { - return *size; - } - /* TODO: consider refactor it - if current_selected_parent == self.genesis_hash - || current_selected_parent == Hash::new(blockhash::ORIGIN) - { - panic!("block {block} is not in blue set of the given context"); - } - */ - current_blues_anticone_sizes = self - .ghostdag_store - .get_blues_anticone_sizes(current_selected_parent) - .unwrap(); - current_selected_parent = self - .ghostdag_store - .get_selected_parent(current_selected_parent) - .unwrap(); - } - } - - pub fn check_blue_candidate( - &self, - new_block_data: &GhostdagData, - blue_candidate: Hash, - ) -> ColoringOutput { - // The maximum length of new_block_data.mergeset_blues can be K+1 because - // it contains the selected parent. - if new_block_data.mergeset_blues.len() as KType == self.k.checked_add(1).unwrap() { - return ColoringOutput::Red; - } - - let mut candidate_blues_anticone_sizes: BlockHashMap = - BlockHashMap::with_capacity(self.k as usize); - // Iterate over all blocks in the blue past of the new block that are not in the past - // of blue_candidate, and check for each one of them if blue_candidate potentially - // enlarges their blue anticone to be over K, or that they enlarge the blue anticone - // of blue_candidate to be over K. 
- let mut chain_block = ChainBlock { - hash: None, - data: new_block_data.into(), - }; - let mut candidate_blue_anticone_size: KType = 0; - - loop { - let state = self.check_blue_candidate_with_chain_block( - new_block_data, - &chain_block, - blue_candidate, - &mut candidate_blues_anticone_sizes, - &mut candidate_blue_anticone_size, - ); - - match state { - ColoringState::Blue => { - return ColoringOutput::Blue( - candidate_blue_anticone_size, - candidate_blues_anticone_sizes, - ); - } - ColoringState::Red => return ColoringOutput::Red, - ColoringState::Pending => (), // continue looping - } - - chain_block = ChainBlock { - hash: Some(chain_block.data.selected_parent), - data: self - .ghostdag_store - .get_data(chain_block.data.selected_parent) - .unwrap() - .into(), - } - } - } - - pub fn sort_blocks(&self, blocks: impl IntoIterator) -> Vec { - let mut sorted_blocks: Vec = blocks.into_iter().collect(); - sorted_blocks.sort_by_cached_key(|block| SortableBlock { - hash: *block, - blue_work: self.ghostdag_store.get_blue_work(*block).unwrap(), - }); - sorted_blocks - } -} - -/// Chain block with attached ghostdag data -struct ChainBlock<'a> { - hash: Option, - // if set to `None`, signals being the new block - data: Refs<'a, GhostdagData>, -} - -/// Represents the intermediate GHOSTDAG coloring state for the current candidate -enum ColoringState { - Blue, - Red, - Pending, -} - -#[derive(Debug)] -/// Represents the final output of GHOSTDAG coloring for the current candidate -pub enum ColoringOutput { - Blue(KType, BlockHashMap), - // (blue anticone size, map of blue anticone sizes for each affected blue) - Red, -} diff --git a/consensus/dag/src/ghostdag/util.rs b/consensus/dag/src/ghostdag/util.rs deleted file mode 100644 index 68eb4b9b31..0000000000 --- a/consensus/dag/src/ghostdag/util.rs +++ /dev/null @@ -1,57 +0,0 @@ -use std::{ops::Deref, rc::Rc, sync::Arc}; -/// Enum used to represent a concrete varying pointer type which only needs to be accessed by ref. 
-/// We avoid adding a `Val(T)` variant in order to keep the size of the enum minimal -pub enum Refs<'a, T> { - Ref(&'a T), - Arc(Arc), - Rc(Rc), - Box(Box), -} - -impl AsRef for Refs<'_, T> { - fn as_ref(&self) -> &T { - match self { - Refs::Ref(r) => r, - Refs::Arc(a) => a, - Refs::Rc(r) => r, - Refs::Box(b) => b, - } - } -} - -impl Deref for Refs<'_, T> { - type Target = T; - - fn deref(&self) -> &Self::Target { - match self { - Refs::Ref(r) => r, - Refs::Arc(a) => a, - Refs::Rc(r) => r, - Refs::Box(b) => b, - } - } -} - -impl<'a, T> From<&'a T> for Refs<'a, T> { - fn from(r: &'a T) -> Self { - Self::Ref(r) - } -} - -impl From> for Refs<'_, T> { - fn from(a: Arc) -> Self { - Self::Arc(a) - } -} - -impl From> for Refs<'_, T> { - fn from(r: Rc) -> Self { - Self::Rc(r) - } -} - -impl From> for Refs<'_, T> { - fn from(b: Box) -> Self { - Self::Box(b) - } -} diff --git a/consensus/dag/src/lib.rs b/consensus/dag/src/lib.rs deleted file mode 100644 index 51beedfdfa..0000000000 --- a/consensus/dag/src/lib.rs +++ /dev/null @@ -1,5 +0,0 @@ -pub mod blockdag; -pub mod consensusdb; -pub mod ghostdag; -pub mod reachability; -pub mod types; diff --git a/consensus/dag/src/reachability/extensions.rs b/consensus/dag/src/reachability/extensions.rs deleted file mode 100644 index 59630fb47d..0000000000 --- a/consensus/dag/src/reachability/extensions.rs +++ /dev/null @@ -1,50 +0,0 @@ -use crate::consensusdb::{prelude::StoreResult, schemadb::ReachabilityStoreReader}; -use crate::types::interval::Interval; -use starcoin_crypto::hash::HashValue as Hash; - -pub(super) trait ReachabilityStoreIntervalExtensions { - fn interval_children_capacity(&self, block: Hash) -> StoreResult; - fn interval_remaining_before(&self, block: Hash) -> StoreResult; - fn interval_remaining_after(&self, block: Hash) -> StoreResult; -} - -impl ReachabilityStoreIntervalExtensions for T { - /// Returns the reachability allocation capacity for children of `block` - fn interval_children_capacity(&self, block: Hash) -> StoreResult { - // The interval of a block should *strictly* contain the intervals of its - // tree children, hence we subtract 1 from the end of the range. 
- Ok(self.get_interval(block)?.decrease_end(1)) - } - - /// Returns the available interval to allocate for tree children, taken from the - /// beginning of children allocation capacity - fn interval_remaining_before(&self, block: Hash) -> StoreResult { - let alloc_capacity = self.interval_children_capacity(block)?; - match self.get_children(block)?.first() { - Some(first_child) => { - let first_alloc = self.get_interval(*first_child)?; - Ok(Interval::new( - alloc_capacity.start, - first_alloc.start.checked_sub(1).unwrap(), - )) - } - None => Ok(alloc_capacity), - } - } - - /// Returns the available interval to allocate for tree children, taken from the - /// end of children allocation capacity - fn interval_remaining_after(&self, block: Hash) -> StoreResult { - let alloc_capacity = self.interval_children_capacity(block)?; - match self.get_children(block)?.last() { - Some(last_child) => { - let last_alloc = self.get_interval(*last_child)?; - Ok(Interval::new( - last_alloc.end.checked_add(1).unwrap(), - alloc_capacity.end, - )) - } - None => Ok(alloc_capacity), - } - } -} diff --git a/consensus/dag/src/reachability/inquirer.rs b/consensus/dag/src/reachability/inquirer.rs deleted file mode 100644 index 3b8ab258d8..0000000000 --- a/consensus/dag/src/reachability/inquirer.rs +++ /dev/null @@ -1,344 +0,0 @@ -use super::{tree::*, *}; -use crate::consensusdb::schemadb::{ReachabilityStore, ReachabilityStoreReader}; -use crate::types::{interval::Interval, perf}; -use starcoin_crypto::{HashValue as Hash, HashValue}; - -/// Init the reachability store to match the state required by the algorithmic layer. -/// The function first checks the store for possibly being initialized already. -pub fn init(store: &mut (impl ReachabilityStore + ?Sized), origin: HashValue) -> Result<()> { - init_with_params(store, origin, Interval::maximal()) -} - -pub(super) fn init_with_params( - store: &mut (impl ReachabilityStore + ?Sized), - origin: Hash, - capacity: Interval, -) -> Result<()> { - if store.has(origin)? { - return Ok(()); - } - store.init(origin, capacity)?; - Ok(()) -} - -type HashIterator<'a> = &'a mut dyn Iterator; - -/// Add a block to the DAG reachability data structures and persist using the provided `store`. 
-pub fn add_block( - store: &mut (impl ReachabilityStore + ?Sized), - new_block: Hash, - selected_parent: Hash, - mergeset_iterator: HashIterator, -) -> Result<()> { - add_block_with_params( - store, - new_block, - selected_parent, - mergeset_iterator, - None, - None, - ) -} - -fn add_block_with_params( - store: &mut (impl ReachabilityStore + ?Sized), - new_block: Hash, - selected_parent: Hash, - mergeset_iterator: HashIterator, - reindex_depth: Option, - reindex_slack: Option, -) -> Result<()> { - add_tree_block( - store, - new_block, - selected_parent, - reindex_depth.unwrap_or(perf::DEFAULT_REINDEX_DEPTH), - reindex_slack.unwrap_or(perf::DEFAULT_REINDEX_SLACK), - )?; - add_dag_block(store, new_block, mergeset_iterator)?; - Ok(()) -} - -fn add_dag_block( - store: &mut (impl ReachabilityStore + ?Sized), - new_block: Hash, - mergeset_iterator: HashIterator, -) -> Result<()> { - // Update the future covering set for blocks in the mergeset - for merged_block in mergeset_iterator { - insert_to_future_covering_set(store, merged_block, new_block)?; - } - Ok(()) -} - -fn insert_to_future_covering_set( - store: &mut (impl ReachabilityStore + ?Sized), - merged_block: Hash, - new_block: Hash, -) -> Result<()> { - match binary_search_descendant( - store, - store.get_future_covering_set(merged_block)?.as_slice(), - new_block, - )? { - // We expect the query to not succeed, and to only return the correct insertion index. - // The existences of a `future covering item` (`FCI`) which is a chain ancestor of `new_block` - // contradicts `merged_block ∈ mergeset(new_block)`. Similarly, the existence of an FCI - // which `new_block` is a chain ancestor of, contradicts processing order. - SearchOutput::Found(_, _) => Err(ReachabilityError::DataInconsistency), - SearchOutput::NotFound(i) => { - store.insert_future_covering_item(merged_block, new_block, i)?; - Ok(()) - } - } -} - -/// Hint to the reachability algorithm that `hint` is a candidate to become -/// the `virtual selected parent` (`VSP`). This might affect internal reachability heuristics such -/// as moving the reindex point. The consensus runtime is expected to call this function -/// for a new header selected tip which is `header only` / `pending UTXO verification`, or for a completely resolved `VSP`. -pub fn hint_virtual_selected_parent( - store: &mut (impl ReachabilityStore + ?Sized), - hint: Hash, -) -> Result<()> { - try_advancing_reindex_root( - store, - hint, - perf::DEFAULT_REINDEX_DEPTH, - perf::DEFAULT_REINDEX_SLACK, - ) -} - -/// Checks if the `this` block is a strict chain ancestor of the `queried` block (aka `this ∈ chain(queried)`). -/// Note that this results in `false` if `this == queried` -pub fn is_strict_chain_ancestor_of( - store: &(impl ReachabilityStoreReader + ?Sized), - this: Hash, - queried: Hash, -) -> Result { - Ok(store - .get_interval(this)? - .strictly_contains(store.get_interval(queried)?)) -} - -/// Checks if `this` block is a chain ancestor of `queried` block (aka `this ∈ chain(queried) ∪ {queried}`). -/// Note that we use the graph theory convention here which defines that a block is also an ancestor of itself. -pub fn is_chain_ancestor_of( - store: &(impl ReachabilityStoreReader + ?Sized), - this: Hash, - queried: Hash, -) -> Result { - Ok(store - .get_interval(this)? - .contains(store.get_interval(queried)?)) -} - -/// Returns true if `this` is a DAG ancestor of `queried` (aka `queried ∈ future(this) ∪ {this}`). -/// Note: this method will return true if `this == queried`. 
-/// The complexity of this method is O(log(|future_covering_set(this)|)) -pub fn is_dag_ancestor_of( - store: &(impl ReachabilityStoreReader + ?Sized), - this: Hash, - queried: Hash, -) -> Result { - // First, check if `this` is a chain ancestor of queried - if is_chain_ancestor_of(store, this, queried)? { - return Ok(true); - } - // Otherwise, use previously registered future blocks to complete the - // DAG reachability test - match binary_search_descendant( - store, - store.get_future_covering_set(this)?.as_slice(), - queried, - )? { - SearchOutput::Found(_, _) => Ok(true), - SearchOutput::NotFound(_) => Ok(false), - } -} - -/// Finds the child of `ancestor` which is also a chain ancestor of `descendant`. -pub fn get_next_chain_ancestor( - store: &(impl ReachabilityStoreReader + ?Sized), - descendant: Hash, - ancestor: Hash, -) -> Result { - if descendant == ancestor { - // The next ancestor does not exist - return Err(ReachabilityError::BadQuery); - } - if !is_strict_chain_ancestor_of(store, ancestor, descendant)? { - // `ancestor` isn't actually a chain ancestor of `descendant`, so by def - // we cannot find the next ancestor as well - return Err(ReachabilityError::BadQuery); - } - - get_next_chain_ancestor_unchecked(store, descendant, ancestor) -} - -/// Note: it is important to keep the unchecked version for internal module use, -/// since in some scenarios during reindexing `descendant` might have a modified -/// interval which was not propagated yet. -pub(super) fn get_next_chain_ancestor_unchecked( - store: &(impl ReachabilityStoreReader + ?Sized), - descendant: Hash, - ancestor: Hash, -) -> Result { - match binary_search_descendant(store, store.get_children(ancestor)?.as_slice(), descendant)? { - SearchOutput::Found(hash, _) => Ok(hash), - SearchOutput::NotFound(_) => Err(ReachabilityError::BadQuery), - } -} - -enum SearchOutput { - NotFound(usize), // `usize` is the position to insert at - Found(Hash, usize), -} - -fn binary_search_descendant( - store: &(impl ReachabilityStoreReader + ?Sized), - ordered_hashes: &[Hash], - descendant: Hash, -) -> Result { - if cfg!(debug_assertions) { - // This is a linearly expensive assertion, keep it debug only - assert_hashes_ordered(store, ordered_hashes); - } - - // `Interval::end` represents the unique number allocated to this block - let point = store.get_interval(descendant)?.end; - - // We use an `unwrap` here since otherwise we need to implement `binary_search` - // ourselves, which is not worth the effort given that this would be an unrecoverable - // error anyhow - match ordered_hashes.binary_search_by_key(&point, |c| store.get_interval(*c).unwrap().start) { - Ok(i) => Ok(SearchOutput::Found(ordered_hashes[i], i)), - Err(i) => { - // `i` is where `point` was expected (i.e., point < ordered_hashes[i].interval.start), - // so we expect `ordered_hashes[i - 1].interval` to be the only candidate to contain `point` - if i > 0 - && is_chain_ancestor_of( - store, - ordered_hashes[i.checked_sub(1).unwrap()], - descendant, - )? 
- { - Ok(SearchOutput::Found( - ordered_hashes[i.checked_sub(1).unwrap()], - i.checked_sub(1).unwrap(), - )) - } else { - Ok(SearchOutput::NotFound(i)) - } - } - } -} - -fn assert_hashes_ordered(store: &(impl ReachabilityStoreReader + ?Sized), ordered_hashes: &[Hash]) { - let intervals: Vec = ordered_hashes - .iter() - .cloned() - .map(|c| store.get_interval(c).unwrap()) - .collect(); - debug_assert!(intervals - .as_slice() - .windows(2) - .all(|w| w[0].end < w[1].start)) -} - -#[cfg(test)] -mod tests { - use super::{super::tests::*, *}; - use crate::consensusdb::schemadb::MemoryReachabilityStore; - use starcoin_types::blockhash::ORIGIN; - - #[test] - fn test_add_tree_blocks() { - // Arrange - let mut store = MemoryReachabilityStore::new(); - // Act - let root: Hash = 1.into(); - TreeBuilder::new(&mut store) - .init_with_params(root, Interval::new(1, 15)) - .add_block(2.into(), root) - .add_block(3.into(), 2.into()) - .add_block(4.into(), 2.into()) - .add_block(5.into(), 3.into()) - .add_block(6.into(), 5.into()) - .add_block(7.into(), 1.into()) - .add_block(8.into(), 6.into()) - .add_block(9.into(), 6.into()) - .add_block(10.into(), 6.into()) - .add_block(11.into(), 6.into()); - // Assert - store.validate_intervals(root).unwrap(); - } - - #[test] - fn test_add_early_blocks() { - // Arrange - let mut store = MemoryReachabilityStore::new(); - - // Act - let root: Hash = Hash::from_u64(1); - let mut builder = TreeBuilder::new_with_params(&mut store, 2, 5); - builder.init_with_params(root, Interval::maximal()); - for i in 2u64..100 { - builder.add_block(Hash::from_u64(i), Hash::from_u64(i / 2)); - } - - // Should trigger an earlier than reindex root allocation - builder.add_block(Hash::from_u64(100), Hash::from_u64(2)); - store.validate_intervals(root).unwrap(); - } - - #[test] - fn test_add_dag_blocks() { - // Arrange - let mut store = MemoryReachabilityStore::new(); - let origin_hash = Hash::new(ORIGIN); - // Act - DagBuilder::new(&mut store) - .init(origin_hash) - .add_block(DagBlock::new(1.into(), vec![origin_hash])) - .add_block(DagBlock::new(2.into(), vec![1.into()])) - .add_block(DagBlock::new(3.into(), vec![1.into()])) - .add_block(DagBlock::new(4.into(), vec![2.into(), 3.into()])) - .add_block(DagBlock::new(5.into(), vec![4.into()])) - .add_block(DagBlock::new(6.into(), vec![1.into()])) - .add_block(DagBlock::new(7.into(), vec![5.into(), 6.into()])) - .add_block(DagBlock::new(8.into(), vec![1.into()])) - .add_block(DagBlock::new(9.into(), vec![1.into()])) - .add_block(DagBlock::new(10.into(), vec![7.into(), 8.into(), 9.into()])) - .add_block(DagBlock::new(11.into(), vec![1.into()])) - .add_block(DagBlock::new(12.into(), vec![11.into(), 10.into()])); - - // Assert intervals - store.validate_intervals(origin_hash).unwrap(); - - // Assert genesis - for i in 2u64..=12 { - assert!(store.in_past_of(1, i)); - } - - // Assert some futures - assert!(store.in_past_of(2, 4)); - assert!(store.in_past_of(2, 5)); - assert!(store.in_past_of(2, 7)); - assert!(store.in_past_of(5, 10)); - assert!(store.in_past_of(6, 10)); - assert!(store.in_past_of(10, 12)); - assert!(store.in_past_of(11, 12)); - - // Assert some anticones - assert!(store.are_anticone(2, 3)); - assert!(store.are_anticone(2, 6)); - assert!(store.are_anticone(3, 6)); - assert!(store.are_anticone(5, 6)); - assert!(store.are_anticone(3, 8)); - assert!(store.are_anticone(11, 2)); - assert!(store.are_anticone(11, 4)); - assert!(store.are_anticone(11, 6)); - assert!(store.are_anticone(11, 9)); - } -} diff --git 
a/consensus/dag/src/reachability/mod.rs b/consensus/dag/src/reachability/mod.rs deleted file mode 100644 index ceb2905b03..0000000000 --- a/consensus/dag/src/reachability/mod.rs +++ /dev/null @@ -1,50 +0,0 @@ -mod extensions; -pub mod inquirer; -pub mod reachability_service; -mod reindex; -pub mod relations_service; - -#[cfg(test)] -mod tests; -mod tree; - -use crate::consensusdb::prelude::StoreError; -use thiserror::Error; - -#[derive(Error, Debug)] -pub enum ReachabilityError { - #[error("data store error")] - StoreError(#[from] StoreError), - - #[error("data overflow error")] - DataOverflow(String), - - #[error("data inconsistency error")] - DataInconsistency, - - #[error("query is inconsistent")] - BadQuery, -} - -impl ReachabilityError { - pub fn is_key_not_found(&self) -> bool { - matches!(self, ReachabilityError::StoreError(e) if matches!(e, StoreError::KeyNotFound(_))) - } -} - -pub type Result = std::result::Result; - -pub trait ReachabilityResultExtensions { - /// Unwraps the error into `None` if the internal error is `StoreError::KeyNotFound` or panics otherwise - fn unwrap_option(self) -> Option; -} - -impl ReachabilityResultExtensions for Result { - fn unwrap_option(self) -> Option { - match self { - Ok(value) => Some(value), - Err(err) if err.is_key_not_found() => None, - Err(err) => panic!("Unexpected reachability error: {err:?}"), - } - } -} diff --git a/consensus/dag/src/reachability/reachability_service.rs b/consensus/dag/src/reachability/reachability_service.rs deleted file mode 100644 index 33796991d7..0000000000 --- a/consensus/dag/src/reachability/reachability_service.rs +++ /dev/null @@ -1,316 +0,0 @@ -use super::{inquirer, Result}; -use crate::consensusdb::schemadb::ReachabilityStoreReader; -use parking_lot::RwLock; -use starcoin_crypto::{HashValue as Hash, HashValue}; -use starcoin_types::blockhash; -use std::{ops::Deref, sync::Arc}; - -pub trait ReachabilityService { - fn is_chain_ancestor_of(&self, this: Hash, queried: Hash) -> bool; - fn is_dag_ancestor_of_result(&self, this: Hash, queried: Hash) -> Result; - fn is_dag_ancestor_of(&self, this: Hash, queried: Hash) -> bool; - fn is_dag_ancestor_of_any(&self, this: Hash, queried: &mut impl Iterator) -> bool; - fn is_any_dag_ancestor(&self, list: &mut impl Iterator, queried: Hash) -> bool; - fn is_any_dag_ancestor_result( - &self, - list: &mut impl Iterator, - queried: Hash, - ) -> Result; - fn get_next_chain_ancestor(&self, descendant: Hash, ancestor: Hash) -> Hash; -} - -/// Multi-threaded reachability service imp -#[derive(Clone)] -pub struct MTReachabilityService { - store: Arc>, -} - -impl MTReachabilityService { - pub fn new(store: Arc>) -> Self { - Self { store } - } -} - -impl ReachabilityService for MTReachabilityService { - fn is_chain_ancestor_of(&self, this: Hash, queried: Hash) -> bool { - let read_guard = self.store.read(); - inquirer::is_chain_ancestor_of(read_guard.deref(), this, queried).unwrap() - } - - fn is_dag_ancestor_of_result(&self, this: Hash, queried: Hash) -> Result { - let read_guard = self.store.read(); - inquirer::is_dag_ancestor_of(read_guard.deref(), this, queried) - } - - fn is_dag_ancestor_of(&self, this: Hash, queried: Hash) -> bool { - let read_guard = self.store.read(); - inquirer::is_dag_ancestor_of(read_guard.deref(), this, queried).unwrap() - } - - fn is_any_dag_ancestor(&self, list: &mut impl Iterator, queried: Hash) -> bool { - let read_guard = self.store.read(); - list.any(|hash| inquirer::is_dag_ancestor_of(read_guard.deref(), hash, queried).unwrap()) - } - - fn 
is_any_dag_ancestor_result( - &self, - list: &mut impl Iterator, - queried: Hash, - ) -> Result { - let read_guard = self.store.read(); - for hash in list { - if inquirer::is_dag_ancestor_of(read_guard.deref(), hash, queried)? { - return Ok(true); - } - } - Ok(false) - } - - fn is_dag_ancestor_of_any(&self, this: Hash, queried: &mut impl Iterator) -> bool { - let read_guard = self.store.read(); - queried.any(|hash| inquirer::is_dag_ancestor_of(read_guard.deref(), this, hash).unwrap()) - } - - fn get_next_chain_ancestor(&self, descendant: Hash, ancestor: Hash) -> Hash { - let read_guard = self.store.read(); - inquirer::get_next_chain_ancestor(read_guard.deref(), descendant, ancestor).unwrap() - } -} - -impl MTReachabilityService { - /// Returns a forward iterator walking up the chain-selection tree from `from_ancestor` - /// to `to_descendant`, where `to_descendant` is included if `inclusive` is set to true. - /// - /// To skip `from_ancestor` simply apply `skip(1)`. - /// - /// The caller is expected to verify that `from_ancestor` is indeed a chain ancestor of - /// `to_descendant`, otherwise the function will panic. - pub fn forward_chain_iterator( - &self, - from_ancestor: Hash, - to_descendant: Hash, - inclusive: bool, - ) -> impl Iterator { - ForwardChainIterator::new(self.store.clone(), from_ancestor, to_descendant, inclusive) - } - - /// Returns a backward iterator walking down the selected chain from `from_descendant` - /// to `to_ancestor`, where `to_ancestor` is included if `inclusive` is set to true. - /// - /// To skip `from_descendant` simply apply `skip(1)`. - /// - /// The caller is expected to verify that `to_ancestor` is indeed a chain ancestor of - /// `from_descendant`, otherwise the function will panic. - pub fn backward_chain_iterator( - &self, - from_descendant: Hash, - to_ancestor: Hash, - inclusive: bool, - ) -> impl Iterator { - BackwardChainIterator::new(self.store.clone(), from_descendant, to_ancestor, inclusive) - } - - /// Returns the default chain iterator, walking from `from` backward down the - /// selected chain until `virtual genesis` (aka `blockhash::ORIGIN`; exclusive) - pub fn default_backward_chain_iterator(&self, from: Hash) -> impl Iterator { - BackwardChainIterator::new( - self.store.clone(), - from, - HashValue::new(blockhash::ORIGIN), - false, - ) - } -} - -/// Iterator design: we currently read-lock at each movement of the iterator. -/// Other options are to keep the read guard throughout the iterator lifetime, or -/// a compromise where the lock is released every constant number of items. 
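A minimal usage sketch of the chain-iterator API removed above, written as if it sat next to this module's existing tests (the test name and exact use paths are invented for illustration; MemoryReachabilityStore, TreeBuilder and the ORIGIN constant are the same helpers the deleted tests rely on):

    #[test]
    fn chain_iterator_usage_sketch() {
        // Helpers from the deleted module itself; TreeBuilder lives under #[cfg(test)],
        // so this sketch assumes it is placed inside the reachability module tree.
        use crate::consensusdb::schemadb::MemoryReachabilityStore;
        use crate::reachability::reachability_service::MTReachabilityService;
        use crate::reachability::tests::TreeBuilder;
        use parking_lot::RwLock;
        use starcoin_crypto::HashValue as Hash;
        use starcoin_types::blockhash::ORIGIN;
        use std::sync::Arc;

        // Build a tiny selected-parent chain origin -> 1 -> 2 -> 3 in memory.
        let origin = Hash::new(ORIGIN);
        let mut store = MemoryReachabilityStore::new();
        TreeBuilder::new(&mut store)
            .init(origin)
            .add_block(1.into(), origin)
            .add_block(2.into(), 1.into())
            .add_block(3.into(), 2.into());

        let service = MTReachabilityService::new(Arc::new(RwLock::new(store)));

        // Inclusive forward walk from ancestor 1 to descendant 3 yields 1, 2, 3.
        let forward: Vec<Hash> = service
            .forward_chain_iterator(1.into(), 3.into(), true)
            .collect();
        assert_eq!(forward, vec![1.into(), 2.into(), 3.into()]);

        // The default backward iterator walks from a tip down to the virtual
        // genesis (ORIGIN), exclusive of ORIGIN itself: 3, 2, 1.
        let backward: Vec<Hash> = service.default_backward_chain_iterator(3.into()).collect();
        assert_eq!(backward, vec![3.into(), 2.into(), 1.into()]);
    }
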
-struct BackwardChainIterator { - store: Arc>, - current: Option, - ancestor: Hash, - inclusive: bool, -} - -impl BackwardChainIterator { - fn new( - store: Arc>, - from_descendant: Hash, - to_ancestor: Hash, - inclusive: bool, - ) -> Self { - Self { - store, - current: Some(from_descendant), - ancestor: to_ancestor, - inclusive, - } - } -} - -impl Iterator for BackwardChainIterator { - type Item = Hash; - - fn next(&mut self) -> Option { - if let Some(current) = self.current { - if current == self.ancestor { - if self.inclusive { - self.current = None; - Some(current) - } else { - self.current = None; - None - } - } else { - debug_assert_ne!(current, HashValue::new(blockhash::NONE)); - let next = self.store.read().get_parent(current).unwrap(); - self.current = Some(next); - Some(current) - } - } else { - None - } - } -} - -struct ForwardChainIterator { - store: Arc>, - current: Option, - descendant: Hash, - inclusive: bool, -} - -impl ForwardChainIterator { - fn new( - store: Arc>, - from_ancestor: Hash, - to_descendant: Hash, - inclusive: bool, - ) -> Self { - Self { - store, - current: Some(from_ancestor), - descendant: to_descendant, - inclusive, - } - } -} - -impl Iterator for ForwardChainIterator { - type Item = Hash; - - fn next(&mut self) -> Option { - if let Some(current) = self.current { - if current == self.descendant { - if self.inclusive { - self.current = None; - Some(current) - } else { - self.current = None; - None - } - } else { - let next = inquirer::get_next_chain_ancestor( - self.store.read().deref(), - self.descendant, - current, - ) - .unwrap(); - self.current = Some(next); - Some(current) - } - } else { - None - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::consensusdb::schemadb::MemoryReachabilityStore; - use crate::reachability::tests::TreeBuilder; - use crate::types::interval::Interval; - - #[test] - fn test_forward_iterator() { - // Arrange - let mut store = MemoryReachabilityStore::new(); - - // Act - let root: Hash = 1.into(); - TreeBuilder::new(&mut store) - .init_with_params(root, Interval::new(1, 15)) - .add_block(2.into(), root) - .add_block(3.into(), 2.into()) - .add_block(4.into(), 2.into()) - .add_block(5.into(), 3.into()) - .add_block(6.into(), 5.into()) - .add_block(7.into(), 1.into()) - .add_block(8.into(), 6.into()) - .add_block(9.into(), 6.into()) - .add_block(10.into(), 6.into()) - .add_block(11.into(), 6.into()); - - let service = MTReachabilityService::new(Arc::new(RwLock::new(store))); - - // Exclusive - let iter = service.forward_chain_iterator(2.into(), 10.into(), false); - - // Assert - let expected_hashes = [2u64, 3, 5, 6].map(Hash::from); - assert!(expected_hashes.iter().cloned().eq(iter)); - - // Inclusive - let iter = service.forward_chain_iterator(2.into(), 10.into(), true); - - // Assert - let expected_hashes = [2u64, 3, 5, 6, 10].map(Hash::from); - assert!(expected_hashes.iter().cloned().eq(iter)); - - // Compare backward to reversed forward - let forward_iter = service.forward_chain_iterator(2.into(), 10.into(), true); - let backward_iter: Vec = service - .backward_chain_iterator(10.into(), 2.into(), true) - .collect(); - assert!(forward_iter.eq(backward_iter.iter().cloned().rev())) - } - - #[test] - fn test_iterator_boundaries() { - // Arrange & Act - let mut store = MemoryReachabilityStore::new(); - let root: Hash = 1.into(); - TreeBuilder::new(&mut store) - .init_with_params(root, Interval::new(1, 5)) - .add_block(2.into(), root); - - let service = MTReachabilityService::new(Arc::new(RwLock::new(store))); - - 
// Asserts - assert!([1u64, 2] - .map(Hash::from) - .iter() - .cloned() - .eq(service.forward_chain_iterator(1.into(), 2.into(), true))); - assert!([1u64] - .map(Hash::from) - .iter() - .cloned() - .eq(service.forward_chain_iterator(1.into(), 2.into(), false))); - assert!([2u64, 1] - .map(Hash::from) - .iter() - .cloned() - .eq(service.backward_chain_iterator(2.into(), root, true))); - assert!([2u64] - .map(Hash::from) - .iter() - .cloned() - .eq(service.backward_chain_iterator(2.into(), root, false))); - assert!(std::iter::once(root).eq(service.backward_chain_iterator(root, root, true))); - assert!(std::iter::empty::().eq(service.backward_chain_iterator(root, root, false))); - assert!(std::iter::once(root).eq(service.forward_chain_iterator(root, root, true))); - assert!(std::iter::empty::().eq(service.forward_chain_iterator(root, root, false))); - } -} diff --git a/consensus/dag/src/reachability/reindex.rs b/consensus/dag/src/reachability/reindex.rs deleted file mode 100644 index ebb8aab83f..0000000000 --- a/consensus/dag/src/reachability/reindex.rs +++ /dev/null @@ -1,683 +0,0 @@ -use super::{ - extensions::ReachabilityStoreIntervalExtensions, inquirer::get_next_chain_ancestor_unchecked, *, -}; -use crate::consensusdb::schemadb::ReachabilityStore; -use crate::types::interval::Interval; -use starcoin_crypto::HashValue as Hash; -use starcoin_types::blockhash::{BlockHashExtensions, BlockHashMap}; -use std::collections::VecDeque; - -/// A struct used during reindex operations. It represents a temporary context -/// for caching subtree information during the *current* reindex operation only -pub(super) struct ReindexOperationContext<'a, T: ReachabilityStore + ?Sized> { - store: &'a mut T, - subtree_sizes: BlockHashMap, // Cache for subtree sizes computed during this operation - _depth: u64, - slack: u64, -} - -impl<'a, T: ReachabilityStore + ?Sized> ReindexOperationContext<'a, T> { - pub(super) fn new(store: &'a mut T, depth: u64, slack: u64) -> Self { - Self { - store, - subtree_sizes: BlockHashMap::new(), - _depth: depth, - slack, - } - } - - /// Traverses the reachability subtree that's defined by the new child - /// block and reallocates reachability interval space - /// such that another reindexing is unlikely to occur shortly - /// thereafter. It does this by traversing down the reachability - /// tree until it finds a block with an interval size that's greater than - /// its subtree size. See `propagate_interval` for further details. - pub(super) fn reindex_intervals(&mut self, new_child: Hash, reindex_root: Hash) -> Result<()> { - let mut current = new_child; - - // Search for the first ancestor with sufficient interval space - loop { - let current_interval = self.store.get_interval(current)?; - self.count_subtrees(current)?; - - // `current` has sufficient space, break and propagate - if current_interval.size() >= self.subtree_sizes[¤t] { - break; - } - - let parent = self.store.get_parent(current)?; - - if parent.is_none() { - // If we ended up here it means that there are more - // than 2^64 blocks, which shouldn't ever happen. - return Err(ReachabilityError::DataOverflow( - "missing tree - parent during reindexing. Theoretically, this - should only ever happen if there are more - than 2^64 blocks in the DAG." - .to_string(), - )); - } - - if current == reindex_root { - // Reindex root is expected to hold enough capacity as long as there are less - // than ~2^52 blocks in the DAG, which should never happen in our lifetimes - // even if block rate per second is above 100. 
The calculation follows from the allocation of - // 2^12 (which equals 2^64/2^52) for slack per chain block below the reindex root. - return Err(ReachabilityError::DataOverflow(format!( - "unexpected behavior: reindex root {reindex_root} is out of capacity during reindexing. - Theoretically, this should only ever happen if there are more than ~2^52 blocks in the DAG." - ))); - } - - if inquirer::is_strict_chain_ancestor_of(self.store, parent, reindex_root)? { - // In this case parent is guaranteed to have sufficient interval space, - // however we avoid reindexing the entire subtree above parent - // (which includes root and thus majority of blocks mined since) - // and use slacks along the chain up forward from parent to reindex root. - // Notes: - // 1. we set `required_allocation` = subtree size of current in order to double the - // current interval capacity - // 2. it might be the case that current is the `new_child` itself - return self.reindex_intervals_earlier_than_root( - current, - reindex_root, - parent, - self.subtree_sizes[¤t], - ); - } - - current = parent - } - - self.propagate_interval(current) - } - - /// - /// Core (BFS) algorithms used during reindexing (see `count_subtrees` and `propagate_interval` below) - /// - /// - /// count_subtrees counts the size of each subtree under this block, - /// and populates self.subtree_sizes with the results. - /// It is equivalent to the following recursive implementation: - /// - /// fn count_subtrees(&mut self, block: Hash) -> Result { - /// let mut subtree_size = 0u64; - /// for child in self.store.get_children(block)?.iter().cloned() { - /// subtree_size += self.count_subtrees(child)?; - /// } - /// self.subtree_sizes.insert(block, subtree_size + 1); - /// Ok(subtree_size + 1) - /// } - /// - /// However, we are expecting (linearly) deep trees, and so a - /// recursive stack-based approach is inefficient and will hit - /// recursion limits. Instead, the same logic was implemented - /// using a (queue-based) BFS method. At a high level, the - /// algorithm uses BFS for reaching all leaves and pushes - /// intermediate updates from leaves via parent chains until all - /// size information is gathered at the root of the operation - /// (i.e. at block). - fn count_subtrees(&mut self, block: Hash) -> Result<()> { - if self.subtree_sizes.contains_key(&block) { - return Ok(()); - } - - let mut queue = VecDeque::::from([block]); - let mut counts = BlockHashMap::::new(); - - while let Some(mut current) = queue.pop_front() { - let children = self.store.get_children(current)?; - if children.is_empty() { - // We reached a leaf - self.subtree_sizes.insert(current, 1); - } else if !self.subtree_sizes.contains_key(¤t) { - // We haven't yet calculated the subtree size of - // the current block. Add all its children to the - // queue - queue.extend(children.iter()); - continue; - } - - // We reached a leaf or a pre-calculated subtree. - // Push information up - while current != block { - current = self.store.get_parent(current)?; - - let count = counts.entry(current).or_insert(0); - let children = self.store.get_children(current)?; - - *count = (*count).checked_add(1).unwrap(); - if *count < children.len() as u64 { - // Not all subtrees of the current block are ready - break; - } - - // All children of `current` have calculated their subtree size. - // Sum them all together and add 1 to get the sub tree size of - // `current`. 
- let subtree_sum: u64 = children.iter().map(|c| self.subtree_sizes[c]).sum(); - self.subtree_sizes - .insert(current, subtree_sum.checked_add(1).unwrap()); - } - } - - Ok(()) - } - - /// Propagates a new interval using a BFS traversal. - /// Subtree intervals are recursively allocated according to subtree sizes and - /// the allocation rule in `Interval::split_exponential`. - fn propagate_interval(&mut self, block: Hash) -> Result<()> { - // Make sure subtrees are counted before propagating - self.count_subtrees(block)?; - - let mut queue = VecDeque::::from([block]); - while let Some(current) = queue.pop_front() { - let children = self.store.get_children(current)?; - if !children.is_empty() { - let sizes: Vec = children.iter().map(|c| self.subtree_sizes[c]).collect(); - let interval = self.store.interval_children_capacity(current)?; - let intervals = interval.split_exponential(&sizes); - for (c, ci) in children.iter().copied().zip(intervals) { - self.store.set_interval(c, ci)?; - } - queue.extend(children.iter()); - } - } - Ok(()) - } - - /// This method implements the reindex algorithm for the case where the - /// new child node is not in reindex root's subtree. The function is expected to allocate - /// `required_allocation` to be added to interval of `allocation_block`. `common_ancestor` is - /// expected to be a direct parent of `allocation_block` and an ancestor of current `reindex_root`. - fn reindex_intervals_earlier_than_root( - &mut self, - allocation_block: Hash, - reindex_root: Hash, - common_ancestor: Hash, - required_allocation: u64, - ) -> Result<()> { - // The chosen child is: (i) child of `common_ancestor`; (ii) an - // ancestor of `reindex_root` or `reindex_root` itself - let chosen_child = - get_next_chain_ancestor_unchecked(self.store, reindex_root, common_ancestor)?; - let block_interval = self.store.get_interval(allocation_block)?; - let chosen_interval = self.store.get_interval(chosen_child)?; - - if block_interval.start < chosen_interval.start { - // `allocation_block` is in the subtree before the chosen child - self.reclaim_interval_before( - allocation_block, - common_ancestor, - chosen_child, - reindex_root, - required_allocation, - ) - } else { - // `allocation_block` is in the subtree after the chosen child - self.reclaim_interval_after( - allocation_block, - common_ancestor, - chosen_child, - reindex_root, - required_allocation, - ) - } - } - - fn reclaim_interval_before( - &mut self, - allocation_block: Hash, - common_ancestor: Hash, - chosen_child: Hash, - reindex_root: Hash, - required_allocation: u64, - ) -> Result<()> { - let mut slack_sum = 0u64; - let mut path_len = 0u64; - let mut path_slack_alloc = 0u64; - - let mut current = chosen_child; - // Walk up the chain from common ancestor's chosen child towards reindex root - loop { - if current == reindex_root { - // Reached reindex root. 
In this case, since we reached (the unlimited) root, - // we also re-allocate new slack for the chain we just traversed - let offset = required_allocation - .checked_add(self.slack.checked_mul(path_len).unwrap()) - .unwrap() - .checked_sub(slack_sum) - .unwrap(); - self.apply_interval_op_and_propagate(current, offset, Interval::increase_start)?; - self.offset_siblings_before(allocation_block, current, offset)?; - - // Set the slack for each chain block to be reserved below during the chain walk-down - path_slack_alloc = self.slack; - break; - } - - let slack_before_current = self.store.interval_remaining_before(current)?.size(); - slack_sum = slack_sum.checked_add(slack_before_current).unwrap(); - - if slack_sum >= required_allocation { - // Set offset to be just enough to satisfy required allocation - let offset = slack_before_current - .checked_sub(slack_sum.checked_sub(required_allocation).unwrap()) - .unwrap(); - self.apply_interval_op(current, offset, Interval::increase_start)?; - self.offset_siblings_before(allocation_block, current, offset)?; - - break; - } - - current = get_next_chain_ancestor_unchecked(self.store, reindex_root, current)?; - path_len = path_len.checked_add(1).unwrap(); - } - - // Go back down the reachability tree towards the common ancestor. - // On every hop we reindex the reachability subtree before the - // current block with an interval that is smaller. - // This is to make room for the required allocation. - loop { - current = self.store.get_parent(current)?; - if current == common_ancestor { - break; - } - - let slack_before_current = self.store.interval_remaining_before(current)?.size(); - let offset = slack_before_current.checked_sub(path_slack_alloc).unwrap(); - self.apply_interval_op(current, offset, Interval::increase_start)?; - self.offset_siblings_before(allocation_block, current, offset)?; - } - - Ok(()) - } - - fn reclaim_interval_after( - &mut self, - allocation_block: Hash, - common_ancestor: Hash, - chosen_child: Hash, - reindex_root: Hash, - required_allocation: u64, - ) -> Result<()> { - let mut slack_sum = 0u64; - let mut path_len = 0u64; - let mut path_slack_alloc = 0u64; - - let mut current = chosen_child; - // Walk up the chain from common ancestor's chosen child towards reindex root - loop { - if current == reindex_root { - // Reached reindex root. 
In this case, since we reached (the unlimited) root, - // we also re-allocate new slack for the chain we just traversed - let offset = required_allocation - .checked_add(self.slack.checked_mul(path_len).unwrap()) - .unwrap() - .checked_sub(slack_sum) - .unwrap(); - self.apply_interval_op_and_propagate(current, offset, Interval::decrease_end)?; - self.offset_siblings_after(allocation_block, current, offset)?; - - // Set the slack for each chain block to be reserved below during the chain walk-down - path_slack_alloc = self.slack; - break; - } - - let slack_after_current = self.store.interval_remaining_after(current)?.size(); - slack_sum = slack_sum.checked_add(slack_after_current).unwrap(); - - if slack_sum >= required_allocation { - // Set offset to be just enough to satisfy required allocation - let offset = slack_after_current - .checked_sub(slack_sum.checked_sub(required_allocation).unwrap()) - .unwrap(); - self.apply_interval_op(current, offset, Interval::decrease_end)?; - self.offset_siblings_after(allocation_block, current, offset)?; - - break; - } - - current = get_next_chain_ancestor_unchecked(self.store, reindex_root, current)?; - path_len = path_len.checked_add(1).unwrap(); - } - - // Go back down the reachability tree towards the common ancestor. - // On every hop we reindex the reachability subtree before the - // current block with an interval that is smaller. - // This is to make room for the required allocation. - loop { - current = self.store.get_parent(current)?; - if current == common_ancestor { - break; - } - - let slack_after_current = self.store.interval_remaining_after(current)?.size(); - let offset = slack_after_current.checked_sub(path_slack_alloc).unwrap(); - self.apply_interval_op(current, offset, Interval::decrease_end)?; - self.offset_siblings_after(allocation_block, current, offset)?; - } - - Ok(()) - } - - fn offset_siblings_before( - &mut self, - allocation_block: Hash, - current: Hash, - offset: u64, - ) -> Result<()> { - let parent = self.store.get_parent(current)?; - let children = self.store.get_children(parent)?; - - let (siblings_before, _) = split_children(&children, current)?; - for sibling in siblings_before.iter().cloned().rev() { - if sibling == allocation_block { - // We reached our final destination, allocate `offset` to `allocation_block` by increasing end and break - self.apply_interval_op_and_propagate( - allocation_block, - offset, - Interval::increase_end, - )?; - break; - } - // For non-`allocation_block` siblings offset the interval upwards in order to create space - self.apply_interval_op_and_propagate(sibling, offset, Interval::increase)?; - } - - Ok(()) - } - - fn offset_siblings_after( - &mut self, - allocation_block: Hash, - current: Hash, - offset: u64, - ) -> Result<()> { - let parent = self.store.get_parent(current)?; - let children = self.store.get_children(parent)?; - - let (_, siblings_after) = split_children(&children, current)?; - for sibling in siblings_after.iter().cloned() { - if sibling == allocation_block { - // We reached our final destination, allocate `offset` to `allocation_block` by decreasing only start and break - self.apply_interval_op_and_propagate( - allocation_block, - offset, - Interval::decrease_start, - )?; - break; - } - // For siblings before `allocation_block` offset the interval downwards to create space - self.apply_interval_op_and_propagate(sibling, offset, Interval::decrease)?; - } - - Ok(()) - } - - fn apply_interval_op( - &mut self, - block: Hash, - offset: u64, - op: fn(&Interval, u64) -> Interval, 
- ) -> Result<()> { - self.store - .set_interval(block, op(&self.store.get_interval(block)?, offset))?; - Ok(()) - } - - fn apply_interval_op_and_propagate( - &mut self, - block: Hash, - offset: u64, - op: fn(&Interval, u64) -> Interval, - ) -> Result<()> { - self.store - .set_interval(block, op(&self.store.get_interval(block)?, offset))?; - self.propagate_interval(block)?; - Ok(()) - } - - /// A method for handling reindex operations triggered by moving the reindex root - pub(super) fn concentrate_interval( - &mut self, - parent: Hash, - child: Hash, - is_final_reindex_root: bool, - ) -> Result<()> { - let children = self.store.get_children(parent)?; - - // Split the `children` of `parent` to siblings before `child` and siblings after `child` - let (siblings_before, siblings_after) = split_children(&children, child)?; - - let siblings_before_subtrees_sum: u64 = - self.tighten_intervals_before(parent, siblings_before)?; - let siblings_after_subtrees_sum: u64 = - self.tighten_intervals_after(parent, siblings_after)?; - - self.expand_interval_to_chosen( - parent, - child, - siblings_before_subtrees_sum, - siblings_after_subtrees_sum, - is_final_reindex_root, - )?; - - Ok(()) - } - - pub(super) fn tighten_intervals_before( - &mut self, - parent: Hash, - children_before: &[Hash], - ) -> Result { - let sizes = children_before - .iter() - .cloned() - .map(|block| { - self.count_subtrees(block)?; - Ok(self.subtree_sizes[&block]) - }) - .collect::>>()?; - let sum = sizes.iter().sum(); - - let interval = self.store.get_interval(parent)?; - let interval_before = Interval::new( - interval.start.checked_add(self.slack).unwrap(), - interval - .start - .checked_add(self.slack) - .unwrap() - .checked_add(sum) - .unwrap() - .checked_sub(1) - .unwrap(), - ); - - for (c, ci) in children_before - .iter() - .cloned() - .zip(interval_before.split_exact(sizes.as_slice())) - { - self.store.set_interval(c, ci)?; - self.propagate_interval(c)?; - } - - Ok(sum) - } - - pub(super) fn tighten_intervals_after( - &mut self, - parent: Hash, - children_after: &[Hash], - ) -> Result { - let sizes = children_after - .iter() - .cloned() - .map(|block| { - self.count_subtrees(block)?; - Ok(self.subtree_sizes[&block]) - }) - .collect::>>()?; - let sum = sizes.iter().sum(); - - let interval = self.store.get_interval(parent)?; - let interval_after = Interval::new( - interval - .end - .checked_sub(self.slack) - .unwrap() - .checked_sub(sum) - .unwrap(), - interval - .end - .checked_sub(self.slack) - .unwrap() - .checked_sub(1) - .unwrap(), - ); - - for (c, ci) in children_after - .iter() - .cloned() - .zip(interval_after.split_exact(sizes.as_slice())) - { - self.store.set_interval(c, ci)?; - self.propagate_interval(c)?; - } - - Ok(sum) - } - - pub(super) fn expand_interval_to_chosen( - &mut self, - parent: Hash, - child: Hash, - siblings_before_subtrees_sum: u64, - siblings_after_subtrees_sum: u64, - is_final_reindex_root: bool, - ) -> Result<()> { - let interval = self.store.get_interval(parent)?; - let allocation = Interval::new( - interval - .start - .checked_add(siblings_before_subtrees_sum) - .unwrap() - .checked_add(self.slack) - .unwrap(), - interval - .end - .checked_sub(siblings_after_subtrees_sum) - .unwrap() - .checked_sub(self.slack) - .unwrap() - .checked_sub(1) - .unwrap(), - ); - let current = self.store.get_interval(child)?; - - // Propagate interval only if the chosen `child` is the final reindex root AND - // the new interval doesn't contain the previous one - if is_final_reindex_root && 
!allocation.contains(current) { - /* - We deallocate slack on both sides as an optimization. Were we to - assign the fully allocated interval, the next time the reindex root moves we - would need to propagate intervals again. However when we do allocate slack, - next time this method is called (next time the reindex root moves), `allocation` is likely to contain `current`. - Note that below following the propagation we reassign the full `allocation` to `child`. - */ - let narrowed = Interval::new( - allocation.start.checked_add(self.slack).unwrap(), - allocation.end.checked_sub(self.slack).unwrap(), - ); - self.store.set_interval(child, narrowed)?; - self.propagate_interval(child)?; - } - - self.store.set_interval(child, allocation)?; - Ok(()) - } -} - -/// Splits `children` into two slices: the blocks that are before `pivot` and the blocks that are after. -fn split_children(children: &std::sync::Arc>, pivot: Hash) -> Result<(&[Hash], &[Hash])> { - if let Some(index) = children.iter().cloned().position(|c| c == pivot) { - Ok(( - &children[..index], - &children[index.checked_add(1).unwrap()..], - )) - } else { - Err(ReachabilityError::DataInconsistency) - } -} - -#[cfg(test)] -mod tests { - use super::{super::tests::*, *}; - use crate::consensusdb::schemadb::{MemoryReachabilityStore, ReachabilityStoreReader}; - use starcoin_types::blockhash; - - #[test] - fn test_count_subtrees() { - let mut store = MemoryReachabilityStore::new(); - - // Arrange - let root: Hash = 1.into(); - StoreBuilder::new(&mut store) - .add_block(root, Hash::new(blockhash::NONE)) - .add_block(2.into(), root) - .add_block(3.into(), 2.into()) - .add_block(4.into(), 2.into()) - .add_block(5.into(), 3.into()) - .add_block(6.into(), 5.into()) - .add_block(7.into(), 1.into()) - .add_block(8.into(), 6.into()); - - // Act - let mut ctx = ReindexOperationContext::new(&mut store, 10, 16); - ctx.count_subtrees(root).unwrap(); - - // Assert - let expected = [ - (1u64, 8u64), - (2, 6), - (3, 4), - (4, 1), - (5, 3), - (6, 2), - (7, 1), - (8, 1), - ] - .iter() - .cloned() - .map(|(h, c)| (Hash::from(h), c)) - .collect::>(); - - assert_eq!(expected, ctx.subtree_sizes); - - // Act - ctx.store.set_interval(root, Interval::new(1, 8)).unwrap(); - ctx.propagate_interval(root).unwrap(); - - // Assert intervals manually - let expected_intervals = [ - (1u64, (1u64, 8u64)), - (2, (1, 6)), - (3, (1, 4)), - (4, (5, 5)), - (5, (1, 3)), - (6, (1, 2)), - (7, (7, 7)), - (8, (1, 1)), - ]; - let actual_intervals = (1u64..=8) - .map(|i| (i, ctx.store.get_interval(i.into()).unwrap().into())) - .collect::>(); - assert_eq!(actual_intervals, expected_intervals); - - // Assert intervals follow the general rules - store.validate_intervals(root).unwrap(); - } -} diff --git a/consensus/dag/src/reachability/relations_service.rs b/consensus/dag/src/reachability/relations_service.rs deleted file mode 100644 index 755cfb49be..0000000000 --- a/consensus/dag/src/reachability/relations_service.rs +++ /dev/null @@ -1,34 +0,0 @@ -use crate::consensusdb::{prelude::StoreError, schemadb::RelationsStoreReader}; -use parking_lot::RwLock; -use starcoin_crypto::HashValue as Hash; -use starcoin_types::blockhash::BlockHashes; -use std::sync::Arc; -/// Multi-threaded block-relations service imp -#[derive(Clone)] -pub struct MTRelationsService { - store: Arc>>, - level: usize, -} - -impl MTRelationsService { - pub fn new(store: Arc>>, level: u8) -> Self { - Self { - store, - level: level as usize, - } - } -} - -impl RelationsStoreReader for MTRelationsService { - fn 
get_parents(&self, hash: Hash) -> Result { - self.store.read()[self.level].get_parents(hash) - } - - fn get_children(&self, hash: Hash) -> Result { - self.store.read()[self.level].get_children(hash) - } - - fn has(&self, hash: Hash) -> Result { - self.store.read()[self.level].has(hash) - } -} diff --git a/consensus/dag/src/reachability/tests.rs b/consensus/dag/src/reachability/tests.rs deleted file mode 100644 index d580f0e4c9..0000000000 --- a/consensus/dag/src/reachability/tests.rs +++ /dev/null @@ -1,265 +0,0 @@ -//! -//! Test utils for reachability -//! -use super::{inquirer::*, tree::*}; -use crate::consensusdb::{ - prelude::StoreError, - schemadb::{ReachabilityStore, ReachabilityStoreReader}, -}; -use crate::types::interval::Interval; -use crate::types::perf; -use starcoin_crypto::HashValue as Hash; -use starcoin_types::blockhash::{BlockHashExtensions, BlockHashMap, BlockHashSet}; -use std::collections::VecDeque; -use thiserror::Error; - -/// A struct with fluent API to streamline reachability store building -pub struct StoreBuilder<'a, T: ReachabilityStore + ?Sized> { - store: &'a mut T, -} - -impl<'a, T: ReachabilityStore + ?Sized> StoreBuilder<'a, T> { - pub fn new(store: &'a mut T) -> Self { - Self { store } - } - - pub fn add_block(&mut self, hash: Hash, parent: Hash) -> &mut Self { - let parent_height = if !parent.is_none() { - self.store.append_child(parent, hash).unwrap() - } else { - 0 - }; - self.store - .insert(hash, parent, Interval::empty(), parent_height + 1) - .unwrap(); - self - } -} - -/// A struct with fluent API to streamline tree building -pub struct TreeBuilder<'a, T: ReachabilityStore + ?Sized> { - store: &'a mut T, - reindex_depth: u64, - reindex_slack: u64, -} - -impl<'a, T: ReachabilityStore + ?Sized> TreeBuilder<'a, T> { - pub fn new(store: &'a mut T) -> Self { - Self { - store, - reindex_depth: perf::DEFAULT_REINDEX_DEPTH, - reindex_slack: perf::DEFAULT_REINDEX_SLACK, - } - } - - pub fn new_with_params(store: &'a mut T, reindex_depth: u64, reindex_slack: u64) -> Self { - Self { - store, - reindex_depth, - reindex_slack, - } - } - - pub fn init(&mut self, origin: Hash) -> &mut Self { - init(self.store, origin).unwrap(); - self - } - - pub fn init_with_params(&mut self, origin: Hash, capacity: Interval) -> &mut Self { - init_with_params(self.store, origin, capacity).unwrap(); - self - } - - pub fn add_block(&mut self, hash: Hash, parent: Hash) -> &mut Self { - add_tree_block( - self.store, - hash, - parent, - self.reindex_depth, - self.reindex_slack, - ) - .unwrap(); - try_advancing_reindex_root(self.store, hash, self.reindex_depth, self.reindex_slack) - .unwrap(); - self - } - - pub fn store(&self) -> &&'a mut T { - &self.store - } -} - -#[derive(Clone)] -pub struct DagBlock { - pub hash: Hash, - pub parents: Vec, -} - -impl DagBlock { - pub fn new(hash: Hash, parents: Vec) -> Self { - Self { hash, parents } - } -} - -/// A struct with fluent API to streamline DAG building -pub struct DagBuilder<'a, T: ReachabilityStore + ?Sized> { - store: &'a mut T, - map: BlockHashMap, -} - -impl<'a, T: ReachabilityStore + ?Sized> DagBuilder<'a, T> { - pub fn new(store: &'a mut T) -> Self { - Self { - store, - map: BlockHashMap::new(), - } - } - - pub fn init(&mut self, origin: Hash) -> &mut Self { - init(self.store, origin).unwrap(); - self - } - - pub fn add_block(&mut self, block: DagBlock) -> &mut Self { - // Select by height (longest chain) just for the sake of internal isolated tests - let selected_parent = block - .parents - .iter() - .cloned() - .max_by_key(|p| 
self.store.get_height(*p).unwrap()) - .unwrap(); - let mergeset = self.mergeset(&block, selected_parent); - add_block( - self.store, - block.hash, - selected_parent, - &mut mergeset.iter().cloned(), - ) - .unwrap(); - hint_virtual_selected_parent(self.store, block.hash).unwrap(); - self.map.insert(block.hash, block); - self - } - - fn mergeset(&self, block: &DagBlock, selected_parent: Hash) -> Vec { - let mut queue: VecDeque = block - .parents - .iter() - .copied() - .filter(|p| *p != selected_parent) - .collect(); - let mut mergeset: BlockHashSet = queue.iter().copied().collect(); - let mut past = BlockHashSet::new(); - - while let Some(current) = queue.pop_front() { - for parent in self.map[¤t].parents.iter() { - if mergeset.contains(parent) || past.contains(parent) { - continue; - } - - if is_dag_ancestor_of(self.store, *parent, selected_parent).unwrap() { - past.insert(*parent); - continue; - } - - mergeset.insert(*parent); - queue.push_back(*parent); - } - } - mergeset.into_iter().collect() - } - - pub fn store(&self) -> &&'a mut T { - &self.store - } -} - -#[derive(Error, Debug)] -pub enum TestError { - #[error("data store error")] - StoreError(#[from] StoreError), - - #[error("empty interval")] - EmptyInterval(Hash, Interval), - - #[error("sibling intervals are expected to be consecutive")] - NonConsecutiveSiblingIntervals(Interval, Interval), - - #[error("child interval out of parent bounds")] - IntervalOutOfParentBounds { - parent: Hash, - child: Hash, - parent_interval: Interval, - child_interval: Interval, - }, -} - -pub trait StoreValidationExtensions { - /// Checks if `block` is in the past of `other` (creates hashes from the u64 numbers) - fn in_past_of(&self, block: u64, other: u64) -> bool; - - /// Checks if `block` and `other` are in the anticone of each other - /// (creates hashes from the u64 numbers) - fn are_anticone(&self, block: u64, other: u64) -> bool; - - /// Validates that all tree intervals match the expected interval relations - fn validate_intervals(&self, root: Hash) -> std::result::Result<(), TestError>; -} - -impl StoreValidationExtensions for T { - fn in_past_of(&self, block: u64, other: u64) -> bool { - if block == other { - return false; - } - let res = is_dag_ancestor_of(self, block.into(), other.into()).unwrap(); - if res { - // Assert that the `future` relation is indeed asymmetric - assert!(!is_dag_ancestor_of(self, other.into(), block.into()).unwrap()) - } - res - } - - fn are_anticone(&self, block: u64, other: u64) -> bool { - !is_dag_ancestor_of(self, block.into(), other.into()).unwrap() - && !is_dag_ancestor_of(self, other.into(), block.into()).unwrap() - } - - fn validate_intervals(&self, root: Hash) -> std::result::Result<(), TestError> { - let mut queue = VecDeque::::from([root]); - while let Some(parent) = queue.pop_front() { - let children = self.get_children(parent)?; - queue.extend(children.iter()); - - let parent_interval = self.get_interval(parent)?; - if parent_interval.is_empty() { - return Err(TestError::EmptyInterval(parent, parent_interval)); - } - - // Verify parent-child strict relation - for child in children.iter().cloned() { - let child_interval = self.get_interval(child)?; - if !parent_interval.strictly_contains(child_interval) { - return Err(TestError::IntervalOutOfParentBounds { - parent, - child, - parent_interval, - child_interval, - }); - } - } - - // Iterate over consecutive siblings - for siblings in children.windows(2) { - let sibling_interval = self.get_interval(siblings[0])?; - let current_interval = 
self.get_interval(siblings[1])?; - if sibling_interval.end + 1 != current_interval.start { - return Err(TestError::NonConsecutiveSiblingIntervals( - sibling_interval, - current_interval, - )); - } - } - } - Ok(()) - } -} diff --git a/consensus/dag/src/reachability/tree.rs b/consensus/dag/src/reachability/tree.rs deleted file mode 100644 index a0d98a9b23..0000000000 --- a/consensus/dag/src/reachability/tree.rs +++ /dev/null @@ -1,161 +0,0 @@ -//! -//! Tree-related functions internal to the module -//! -use super::{ - extensions::ReachabilityStoreIntervalExtensions, inquirer::*, reindex::ReindexOperationContext, - *, -}; -use crate::consensusdb::schemadb::ReachabilityStore; -use starcoin_crypto::HashValue as Hash; - -/// Adds `new_block` as a child of `parent` in the tree structure. If this block -/// has no remaining interval to allocate, a reindexing is triggered. When a reindexing -/// is triggered, the reindex root point is used within the reindex algorithm's logic -pub fn add_tree_block( - store: &mut (impl ReachabilityStore + ?Sized), - new_block: Hash, - parent: Hash, - reindex_depth: u64, - reindex_slack: u64, -) -> Result<()> { - // Get the remaining interval capacity - let remaining = store.interval_remaining_after(parent)?; - // Append the new child to `parent.children` - let parent_height = store.append_child(parent, new_block)?; - if remaining.is_empty() { - // Init with the empty interval. - // Note: internal logic relies on interval being this specific interval - // which comes exactly at the end of current capacity - store.insert( - new_block, - parent, - remaining, - parent_height.checked_add(1).unwrap(), - )?; - - // Start a reindex operation (TODO: add timing) - let reindex_root = store.get_reindex_root()?; - let mut ctx = ReindexOperationContext::new(store, reindex_depth, reindex_slack); - ctx.reindex_intervals(new_block, reindex_root)?; - } else { - let allocated = remaining.split_half().0; - store.insert( - new_block, - parent, - allocated, - parent_height.checked_add(1).unwrap(), - )?; - }; - Ok(()) -} - -/// Finds the most recent tree ancestor common to both `block` and the given `reindex root`. -/// Note that we assume that almost always the chain between the reindex root and the common -/// ancestor is longer than the chain between block and the common ancestor, hence we iterate -/// from `block`. -pub fn find_common_tree_ancestor( - store: &(impl ReachabilityStore + ?Sized), - block: Hash, - reindex_root: Hash, -) -> Result { - let mut current = block; - loop { - if is_chain_ancestor_of(store, current, reindex_root)? { - return Ok(current); - } - current = store.get_parent(current)?; - } -} - -/// Finds a possible new reindex root, based on the `current` reindex root and the selected tip `hint` -pub fn find_next_reindex_root( - store: &(impl ReachabilityStore + ?Sized), - current: Hash, - hint: Hash, - reindex_depth: u64, - reindex_slack: u64, -) -> Result<(Hash, Hash)> { - let mut ancestor = current; - let mut next = current; - - let hint_height = store.get_height(hint)?; - - // Test if current root is ancestor of selected tip (`hint`) - if not, this is a reorg case - if !is_chain_ancestor_of(store, current, hint)? { - let current_height = store.get_height(current)?; - - // We have reindex root out of (hint) selected tip chain, however we switch chains only after a sufficient - // threshold of `reindex_slack` diff in order to address possible alternating reorg attacks. 
- // The `reindex_slack` constant is used as an heuristic large enough on the one hand, but - // one which will not harm performance on the other hand - given the available slack at the chain split point. - // - // Note: In some cases the height of the (hint) selected tip can be lower than the current reindex root height. - // If that's the case we keep the reindex root unchanged. - if hint_height < current_height - || hint_height.checked_sub(current_height).unwrap() < reindex_slack - { - return Ok((current, current)); - } - - let common = find_common_tree_ancestor(store, hint, current)?; - ancestor = common; - next = common; - } - - // Iterate from ancestor towards the selected tip (`hint`) until passing the - // `reindex_window` threshold, for finding the new reindex root - loop { - let child = get_next_chain_ancestor_unchecked(store, hint, next)?; - let child_height = store.get_height(child)?; - - if hint_height < child_height { - return Err(ReachabilityError::DataInconsistency); - } - if hint_height.checked_sub(child_height).unwrap() < reindex_depth { - break; - } - next = child; - } - - Ok((ancestor, next)) -} - -/// Attempts to advance or move the current reindex root according to the -/// provided `virtual selected parent` (`VSP`) hint. -/// It is important for the reindex root point to follow the consensus-agreed chain -/// since this way it can benefit from chain-robustness which is implied by the security -/// of the ordering protocol. That is, it enjoys from the fact that all future blocks are -/// expected to elect the root subtree (by converging to the agreement to have it on the -/// selected chain). See also the reachability algorithms overview (TODO) -pub fn try_advancing_reindex_root( - store: &mut (impl ReachabilityStore + ?Sized), - hint: Hash, - reindex_depth: u64, - reindex_slack: u64, -) -> Result<()> { - // Get current root from the store - let current = store.get_reindex_root()?; - - // Find the possible new root - let (mut ancestor, next) = - find_next_reindex_root(store, current, hint, reindex_depth, reindex_slack)?; - - // No update to root, return - if current == next { - return Ok(()); - } - - // if ancestor == next { - // trace!("next reindex root is an ancestor of current one, skipping concentration.") - // } - while ancestor != next { - let child = get_next_chain_ancestor_unchecked(store, next, ancestor)?; - let mut ctx = ReindexOperationContext::new(store, reindex_depth, reindex_slack); - ctx.concentrate_interval(ancestor, child, child == next)?; - ancestor = child; - } - - // Update reindex root in the data store - store.set_reindex_root(next)?; - Ok(()) -} diff --git a/consensus/dag/src/types/ghostdata.rs b/consensus/dag/src/types/ghostdata.rs deleted file mode 100644 index c680172148..0000000000 --- a/consensus/dag/src/types/ghostdata.rs +++ /dev/null @@ -1,147 +0,0 @@ -use super::trusted::ExternalGhostdagData; -use serde::{Deserialize, Serialize}; -use starcoin_crypto::HashValue as Hash; -use starcoin_types::blockhash::{BlockHashMap, BlockHashes, BlueWorkType, HashKTypeMap, KType}; -use std::sync::Arc; - -#[derive(Clone, Serialize, Deserialize, Default, Debug)] -pub struct GhostdagData { - pub blue_score: u64, - pub blue_work: BlueWorkType, - pub selected_parent: Hash, - pub mergeset_blues: BlockHashes, - pub mergeset_reds: BlockHashes, - pub blues_anticone_sizes: HashKTypeMap, -} - -#[derive(Clone, Debug, Default, Serialize, Deserialize, Copy)] -pub struct CompactGhostdagData { - pub blue_score: u64, - pub blue_work: BlueWorkType, - pub 
selected_parent: Hash, -} - -impl From for GhostdagData { - fn from(value: ExternalGhostdagData) -> Self { - Self { - blue_score: value.blue_score, - blue_work: value.blue_work, - selected_parent: value.selected_parent, - mergeset_blues: Arc::new(value.mergeset_blues), - mergeset_reds: Arc::new(value.mergeset_reds), - blues_anticone_sizes: Arc::new(value.blues_anticone_sizes), - } - } -} - -impl From<&GhostdagData> for ExternalGhostdagData { - fn from(value: &GhostdagData) -> Self { - Self { - blue_score: value.blue_score, - blue_work: value.blue_work, - selected_parent: value.selected_parent, - mergeset_blues: (*value.mergeset_blues).clone(), - mergeset_reds: (*value.mergeset_reds).clone(), - blues_anticone_sizes: (*value.blues_anticone_sizes).clone(), - } - } -} - -impl GhostdagData { - pub fn new( - blue_score: u64, - blue_work: BlueWorkType, - selected_parent: Hash, - mergeset_blues: BlockHashes, - mergeset_reds: BlockHashes, - blues_anticone_sizes: HashKTypeMap, - ) -> Self { - Self { - blue_score, - blue_work, - selected_parent, - mergeset_blues, - mergeset_reds, - blues_anticone_sizes, - } - } - - pub fn new_with_selected_parent(selected_parent: Hash, k: KType) -> Self { - let mut mergeset_blues: Vec = Vec::with_capacity(k.checked_add(1).unwrap() as usize); - let mut blues_anticone_sizes: BlockHashMap = BlockHashMap::with_capacity(k as usize); - mergeset_blues.push(selected_parent); - blues_anticone_sizes.insert(selected_parent, 0); - - Self { - blue_score: Default::default(), - blue_work: Default::default(), - selected_parent, - mergeset_blues: BlockHashes::new(mergeset_blues), - mergeset_reds: Default::default(), - blues_anticone_sizes: HashKTypeMap::new(blues_anticone_sizes), - } - } - - pub fn mergeset_size(&self) -> usize { - self.mergeset_blues - .len() - .checked_add(self.mergeset_reds.len()) - .unwrap() - } - - /// Returns an iterator to the mergeset with no specified order (excluding the selected parent) - pub fn unordered_mergeset_without_selected_parent(&self) -> impl Iterator + '_ { - self.mergeset_blues - .iter() - .skip(1) // Skip the selected parent - .cloned() - .chain(self.mergeset_reds.iter().cloned()) - } - - /// Returns an iterator to the mergeset with no specified order (including the selected parent) - pub fn unordered_mergeset(&self) -> impl Iterator + '_ { - self.mergeset_blues - .iter() - .cloned() - .chain(self.mergeset_reds.iter().cloned()) - } - - pub fn to_compact(&self) -> CompactGhostdagData { - CompactGhostdagData { - blue_score: self.blue_score, - blue_work: self.blue_work, - selected_parent: self.selected_parent, - } - } - - pub fn add_blue( - &mut self, - block: Hash, - blue_anticone_size: KType, - block_blues_anticone_sizes: &BlockHashMap, - ) { - // Add the new blue block to mergeset blues - BlockHashes::make_mut(&mut self.mergeset_blues).push(block); - - // Get a mut ref to internal anticone size map - let blues_anticone_sizes = HashKTypeMap::make_mut(&mut self.blues_anticone_sizes); - - // Insert the new blue block with its blue anticone size to the map - blues_anticone_sizes.insert(block, blue_anticone_size); - - // Insert/update map entries for blocks affected by this insertion - for (blue, size) in block_blues_anticone_sizes { - blues_anticone_sizes.insert(*blue, size.checked_add(1).unwrap()); - } - } - - pub fn add_red(&mut self, block: Hash) { - // Add the new red block to mergeset reds - BlockHashes::make_mut(&mut self.mergeset_reds).push(block); - } - - pub fn finalize_score_and_work(&mut self, blue_score: u64, blue_work: BlueWorkType) { 
- self.blue_score = blue_score; - self.blue_work = blue_work; - } -} diff --git a/consensus/dag/src/types/interval.rs b/consensus/dag/src/types/interval.rs deleted file mode 100644 index 0b5cc4f6e5..0000000000 --- a/consensus/dag/src/types/interval.rs +++ /dev/null @@ -1,377 +0,0 @@ -use serde::{Deserialize, Serialize}; -use std::fmt::{Display, Formatter}; - -#[derive(Debug, Default, PartialEq, Eq, Clone, Copy, Serialize, Deserialize)] -pub struct Interval { - pub start: u64, - pub end: u64, -} - -impl Display for Interval { - fn fmt(&self, f: &mut Formatter) -> std::fmt::Result { - write!(f, "[{}, {}]", self.start, self.end) - } -} - -impl From for (u64, u64) { - fn from(val: Interval) -> Self { - (val.start, val.end) - } -} - -impl Interval { - pub fn new(start: u64, end: u64) -> Self { - debug_assert!(start > 0 && end < u64::MAX && end >= start.checked_sub(1).unwrap()); // TODO: make sure this is actually debug-only - Interval { start, end } - } - - pub fn empty() -> Self { - Self::new(1, 0) - } - - /// Returns the maximally allowed `u64` interval. We leave a margin of 1 from - /// both `u64` bounds (`0` and `u64::MAX`) in order to support the reduction of any - /// legal interval to an empty one by setting `end = start - 1` or `start = end + 1` - pub fn maximal() -> Self { - Self::new(1, u64::MAX.saturating_sub(1)) - } - - pub fn size(&self) -> u64 { - // Empty intervals are indicated by `self.end == self.start - 1`, so - // we avoid the overflow by first adding 1 - // Note: this function will panic if `self.end < self.start - 1` due to overflow - (self.end.checked_add(1).unwrap()) - .checked_sub(self.start) - .unwrap() - } - - pub fn is_empty(&self) -> bool { - self.size() == 0 - } - - pub fn increase(&self, offset: u64) -> Self { - Self::new( - self.start.checked_add(offset).unwrap(), - self.end.checked_add(offset).unwrap(), - ) - } - - pub fn decrease(&self, offset: u64) -> Self { - Self::new( - self.start.checked_sub(offset).unwrap(), - self.end.checked_sub(offset).unwrap(), - ) - } - - pub fn increase_start(&self, offset: u64) -> Self { - Self::new(self.start.checked_add(offset).unwrap(), self.end) - } - - pub fn decrease_start(&self, offset: u64) -> Self { - Self::new(self.start.checked_sub(offset).unwrap(), self.end) - } - - pub fn increase_end(&self, offset: u64) -> Self { - Self::new(self.start, self.end.checked_add(offset).unwrap()) - } - - pub fn decrease_end(&self, offset: u64) -> Self { - Self::new(self.start, self.end.checked_sub(offset).unwrap()) - } - - pub fn split_half(&self) -> (Self, Self) { - self.split_fraction(0.5) - } - - /// Splits this interval to two parts such that their - /// union is equal to the original interval and the first (left) part - /// contains the given fraction of the original interval's size. - /// Note: if the split results in fractional parts, this method rounds - /// the first part up and the last part down. - fn split_fraction(&self, fraction: f32) -> (Self, Self) { - let left_size = f32::ceil(self.size() as f32 * fraction) as u64; - - ( - Self::new( - self.start, - self.start - .checked_add(left_size) - .unwrap() - .checked_sub(1) - .unwrap(), - ), - Self::new(self.start.checked_add(left_size).unwrap(), self.end), - ) - } - - /// Splits this interval to exactly |sizes| parts where - /// |part_i| = sizes[i]. This method expects sum(sizes) to be exactly - /// equal to the interval's size. 
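As a concrete illustration of the split_exact contract described above, a small sketch (the test name is invented; the crate's own test_split_exact further down checks only the part sizes, not their bounds):

    #[test]
    fn split_exact_layout_sketch() {
        use crate::types::interval::Interval;

        // 5 + 10 + 15 + 20 == 50 == Interval::new(1, 50).size(), so the parts
        // tile the interval consecutively starting from `start`.
        let parts = Interval::new(1, 50).split_exact(&[5, 10, 15, 20]);
        assert_eq!(
            parts,
            vec![
                Interval::new(1, 5),
                Interval::new(6, 15),
                Interval::new(16, 30),
                Interval::new(31, 50),
            ]
        );
    }
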
-    pub fn split_exact(&self, sizes: &[u64]) -> Vec<Interval> {
-        assert_eq!(
-            sizes.iter().sum::<u64>(),
-            self.size(),
-            "sum of sizes must be equal to the interval's size"
-        );
-        let mut start = self.start;
-        sizes
-            .iter()
-            .map(|size| {
-                let interval = Self::new(
-                    start,
-                    start.checked_add(*size).unwrap().checked_sub(1).unwrap(),
-                );
-                start = start.checked_add(*size).unwrap();
-                interval
-            })
-            .collect()
-    }
-
-    /// Splits this interval to |sizes| parts
-    /// by the allocation rule described below. This method expects sum(sizes)
-    /// to be smaller or equal to the interval's size. Every part_i is
-    /// allocated at least sizes[i] capacity. The remaining budget is
-    /// split by an exponentially biased rule described below.
-    ///
-    /// This rule follows the GHOSTDAG protocol behavior where the child
-    /// with the largest subtree is expected to dominate the competition
-    /// for new blocks and thus grow the most. However, we may need to
-    /// add slack for non-largest subtrees in order to make CPU reindexing
-    /// attacks unworthy.
-    pub fn split_exponential(&self, sizes: &[u64]) -> Vec<Interval> {
-        let interval_size = self.size();
-        let sizes_sum = sizes.iter().sum::<u64>();
-        assert!(
-            interval_size >= sizes_sum,
-            "interval's size must be greater than or equal to sum of sizes"
-        );
-        assert!(sizes_sum > 0, "cannot split to 0 parts");
-        if interval_size == sizes_sum {
-            return self.split_exact(sizes);
-        }
-
-        //
-        // Add a fractional bias to every size in the provided sizes
-        //
-
-        let mut remaining_bias = interval_size.checked_sub(sizes_sum).unwrap();
-        let total_bias = remaining_bias as f64;
-
-        let mut biased_sizes = Vec::<u64>::with_capacity(sizes.len());
-        let exp_fractions = exponential_fractions(sizes);
-        for (i, fraction) in exp_fractions.iter().enumerate() {
-            let bias: u64 = if i == exp_fractions.len().checked_sub(1).unwrap() {
-                remaining_bias
-            } else {
-                remaining_bias.min(f64::round(total_bias * fraction) as u64)
-            };
-            biased_sizes.push(sizes[i].checked_add(bias).unwrap());
-            remaining_bias = remaining_bias.checked_sub(bias).unwrap();
-        }
-
-        self.split_exact(biased_sizes.as_slice())
-    }
-
-    pub fn contains(&self, other: Self) -> bool {
-        self.start <= other.start && other.end <= self.end
-    }
-
-    pub fn strictly_contains(&self, other: Self) -> bool {
-        self.start <= other.start && other.end < self.end
-    }
-}
-
-/// Returns a fraction for each size in sizes
-/// as follows:
-/// fraction[i] = 2^size[i] / sum_j(2^size[j])
-/// In the code below the above equation is divided by 2^max(size)
-/// to avoid exploding numbers. Note that in 1 / 2^(max(size)-size[i])
-/// we divide 1 by potentially a very large number, which will
-/// result in loss of float precision. This is not a problem - all
-/// numbers close to 0 bear effectively the same weight.
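To make the formula above concrete: for sizes [1, 2, 3] the weights are 2^1 = 2, 2^2 = 4 and 2^3 = 8, their sum is 14, so the fractions are 2/14 ≈ 0.143, 4/14 ≈ 0.286 and 8/14 ≈ 0.571. Computing each weight as 1 / 2^(max(size) - size[i]) instead gives 1/4, 1/2 and 1, and normalizing by their sum 7/4 yields exactly the same fractions while keeping every intermediate value small. In split_exponential these fractions only distribute the leftover budget (interval size minus sum(sizes)), and every part keeps at least its requested minimum; that is why the test case below that splits [1, 10_000] with sizes [10, 10, 20] ends up as [1, 20], [21, 40], [41, 10_000]: the largest child absorbs essentially all of the slack. A minimal standalone numeric check (not the crate's function, just the arithmetic above):

// Stand-alone check of the fractions for sizes [1, 2, 3].
fn main() {
    let weights = [2.0f64, 4.0, 8.0];                 // 2^1, 2^2, 2^3
    let sum: f64 = weights.iter().sum();              // 14
    let fractions: Vec<f64> = weights.iter().map(|w| w / sum).collect();
    assert!((fractions[2] - 8.0 / 14.0).abs() < 1e-12);
    println!("{:?}", fractions);                      // ~[0.143, 0.286, 0.571]
}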
-fn exponential_fractions(sizes: &[u64]) -> Vec<f64> {
-    let max_size = sizes.iter().copied().max().unwrap_or_default();
-
-    let mut fractions = sizes
-        .iter()
-        .map(|s| 1f64 / 2f64.powf((max_size - s) as f64))
-        .collect::<Vec<f64>>();
-
-    let fractions_sum = fractions.iter().sum::<f64>();
-    for item in &mut fractions {
-        *item /= fractions_sum;
-    }
-
-    fractions
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-
-    #[test]
-    fn test_interval_basics() {
-        let interval = Interval::new(101, 164);
-        let increased = interval.increase(10);
-        let decreased = increased.decrease(5);
-        // println!("{}", interval.clone());
-
-        assert_eq!(interval.start + 10, increased.start);
-        assert_eq!(interval.end + 10, increased.end);
-
-        assert_eq!(interval.start + 5, decreased.start);
-        assert_eq!(interval.end + 5, decreased.end);
-
-        assert_eq!(interval.size(), 64);
-        assert_eq!(Interval::maximal().size(), u64::MAX - 1);
-        assert_eq!(Interval::empty().size(), 0);
-
-        let (empty_left, empty_right) = Interval::empty().split_half();
-        assert_eq!(empty_left.size(), 0);
-        assert_eq!(empty_right.size(), 0);
-
-        assert_eq!(interval.start + 10, interval.increase_start(10).start);
-        assert_eq!(interval.start - 10, interval.decrease_start(10).start);
-        assert_eq!(interval.end + 10, interval.increase_end(10).end);
-        assert_eq!(interval.end - 10, interval.decrease_end(10).end);
-
-        assert_eq!(interval.end, interval.increase_start(10).end);
-        assert_eq!(interval.end, interval.decrease_start(10).end);
-        assert_eq!(interval.start, interval.increase_end(10).start);
-        assert_eq!(interval.start, interval.decrease_end(10).start);
-
-        // println!("{:?}", Interval::maximal());
-        // println!("{:?}", Interval::maximal().split_half());
-    }
-
-    #[test]
-    fn test_split_exact() {
-        let sizes = vec![5u64, 10, 15, 20];
-        let intervals = Interval::new(1, 50).split_exact(sizes.as_slice());
-        assert_eq!(intervals.len(), sizes.len());
-        for i in 0..sizes.len() {
-            assert_eq!(intervals[i].size(), sizes[i])
-        }
-    }
-
-    #[test]
-    fn test_exponential_fractions() {
-        let mut exp_fractions = exponential_fractions(vec![2, 4, 8, 16].as_slice());
-        // println!("{:?}", exp_fractions);
-        for i in 0..exp_fractions.len() - 1 {
-            assert!(exp_fractions[i + 1] > exp_fractions[i]);
-        }
-
-        exp_fractions = exponential_fractions(vec![].as_slice());
-        assert_eq!(exp_fractions.len(), 0);
-
-        exp_fractions = exponential_fractions(vec![0, 0].as_slice());
-        assert_eq!(exp_fractions.len(), 2);
-        assert_eq!(0.5f64, exp_fractions[0]);
-        assert_eq!(exp_fractions[0], exp_fractions[1]);
-    }
-
-    #[test]
-    fn test_contains() {
-        assert!(Interval::new(1, 100).contains(Interval::new(1, 100)));
-        assert!(Interval::new(1, 100).contains(Interval::new(1, 99)));
-        assert!(Interval::new(1, 100).contains(Interval::new(2, 100)));
-        assert!(Interval::new(1, 100).contains(Interval::new(2, 99)));
-        assert!(!Interval::new(1, 100).contains(Interval::new(50, 150)));
-        assert!(!Interval::new(1, 100).contains(Interval::new(150, 160)));
-    }
-
-    #[test]
-    fn test_split_exponential() {
-        struct Test {
-            interval: Interval,
-            sizes: Vec<u64>,
-            expected: Vec<Interval>,
-        }
-
-        let tests = [
-            Test {
-                interval: Interval::new(1, 100),
-                sizes: vec![100u64],
-                expected: vec![Interval::new(1, 100)],
-            },
-            Test {
-                interval: Interval::new(1, 100),
-                sizes: vec![50u64, 50],
-                expected: vec![Interval::new(1, 50), Interval::new(51, 100)],
-            },
-            Test {
-                interval: Interval::new(1, 100),
-                sizes: vec![10u64, 20, 30, 40],
-                expected: vec![
-                    Interval::new(1, 10),
-                    Interval::new(11, 30),
-                    Interval::new(31, 60),
-                    Interval::new(61,
100), - ], - }, - Test { - interval: Interval::new(1, 100), - sizes: vec![25u64, 25], - expected: vec![Interval::new(1, 50), Interval::new(51, 100)], - }, - Test { - interval: Interval::new(1, 100), - sizes: vec![1u64, 1], - expected: vec![Interval::new(1, 50), Interval::new(51, 100)], - }, - Test { - interval: Interval::new(1, 100), - sizes: vec![33u64, 33, 33], - expected: vec![ - Interval::new(1, 33), - Interval::new(34, 66), - Interval::new(67, 100), - ], - }, - Test { - interval: Interval::new(1, 100), - sizes: vec![10u64, 15, 25], - expected: vec![ - Interval::new(1, 10), - Interval::new(11, 25), - Interval::new(26, 100), - ], - }, - Test { - interval: Interval::new(1, 100), - sizes: vec![25u64, 15, 10], - expected: vec![ - Interval::new(1, 75), - Interval::new(76, 90), - Interval::new(91, 100), - ], - }, - Test { - interval: Interval::new(1, 10_000), - sizes: vec![10u64, 10, 20], - expected: vec![ - Interval::new(1, 20), - Interval::new(21, 40), - Interval::new(41, 10_000), - ], - }, - Test { - interval: Interval::new(1, 100_000), - sizes: vec![31_000u64, 31_000, 30_001], - expected: vec![ - Interval::new(1, 35_000), - Interval::new(35_001, 69_999), - Interval::new(70_000, 100_000), - ], - }, - ]; - - for test in &tests { - assert_eq!( - test.expected, - test.interval.split_exponential(test.sizes.as_slice()) - ); - } - } -} diff --git a/consensus/dag/src/types/mod.rs b/consensus/dag/src/types/mod.rs deleted file mode 100644 index d3acae1c23..0000000000 --- a/consensus/dag/src/types/mod.rs +++ /dev/null @@ -1,6 +0,0 @@ -pub mod ghostdata; -pub mod interval; -pub mod ordering; -pub mod perf; -pub mod reachability; -pub mod trusted; diff --git a/consensus/dag/src/types/ordering.rs b/consensus/dag/src/types/ordering.rs deleted file mode 100644 index a1ed8c2561..0000000000 --- a/consensus/dag/src/types/ordering.rs +++ /dev/null @@ -1,36 +0,0 @@ -use serde::{Deserialize, Serialize}; -use starcoin_crypto::HashValue as Hash; -use starcoin_types::blockhash::BlueWorkType; -use std::cmp::Ordering; - -#[derive(Eq, Clone, Debug, Serialize, Deserialize)] -pub struct SortableBlock { - pub hash: Hash, - pub blue_work: BlueWorkType, -} - -impl SortableBlock { - pub fn new(hash: Hash, blue_work: BlueWorkType) -> Self { - Self { hash, blue_work } - } -} - -impl PartialEq for SortableBlock { - fn eq(&self, other: &Self) -> bool { - self.hash == other.hash - } -} - -impl PartialOrd for SortableBlock { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} - -impl Ord for SortableBlock { - fn cmp(&self, other: &Self) -> Ordering { - self.blue_work - .cmp(&other.blue_work) - .then_with(|| self.hash.cmp(&other.hash)) - } -} diff --git a/consensus/dag/src/types/perf.rs b/consensus/dag/src/types/perf.rs deleted file mode 100644 index 6da44d4cd7..0000000000 --- a/consensus/dag/src/types/perf.rs +++ /dev/null @@ -1,51 +0,0 @@ -//! -//! A module for performance critical constants which depend on consensus parameters. -//! The constants in this module should all be revisited if mainnet consensus parameters change. -//! - -/// The default target depth for reachability reindexes. -pub const DEFAULT_REINDEX_DEPTH: u64 = 100; - -/// The default slack interval used by the reachability -/// algorithm to encounter for blocks out of the selected chain. 
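For scale, the two reindex constants in this removed module default to a target depth of 100 blocks (DEFAULT_REINDEX_DEPTH above) and a slack of 1 << 12 = 4096 interval positions (DEFAULT_REINDEX_SLACK below); as the module header notes, both depend on consensus parameters and should be revisited if those change.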
-pub const DEFAULT_REINDEX_SLACK: u64 = 1 << 12; - -#[derive(Clone, Debug)] -pub struct PerfParams { - // - // Cache sizes - // - /// Preferred cache size for header-related data - pub header_data_cache_size: u64, - - /// Preferred cache size for block-body-related data which - /// is typically orders-of magnitude larger than header data - /// (Note this cannot be set to high due to severe memory consumption) - pub block_data_cache_size: u64, - - /// Preferred cache size for UTXO-related data - pub utxo_set_cache_size: u64, - - /// Preferred cache size for block-window-related data - pub block_window_cache_size: u64, - - // - // Thread-pools - // - /// Defaults to 0 which indicates using system default - /// which is typically the number of logical CPU cores - pub block_processors_num_threads: usize, - - /// Defaults to 0 which indicates using system default - /// which is typically the number of logical CPU cores - pub virtual_processor_num_threads: usize, -} - -pub const PERF_PARAMS: PerfParams = PerfParams { - header_data_cache_size: 10_000, - block_data_cache_size: 200, - utxo_set_cache_size: 10_000, - block_window_cache_size: 2000, - block_processors_num_threads: 0, - virtual_processor_num_threads: 0, -}; diff --git a/consensus/dag/src/types/reachability.rs b/consensus/dag/src/types/reachability.rs deleted file mode 100644 index 35dc3979b6..0000000000 --- a/consensus/dag/src/types/reachability.rs +++ /dev/null @@ -1,26 +0,0 @@ -use super::interval::Interval; -use serde::{Deserialize, Serialize}; -use starcoin_crypto::HashValue as Hash; -use starcoin_types::blockhash::BlockHashes; -use std::sync::Arc; - -#[derive(Clone, Default, Debug, Serialize, Deserialize)] -pub struct ReachabilityData { - pub children: BlockHashes, - pub parent: Hash, - pub interval: Interval, - pub height: u64, - pub future_covering_set: BlockHashes, -} - -impl ReachabilityData { - pub fn new(parent: Hash, interval: Interval, height: u64) -> Self { - Self { - children: Arc::new(vec![]), - parent, - interval, - height, - future_covering_set: Arc::new(vec![]), - } - } -} diff --git a/consensus/dag/src/types/trusted.rs b/consensus/dag/src/types/trusted.rs deleted file mode 100644 index 9a4cf37bbd..0000000000 --- a/consensus/dag/src/types/trusted.rs +++ /dev/null @@ -1,26 +0,0 @@ -use serde::{Deserialize, Serialize}; -use starcoin_crypto::HashValue as Hash; -use starcoin_types::blockhash::{BlockHashMap, BlueWorkType, KType}; - -/// Represents semi-trusted externally provided Ghostdag data (by a network peer) -#[derive(Clone, Serialize, Deserialize)] -pub struct ExternalGhostdagData { - pub blue_score: u64, - pub blue_work: BlueWorkType, - pub selected_parent: Hash, - pub mergeset_blues: Vec, - pub mergeset_reds: Vec, - pub blues_anticone_sizes: BlockHashMap, -} - -/// Represents externally provided Ghostdag data associated with a block Hash -pub struct TrustedGhostdagData { - pub hash: Hash, - pub ghostdag: ExternalGhostdagData, -} - -impl TrustedGhostdagData { - pub fn new(hash: Hash, ghostdag: ExternalGhostdagData) -> Self { - Self { hash, ghostdag } - } -} diff --git a/jacktest.log b/jacktest.log new file mode 100644 index 0000000000..40575fe9c3 --- /dev/null +++ b/jacktest.log @@ -0,0 +1,949 @@ + +running 0 tests + +test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.00s + + +running 0 tests + +test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 1 filtered out; finished in 0.00s + + +running 0 tests + +test result: ok. 
0 passed; 0 failed; 0 ignored; 0 measured; 2 filtered out; finished in 0.00s

[... further all-empty "running 0 tests" result blocks omitted ...]

+running 0 tests
+
+test result: ok.
0 passed; 0 failed; 0 ignored; 0 measured; 1 filtered out; finished in 0.00s + + +running 0 tests + +test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 1 filtered out; finished in 0.00s + + +running 0 tests + +test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 1 filtered out; finished in 0.00s + + +running 0 tests + +test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 1 filtered out; finished in 0.00s + + +running 0 tests + +test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.00s + + +running 0 tests + +test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 8 filtered out; finished in 0.00s + + +running 0 tests + +test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 7 filtered out; finished in 0.00s + + +running 0 tests + +test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 24 filtered out; finished in 0.00s + + +running 0 tests + +test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 1 filtered out; finished in 0.02s + + +running 1 test + +jacktest: produce testing block: HashValue(0x5bb998d19da86c4f995f6ba69593e493a56c81509832b682138555a1dcf79425), number: 1 +jacktest: produce testing block: HashValue(0x803c4c1f18bf09af1ffff704c5bbff592bdc1d7a6581c9deb70e129b95ed7ccd), number: 2 +jacktest: tips is [HashValue(0x803c4c1f18bf09af1ffff704c5bbff592bdc1d7a6581c9deb70e129b95ed7ccd)] +jacktest: produce testing block: HashValue(0xea2ac6b9ac167064dffc15c476190b754a6ac9f67017425f39a2b830e3502a15), number: 3 +jacktest: connect dag, HashValue(0xea2ac6b9ac167064dffc15c476190b754a6ac9f67017425f39a2b830e3502a15), number: 3 +jacktest: tips is [HashValue(0xea2ac6b9ac167064dffc15c476190b754a6ac9f67017425f39a2b830e3502a15)] +jacktest: produce testing block: HashValue(0x0796fc939f9aec19cb161a5cee4f6344ab38ac9c473947147492480d91808c0b), number: 4 +jacktest: connect dag, HashValue(0x0796fc939f9aec19cb161a5cee4f6344ab38ac9c473947147492480d91808c0b), number: 4 +jacktest: tips is [HashValue(0x0796fc939f9aec19cb161a5cee4f6344ab38ac9c473947147492480d91808c0b)] +jacktest: produce testing block: HashValue(0xefbb754bb6748e7b498ac941912446a7e5723ed442cb821f5b61bfebd42060ff), number: 5 +jacktest: connect dag, HashValue(0xefbb754bb6748e7b498ac941912446a7e5723ed442cb821f5b61bfebd42060ff), number: 5 +jacktest: tips is [HashValue(0xefbb754bb6748e7b498ac941912446a7e5723ed442cb821f5b61bfebd42060ff)] +jacktest: produce testing block: HashValue(0x8e9d315585dcc71af4f157cae81346a90a25fcce389240cc4c8f273e229f8d06), number: 6 +jacktest: connect dag, HashValue(0x8e9d315585dcc71af4f157cae81346a90a25fcce389240cc4c8f273e229f8d06), number: 6 +jacktest: tips is [HashValue(0x8e9d315585dcc71af4f157cae81346a90a25fcce389240cc4c8f273e229f8d06)] +jacktest: produce testing block: HashValue(0x5edb5c6ba90c547a202ce42835b38241d0d26c83606cdc1076e7b0caad7d4b39), number: 7 +jacktest: connect dag, HashValue(0x5edb5c6ba90c547a202ce42835b38241d0d26c83606cdc1076e7b0caad7d4b39), number: 7 +jacktest: tips is [HashValue(0x5edb5c6ba90c547a202ce42835b38241d0d26c83606cdc1076e7b0caad7d4b39)] +jacktest: produce testing block: HashValue(0xa293220e189ce78d4baa078dfe4baa1fb780e7b1dad8a585279af4913b307292), number: 8 +jacktest: connect dag, HashValue(0xa293220e189ce78d4baa078dfe4baa1fb780e7b1dad8a585279af4913b307292), number: 8 +jacktest: tips is [HashValue(0xa293220e189ce78d4baa078dfe4baa1fb780e7b1dad8a585279af4913b307292)] +jacktest: produce testing block: HashValue(0xdafa4b17ffa51161f5ff7fb4c0197f676d279f2f925de7ffee9c0aab5bf2903e), number: 9 +jacktest: connect dag, 
HashValue(0xdafa4b17ffa51161f5ff7fb4c0197f676d279f2f925de7ffee9c0aab5bf2903e), number: 9 +jacktest: tips is [HashValue(0xdafa4b17ffa51161f5ff7fb4c0197f676d279f2f925de7ffee9c0aab5bf2903e)] +jacktest: produce testing block: HashValue(0x7dc84137c2d556b2a204bb3a57238e1490fa63e97e693d65ea00f50fa73985bb), number: 10 +jacktest: connect dag, HashValue(0x7dc84137c2d556b2a204bb3a57238e1490fa63e97e693d65ea00f50fa73985bb), number: 10 +jacktest: now sync dag block -- ensure_dag_parent_blocks_exist +jacktest: block is not a dag block, skipping, its id: HashValue(0x5bb998d19da86c4f995f6ba69593e493a56c81509832b682138555a1dcf79425), its number 1 +jacktest: now sync dag block -- ensure_dag_parent_blocks_exist2 +jacktest: now sync dag block -- ensure_dag_parent_blocks_exist +jacktest: block is not a dag block, skipping, its id: HashValue(0x803c4c1f18bf09af1ffff704c5bbff592bdc1d7a6581c9deb70e129b95ed7ccd), its number 2 +jacktest: now sync dag block -- ensure_dag_parent_blocks_exist2 +jacktest: now sync dag block -- ensure_dag_parent_blocks_exist +jacktest: block is a dag block, its id: HashValue(0xea2ac6b9ac167064dffc15c476190b754a6ac9f67017425f39a2b830e3502a15), its parents: Some([HashValue(0x803c4c1f18bf09af1ffff704c5bbff592bdc1d7a6581c9deb70e129b95ed7ccd)]) +jacktest: connect block: HashValue(0x803c4c1f18bf09af1ffff704c5bbff592bdc1d7a6581c9deb70e129b95ed7ccd), number: 2 +jacktest: now apply for sync after fetching: HashValue(0xea2ac6b9ac167064dffc15c476190b754a6ac9f67017425f39a2b830e3502a15), number: 3 +jacktest: connect dag, HashValue(0xea2ac6b9ac167064dffc15c476190b754a6ac9f67017425f39a2b830e3502a15), number: 3 +jacktest: now apply for sync after fetching: HashValue(0x0796fc939f9aec19cb161a5cee4f6344ab38ac9c473947147492480d91808c0b), number: 4 +jacktest: connect dag, HashValue(0x0796fc939f9aec19cb161a5cee4f6344ab38ac9c473947147492480d91808c0b), number: 4 +jacktest: now apply for sync after fetching: HashValue(0xefbb754bb6748e7b498ac941912446a7e5723ed442cb821f5b61bfebd42060ff), number: 5 +jacktest: connect dag, HashValue(0xefbb754bb6748e7b498ac941912446a7e5723ed442cb821f5b61bfebd42060ff), number: 5 +jacktest: now apply for sync after fetching: HashValue(0x8e9d315585dcc71af4f157cae81346a90a25fcce389240cc4c8f273e229f8d06), number: 6 +jacktest: connect dag, HashValue(0x8e9d315585dcc71af4f157cae81346a90a25fcce389240cc4c8f273e229f8d06), number: 6 +jacktest: now apply for sync after fetching: HashValue(0x5edb5c6ba90c547a202ce42835b38241d0d26c83606cdc1076e7b0caad7d4b39), number: 7 +jacktest: connect dag, HashValue(0x5edb5c6ba90c547a202ce42835b38241d0d26c83606cdc1076e7b0caad7d4b39), number: 7 +jacktest: now apply for sync after fetching: HashValue(0xa293220e189ce78d4baa078dfe4baa1fb780e7b1dad8a585279af4913b307292), number: 8 +jacktest: connect dag, HashValue(0xa293220e189ce78d4baa078dfe4baa1fb780e7b1dad8a585279af4913b307292), number: 8 +jacktest: now apply for sync after fetching: HashValue(0xdafa4b17ffa51161f5ff7fb4c0197f676d279f2f925de7ffee9c0aab5bf2903e), number: 9 +jacktest: connect dag, HashValue(0xdafa4b17ffa51161f5ff7fb4c0197f676d279f2f925de7ffee9c0aab5bf2903e), number: 9 +jacktest: now apply for sync after fetching: HashValue(0x7dc84137c2d556b2a204bb3a57238e1490fa63e97e693d65ea00f50fa73985bb), number: 10 +jacktest: connect dag, HashValue(0x7dc84137c2d556b2a204bb3a57238e1490fa63e97e693d65ea00f50fa73985bb), number: 10 +test tasks::tests::test_full_sync_fork has been running for over 60 seconds +jacktest: tips is [HashValue(0x7dc84137c2d556b2a204bb3a57238e1490fa63e97e693d65ea00f50fa73985bb)] +jacktest: 
produce testing block: HashValue(0x21f6056cba2be45a9b1460965f56b3c27daa2fb169e735bde9935130b8d439ec), number: 11 +jacktest: connect dag, HashValue(0x21f6056cba2be45a9b1460965f56b3c27daa2fb169e735bde9935130b8d439ec), number: 11 +jacktest: tips is [HashValue(0x21f6056cba2be45a9b1460965f56b3c27daa2fb169e735bde9935130b8d439ec)] +jacktest: produce testing block: HashValue(0x12882c913970aed16ece01f13c11233f9f7b0ac7c7ae4f7505079dc7b9d9d234), number: 12 +jacktest: connect dag, HashValue(0x12882c913970aed16ece01f13c11233f9f7b0ac7c7ae4f7505079dc7b9d9d234), number: 12 +jacktest: tips is [HashValue(0x12882c913970aed16ece01f13c11233f9f7b0ac7c7ae4f7505079dc7b9d9d234)] +jacktest: produce testing block: HashValue(0x56d2f1420b4ec84354e8278111e10c04bf4283b0006ad0df911f26295fbbc30f), number: 13 +jacktest: connect dag, HashValue(0x56d2f1420b4ec84354e8278111e10c04bf4283b0006ad0df911f26295fbbc30f), number: 13 +jacktest: tips is [HashValue(0x56d2f1420b4ec84354e8278111e10c04bf4283b0006ad0df911f26295fbbc30f)] +jacktest: produce testing block: HashValue(0x0b219c4673c20597c617dc05cc7323e813db1ff30aa3b9848900b11656a7f3e7), number: 14 +jacktest: connect dag, HashValue(0x0b219c4673c20597c617dc05cc7323e813db1ff30aa3b9848900b11656a7f3e7), number: 14 +jacktest: tips is [HashValue(0x0b219c4673c20597c617dc05cc7323e813db1ff30aa3b9848900b11656a7f3e7)] +jacktest: produce testing block: HashValue(0xe33da31b7deae7aa9468393385e76186aad506ad69dd41838376c06e72ae2316), number: 15 +jacktest: connect dag, HashValue(0xe33da31b7deae7aa9468393385e76186aad506ad69dd41838376c06e72ae2316), number: 15 +jacktest: tips is [HashValue(0xe33da31b7deae7aa9468393385e76186aad506ad69dd41838376c06e72ae2316)] +jacktest: produce testing block: HashValue(0x0e550d7e32dce2678289eb5f2d9b98c7a1a29e5e552c7c0c74afb116dc7d45a3), number: 16 +jacktest: connect dag, HashValue(0x0e550d7e32dce2678289eb5f2d9b98c7a1a29e5e552c7c0c74afb116dc7d45a3), number: 16 +jacktest: tips is [HashValue(0x0e550d7e32dce2678289eb5f2d9b98c7a1a29e5e552c7c0c74afb116dc7d45a3)] +jacktest: produce testing block: HashValue(0x86b03d605be278a69f619dbec04421aea643c3e3c994dc114b5eebbb9cb462dd), number: 17 +jacktest: connect dag, HashValue(0x86b03d605be278a69f619dbec04421aea643c3e3c994dc114b5eebbb9cb462dd), number: 17 +jacktest: tips is [HashValue(0x86b03d605be278a69f619dbec04421aea643c3e3c994dc114b5eebbb9cb462dd)] +jacktest: produce testing block: HashValue(0x48b390e9bc3b8f0a4de199e6b87374eb175a382cd8115f9f97c654e5b9f5154f), number: 18 +jacktest: connect dag, HashValue(0x48b390e9bc3b8f0a4de199e6b87374eb175a382cd8115f9f97c654e5b9f5154f), number: 18 +jacktest: tips is [HashValue(0x48b390e9bc3b8f0a4de199e6b87374eb175a382cd8115f9f97c654e5b9f5154f)] +jacktest: produce testing block: HashValue(0x41928679306e1cac22cf85f4c7094b1a1ba9d675e01cdec63a8d04d0c8ea4b90), number: 19 +jacktest: connect dag, HashValue(0x41928679306e1cac22cf85f4c7094b1a1ba9d675e01cdec63a8d04d0c8ea4b90), number: 19 +jacktest: tips is [HashValue(0x41928679306e1cac22cf85f4c7094b1a1ba9d675e01cdec63a8d04d0c8ea4b90)] +jacktest: produce testing block: HashValue(0x7c824ae15dc83c4f5fc11a4699f1a7cc290055aeeb55bab42c2e71e35b22d640), number: 20 +jacktest: connect dag, HashValue(0x7c824ae15dc83c4f5fc11a4699f1a7cc290055aeeb55bab42c2e71e35b22d640), number: 20 +jacktest: tips is [HashValue(0x7dc84137c2d556b2a204bb3a57238e1490fa63e97e693d65ea00f50fa73985bb)] +jacktest: produce testing block: HashValue(0xa158c4ad05e9f4cb459e9b00d6f0b814b13e611b93e24a13f4a60d08dee9843d), number: 11 +jacktest: connect dag, 
HashValue(0xa158c4ad05e9f4cb459e9b00d6f0b814b13e611b93e24a13f4a60d08dee9843d), number: 11 +jacktest: tips is [HashValue(0xa158c4ad05e9f4cb459e9b00d6f0b814b13e611b93e24a13f4a60d08dee9843d)] +jacktest: produce testing block: HashValue(0xd62ca0badfa9881d94bd54ae055643ad95ff6fd47c9dee77f51c47c4214c434a), number: 12 +jacktest: connect dag, HashValue(0xd62ca0badfa9881d94bd54ae055643ad95ff6fd47c9dee77f51c47c4214c434a), number: 12 +jacktest: tips is [HashValue(0xd62ca0badfa9881d94bd54ae055643ad95ff6fd47c9dee77f51c47c4214c434a)] +jacktest: produce testing block: HashValue(0x9d226ed5459a618f1cf9e453662e0f7ee294b196e3e7766beea4a5bb79c7f7ed), number: 13 +jacktest: connect dag, HashValue(0x9d226ed5459a618f1cf9e453662e0f7ee294b196e3e7766beea4a5bb79c7f7ed), number: 13 +jacktest: tips is [HashValue(0x9d226ed5459a618f1cf9e453662e0f7ee294b196e3e7766beea4a5bb79c7f7ed)] +jacktest: produce testing block: HashValue(0x281004df1707d0c90f5daad39520b0f36982598212bca8c8d805d813b5a48cab), number: 14 +jacktest: connect dag, HashValue(0x281004df1707d0c90f5daad39520b0f36982598212bca8c8d805d813b5a48cab), number: 14 +jacktest: tips is [HashValue(0x281004df1707d0c90f5daad39520b0f36982598212bca8c8d805d813b5a48cab)] +jacktest: produce testing block: HashValue(0x7e3ed1bc7ca452c6437eaebbfb840def34cdd1507359b41a9d83a857d2260602), number: 15 +jacktest: connect dag, HashValue(0x7e3ed1bc7ca452c6437eaebbfb840def34cdd1507359b41a9d83a857d2260602), number: 15 +jacktest: now sync dag block -- ensure_dag_parent_blocks_exist +jacktest: block is a dag block, its id: HashValue(0x21f6056cba2be45a9b1460965f56b3c27daa2fb169e735bde9935130b8d439ec), its parents: Some([HashValue(0x7dc84137c2d556b2a204bb3a57238e1490fa63e97e693d65ea00f50fa73985bb)]) +jacktest: connect block: HashValue(0x7dc84137c2d556b2a204bb3a57238e1490fa63e97e693d65ea00f50fa73985bb), number: 10 +jacktest: connect dag, HashValue(0x7dc84137c2d556b2a204bb3a57238e1490fa63e97e693d65ea00f50fa73985bb), number: 10 +jacktest: now apply for sync after fetching: HashValue(0x21f6056cba2be45a9b1460965f56b3c27daa2fb169e735bde9935130b8d439ec), number: 11 +jacktest: connect dag, HashValue(0x21f6056cba2be45a9b1460965f56b3c27daa2fb169e735bde9935130b8d439ec), number: 11 +jacktest: now apply for sync after fetching: HashValue(0x12882c913970aed16ece01f13c11233f9f7b0ac7c7ae4f7505079dc7b9d9d234), number: 12 +jacktest: connect dag, HashValue(0x12882c913970aed16ece01f13c11233f9f7b0ac7c7ae4f7505079dc7b9d9d234), number: 12 +jacktest: now apply for sync after fetching: HashValue(0x56d2f1420b4ec84354e8278111e10c04bf4283b0006ad0df911f26295fbbc30f), number: 13 +jacktest: connect dag, HashValue(0x56d2f1420b4ec84354e8278111e10c04bf4283b0006ad0df911f26295fbbc30f), number: 13 +jacktest: now apply for sync after fetching: HashValue(0x0b219c4673c20597c617dc05cc7323e813db1ff30aa3b9848900b11656a7f3e7), number: 14 +jacktest: connect dag, HashValue(0x0b219c4673c20597c617dc05cc7323e813db1ff30aa3b9848900b11656a7f3e7), number: 14 +jacktest: now apply for sync after fetching: HashValue(0xe33da31b7deae7aa9468393385e76186aad506ad69dd41838376c06e72ae2316), number: 15 +jacktest: connect dag, HashValue(0xe33da31b7deae7aa9468393385e76186aad506ad69dd41838376c06e72ae2316), number: 15 +jacktest: now apply for sync after fetching: HashValue(0x0e550d7e32dce2678289eb5f2d9b98c7a1a29e5e552c7c0c74afb116dc7d45a3), number: 16 +jacktest: connect dag, HashValue(0x0e550d7e32dce2678289eb5f2d9b98c7a1a29e5e552c7c0c74afb116dc7d45a3), number: 16 +jacktest: now apply for sync after fetching: 
HashValue(0x86b03d605be278a69f619dbec04421aea643c3e3c994dc114b5eebbb9cb462dd), number: 17 +jacktest: connect dag, HashValue(0x86b03d605be278a69f619dbec04421aea643c3e3c994dc114b5eebbb9cb462dd), number: 17 +jacktest: now apply for sync after fetching: HashValue(0x48b390e9bc3b8f0a4de199e6b87374eb175a382cd8115f9f97c654e5b9f5154f), number: 18 +jacktest: connect dag, HashValue(0x48b390e9bc3b8f0a4de199e6b87374eb175a382cd8115f9f97c654e5b9f5154f), number: 18 +jacktest: now apply for sync after fetching: HashValue(0x41928679306e1cac22cf85f4c7094b1a1ba9d675e01cdec63a8d04d0c8ea4b90), number: 19 +jacktest: connect dag, HashValue(0x41928679306e1cac22cf85f4c7094b1a1ba9d675e01cdec63a8d04d0c8ea4b90), number: 19 +jacktest: now apply for sync after fetching: HashValue(0x7c824ae15dc83c4f5fc11a4699f1a7cc290055aeeb55bab42c2e71e35b22d640), number: 20 +jacktest: connect dag, HashValue(0x7c824ae15dc83c4f5fc11a4699f1a7cc290055aeeb55bab42c2e71e35b22d640), number: 20 +stest thread stopped +stest thread stopped +stest thread stopped +stest thread stopped +stest thread stopped +stest thread stopped +stest thread stopped +stest thread stopped +stest thread stopped +stest thread stopped +stest thread stopped +stest thread stopped +stest thread stopped +stest thread stopped +stest thread stopped +stest thread stopped +test tasks::tests::test_full_sync_fork ... ok + +test result: ok. 1 passed; 0 failed; 0 ignored; 0 measured; 52 filtered out; finished in 152.63s + + +running 0 tests + +test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 3 filtered out; finished in 0.00s + + +running 0 tests + +test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 1 filtered out; finished in 0.00s + + +running 0 tests + +test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.00s + + +running 0 tests + +test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.00s + + +running 0 tests + +test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.00s + + +running 0 tests + +test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 12 filtered out; finished in 0.00s + + +running 0 tests + +test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.00s + + +running 0 tests + +test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.00s + + +running 0 tests + +test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 9 filtered out; finished in 0.00s + + +running 0 tests + +test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.00s + + +running 0 tests + +test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 1 filtered out; finished in 0.00s + + +running 0 tests + +test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 5 filtered out; finished in 0.00s + + +running 0 tests + +test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 1 filtered out; finished in 0.00s + + +running 0 tests + +test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.00s + + +running 0 tests + +test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 1 filtered out; finished in 0.00s + + +running 0 tests + +test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 27 filtered out; finished in 0.00s + + +running 0 tests + +test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.00s + + +running 0 tests + +test result: ok. 
0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.00s + + +running 0 tests + +test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 2 filtered out; finished in 0.00s + + +running 0 tests + +test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 1 filtered out; finished in 0.00s + + +running 0 tests + +test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.00s + + +running 0 tests + +test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 8 filtered out; finished in 0.00s + + +running 0 tests + +test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 10 filtered out; finished in 0.00s + + +running 0 tests + +test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.00s + + +running 0 tests + +test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 1 filtered out; finished in 0.00s + + +running 0 tests + +test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 4 filtered out; finished in 0.00s + + +running 0 tests + +test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.00s + + +running 0 tests + +test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 4 filtered out; finished in 0.00s + + +running 0 tests + +test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 1 filtered out; finished in 0.00s + + +running 0 tests + +test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.00s + diff --git a/jacktest2.log b/jacktest2.log new file mode 100644 index 0000000000..06ad7c6bc4 --- /dev/null +++ b/jacktest2.log @@ -0,0 +1,757 @@ + +running 0 tests + +test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.00s + + +running 0 tests + +test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 1 filtered out; finished in 0.00s + + +running 0 tests + +test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 2 filtered out; finished in 0.00s + + +running 0 tests + +test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.00s + + +running 0 tests + +test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 4 filtered out; finished in 0.00s + + +running 0 tests + +test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 5 filtered out; finished in 0.00s + + +running 0 tests + +test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.00s + + +running 0 tests + +test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.00s + + +running 0 tests + +test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 45 filtered out; finished in 0.00s + + +running 0 tests + +test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.00s + + +running 0 tests + +test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 1 filtered out; finished in 0.00s + + +running 0 tests + +test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.00s + + +running 0 tests + +test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.00s + + +running 0 tests + +test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.00s + + +running 0 tests + +test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.00s + + +running 0 tests + +test result: ok. 
0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.00s

[... further all-empty "running 0 tests" result blocks omitted ...]

+running 0 tests
+
+test result: ok.
0 passed; 0 failed; 0 ignored; 0 measured; 1 filtered out; finished in 0.00s + + +running 1 test + +jacktest: produce testing block: HashValue(0x9e6f9d15fffd15ab1c8d976c56c9a84d726a84606f73197b0fb866b0b3b2c5bf), number: 1 +jacktest: produce testing block: HashValue(0xe37459694e5d9be1c66090606d98f1e6adfa08e175886b9831aa207f65340ee2), number: 2 +jacktest: tips is [HashValue(0xe37459694e5d9be1c66090606d98f1e6adfa08e175886b9831aa207f65340ee2)] +jacktest: produce testing block: HashValue(0xf26e4421af327fd0b345bff65003d7a58d8174ed7fac6f074405ba3eca1ba77d), number: 3 +jacktest: now go to execute dag block: id: HashValue(0xf26e4421af327fd0b345bff65003d7a58d8174ed7fac6f074405ba3eca1ba77d), number : 3 +jacktest: connect dag, HashValue(0xf26e4421af327fd0b345bff65003d7a58d8174ed7fac6f074405ba3eca1ba77d), number: 3 +jacktest: tips is [HashValue(0xf26e4421af327fd0b345bff65003d7a58d8174ed7fac6f074405ba3eca1ba77d)] +jacktest: produce testing block: HashValue(0x9d03d9652bbd8df277b5a3f6301ac4b6f579f8f91b466c8b310e8fcc6e036c51), number: 4 +jacktest: now go to execute dag block: id: HashValue(0x9d03d9652bbd8df277b5a3f6301ac4b6f579f8f91b466c8b310e8fcc6e036c51), number : 4 +jacktest: connect dag, HashValue(0x9d03d9652bbd8df277b5a3f6301ac4b6f579f8f91b466c8b310e8fcc6e036c51), number: 4 +jacktest: tips is [HashValue(0x9d03d9652bbd8df277b5a3f6301ac4b6f579f8f91b466c8b310e8fcc6e036c51)] +jacktest: produce testing block: HashValue(0x85054da06f86d9105297f545b8dc297504ca3d44e2a008fdcbd659025e478e13), number: 5 +jacktest: now go to execute dag block: id: HashValue(0x85054da06f86d9105297f545b8dc297504ca3d44e2a008fdcbd659025e478e13), number : 5 +jacktest: connect dag, HashValue(0x85054da06f86d9105297f545b8dc297504ca3d44e2a008fdcbd659025e478e13), number: 5 +jacktest: tips is [HashValue(0x85054da06f86d9105297f545b8dc297504ca3d44e2a008fdcbd659025e478e13)] +jacktest: produce testing block: HashValue(0xf2dc8edda109198853db90a328c228ff0b80970fad142e72c4093eb71e41b53b), number: 6 +jacktest: now go to execute dag block: id: HashValue(0xf2dc8edda109198853db90a328c228ff0b80970fad142e72c4093eb71e41b53b), number : 6 +jacktest: connect dag, HashValue(0xf2dc8edda109198853db90a328c228ff0b80970fad142e72c4093eb71e41b53b), number: 6 +jacktest: tips is [HashValue(0xf2dc8edda109198853db90a328c228ff0b80970fad142e72c4093eb71e41b53b)] +jacktest: produce testing block: HashValue(0x8056778ba78c74efa563e94ddc73ec692876044ec93090b98c6f8b4e76f9b47b), number: 7 +jacktest: now go to execute dag block: id: HashValue(0x8056778ba78c74efa563e94ddc73ec692876044ec93090b98c6f8b4e76f9b47b), number : 7 +jacktest: connect dag, HashValue(0x8056778ba78c74efa563e94ddc73ec692876044ec93090b98c6f8b4e76f9b47b), number: 7 +jacktest: tips is [HashValue(0x8056778ba78c74efa563e94ddc73ec692876044ec93090b98c6f8b4e76f9b47b)] +jacktest: produce testing block: HashValue(0xda82345a54b95de04e4b7d14069c1066e95bc120127a751b146a92b8685e44dd), number: 8 +jacktest: now go to execute dag block: id: HashValue(0xda82345a54b95de04e4b7d14069c1066e95bc120127a751b146a92b8685e44dd), number : 8 +jacktest: connect dag, HashValue(0xda82345a54b95de04e4b7d14069c1066e95bc120127a751b146a92b8685e44dd), number: 8 +jacktest: tips is [HashValue(0xda82345a54b95de04e4b7d14069c1066e95bc120127a751b146a92b8685e44dd)] +jacktest: produce testing block: HashValue(0x23d9a291e1f5932271cab856fbab1478c2fa21a6ded01c5599e0d20c2e9b0b83), number: 9 +jacktest: now go to execute dag block: id: HashValue(0x23d9a291e1f5932271cab856fbab1478c2fa21a6ded01c5599e0d20c2e9b0b83), number : 9 +jacktest: connect dag, 
HashValue(0x23d9a291e1f5932271cab856fbab1478c2fa21a6ded01c5599e0d20c2e9b0b83), number: 9 +jacktest: tips is [HashValue(0x23d9a291e1f5932271cab856fbab1478c2fa21a6ded01c5599e0d20c2e9b0b83)] +jacktest: produce testing block: HashValue(0xa6790dbe8e7a69ebb5768516ac3897ca1b497a45496f2c8a57d0e50b45e0b9e6), number: 10 +jacktest: now go to execute dag block: id: HashValue(0xa6790dbe8e7a69ebb5768516ac3897ca1b497a45496f2c8a57d0e50b45e0b9e6), number : 10 +jacktest: connect dag, HashValue(0xa6790dbe8e7a69ebb5768516ac3897ca1b497a45496f2c8a57d0e50b45e0b9e6), number: 10 +jacktest: node2 now create block +jacktest: produce testing block: HashValue(0xa4377951b453129fc9f9fcf19b8375cef8d0ea4f343efd1fea1540b33ba359d5), number: 1 +jacktest: produce testing block: HashValue(0x4f7d5b536cd5531d8d0088242e1efd2d1fbe8822dc47c0eb9ee49bd9c1e60513), number: 2 +jacktest: tips is [HashValue(0x4f7d5b536cd5531d8d0088242e1efd2d1fbe8822dc47c0eb9ee49bd9c1e60513)] +jacktest: produce testing block: HashValue(0xc3a015954e71bb0b6882888fb27b88134dbd4f6dce6d5b545331af0a6572e16d), number: 3 +jacktest: now go to execute dag block: id: HashValue(0xc3a015954e71bb0b6882888fb27b88134dbd4f6dce6d5b545331af0a6572e16d), number : 3 +jacktest: connect dag, HashValue(0xc3a015954e71bb0b6882888fb27b88134dbd4f6dce6d5b545331af0a6572e16d), number: 3 +jacktest: tips is [HashValue(0xc3a015954e71bb0b6882888fb27b88134dbd4f6dce6d5b545331af0a6572e16d)] +jacktest: produce testing block: HashValue(0x0b499922e9aaed45e4c9b585112abfdf40ded3ce857037e59d9928b752f12e63), number: 4 +jacktest: now go to execute dag block: id: HashValue(0x0b499922e9aaed45e4c9b585112abfdf40ded3ce857037e59d9928b752f12e63), number : 4 +jacktest: connect dag, HashValue(0x0b499922e9aaed45e4c9b585112abfdf40ded3ce857037e59d9928b752f12e63), number: 4 +jacktest: tips is [HashValue(0x0b499922e9aaed45e4c9b585112abfdf40ded3ce857037e59d9928b752f12e63)] +jacktest: produce testing block: HashValue(0xe3411a0f4ffa888e64ae6d0be760c1c9257bcd28a6f2aa0b8d8a2f3f7a8663e9), number: 5 +jacktest: now go to execute dag block: id: HashValue(0xe3411a0f4ffa888e64ae6d0be760c1c9257bcd28a6f2aa0b8d8a2f3f7a8663e9), number : 5 +jacktest: connect dag, HashValue(0xe3411a0f4ffa888e64ae6d0be760c1c9257bcd28a6f2aa0b8d8a2f3f7a8663e9), number: 5 +jacktest: tips is [HashValue(0xe3411a0f4ffa888e64ae6d0be760c1c9257bcd28a6f2aa0b8d8a2f3f7a8663e9)] +jacktest: produce testing block: HashValue(0x49eef2c87cfd1cd148751e473b90bde2d159b927b6e8f033f61ef368ce6c02a1), number: 6 +jacktest: now go to execute dag block: id: HashValue(0x49eef2c87cfd1cd148751e473b90bde2d159b927b6e8f033f61ef368ce6c02a1), number : 6 +jacktest: connect dag, HashValue(0x49eef2c87cfd1cd148751e473b90bde2d159b927b6e8f033f61ef368ce6c02a1), number: 6 +jacktest: tips is [HashValue(0x49eef2c87cfd1cd148751e473b90bde2d159b927b6e8f033f61ef368ce6c02a1)] +jacktest: produce testing block: HashValue(0x4b900e37e6ee71292c5f131c6c9691acc28c08dc257d46eebf3a74d16f395964), number: 7 +jacktest: now go to execute dag block: id: HashValue(0x4b900e37e6ee71292c5f131c6c9691acc28c08dc257d46eebf3a74d16f395964), number : 7 +jacktest: connect dag, HashValue(0x4b900e37e6ee71292c5f131c6c9691acc28c08dc257d46eebf3a74d16f395964), number: 7 +jacktest: now sync dag block -- ensure_dag_parent_blocks_exist +jacktest: block is not a dag block, skipping, its id: HashValue(0x9e6f9d15fffd15ab1c8d976c56c9a84d726a84606f73197b0fb866b0b3b2c5bf), its number 1 +jacktest: now sync dag block -- ensure_dag_parent_blocks_exist2 +jacktest: now sync dag block -- ensure_dag_parent_blocks_exist +jacktest: block is not a 
dag block, skipping, its id: HashValue(0xe37459694e5d9be1c66090606d98f1e6adfa08e175886b9831aa207f65340ee2), its number 2 +jacktest: now sync dag block -- ensure_dag_parent_blocks_exist2 +jacktest: now sync dag block -- ensure_dag_parent_blocks_exist +jacktest: block is a dag block, its id: HashValue(0xf26e4421af327fd0b345bff65003d7a58d8174ed7fac6f074405ba3eca1ba77d), its parents: Some([HashValue(0xe37459694e5d9be1c66090606d98f1e6adfa08e175886b9831aa207f65340ee2)]) +jacktest: apply block: HashValue(0xe37459694e5d9be1c66090606d98f1e6adfa08e175886b9831aa207f65340ee2), number: 2 +jacktest: now sync dag block -- ensure_dag_parent_blocks_exist2 +jacktest: now go to execute dag block: id: HashValue(0xf26e4421af327fd0b345bff65003d7a58d8174ed7fac6f074405ba3eca1ba77d), number : 3 +jacktest: connect dag, HashValue(0xf26e4421af327fd0b345bff65003d7a58d8174ed7fac6f074405ba3eca1ba77d), number: 3 +jacktest: now sync dag block -- ensure_dag_parent_blocks_exist +jacktest: block is a dag block, its id: HashValue(0x9d03d9652bbd8df277b5a3f6301ac4b6f579f8f91b466c8b310e8fcc6e036c51), its parents: Some([HashValue(0xf26e4421af327fd0b345bff65003d7a58d8174ed7fac6f074405ba3eca1ba77d)]) +jacktest: apply block: HashValue(0xf26e4421af327fd0b345bff65003d7a58d8174ed7fac6f074405ba3eca1ba77d), number: 3 +jacktest: connect dag, HashValue(0xf26e4421af327fd0b345bff65003d7a58d8174ed7fac6f074405ba3eca1ba77d), number: 3 +jacktest: apply block: HashValue(0xe37459694e5d9be1c66090606d98f1e6adfa08e175886b9831aa207f65340ee2), number: 2 +stest thread stopped +stest thread stopped +stest thread stopped +stest thread stopped +stest thread stopped +stest thread stopped +stest thread stopped +stest thread stopped +stest thread stopped +stest thread stopped +stest thread stopped +stest thread stopped +stest thread stopped +stest thread stopped +stest thread stopped +stest thread stopped +test tasks::tests::test_full_sync_continue ... FAILED + +failures: + +failures: + tasks::tests::test_full_sync_continue + +test result: FAILED. 0 passed; 1 failed; 0 ignored; 0 measured; 52 filtered out; finished in 23.75s + diff --git a/miner/src/create_block_template/mod.rs b/miner/src/create_block_template/mod.rs index 1e84bc28b1..990c0b2516 100644 --- a/miner/src/create_block_template/mod.rs +++ b/miner/src/create_block_template/mod.rs @@ -115,7 +115,7 @@ impl ActorService for BlockBuilderService { impl EventHandler for BlockBuilderService { fn handle_event(&mut self, msg: NewHeadBlock, _ctx: &mut ServiceContext) { - if let Err(e) = self.inner.update_chain(msg.0.as_ref().clone()) { + if let Err(e) = self.inner.update_chain(msg.executed_block.as_ref().clone()) { error!("err : {:?}", e) } } @@ -306,6 +306,18 @@ where } } + pub fn is_dag_genesis(&self, id: HashValue) -> Result { + if let Some(header) = self.storage.get_block_header_by_hash(id)? 
{ + if header.number() == BlockDAG::dag_fork_height_with_net(self.chain.status().head().chain_id()) { + Ok(true) + } else { + Ok(false) + } + } else { + Ok(false) + } + } + pub fn create_block_template(&self) -> Result { let on_chain_block_gas_limit = self.chain.epoch().block_gas_limit(); let block_gas_limit = self diff --git a/network-rpc/api/src/lib.rs b/network-rpc/api/src/lib.rs index b0631790f3..6566b2a038 100644 --- a/network-rpc/api/src/lib.rs +++ b/network-rpc/api/src/lib.rs @@ -299,6 +299,8 @@ pub trait NetworkRpc: Sized + Send + Sync + 'static { peer_id: PeerId, request: GetTableInfo, ) -> BoxFuture>>; + + fn get_dag_block_children(&self, peer_id: PeerId, request: Vec) -> BoxFuture>>; } #[derive(Debug, Serialize, Deserialize, Clone)] diff --git a/network-rpc/src/rpc.rs b/network-rpc/src/rpc.rs index d445336f0f..3ad304b4cd 100644 --- a/network-rpc/src/rpc.rs +++ b/network-rpc/src/rpc.rs @@ -340,4 +340,13 @@ impl gen_server::NetworkRpc for NetworkRpcImpl { }; Box::pin(fut) } + + fn get_dag_block_children(&self, _peer_id:PeerId, request:Vec) -> BoxFuture > > { + let chain_service = self.chain_service.clone(); + let fut = async move { + chain_service.get_dag_block_children(request).await + }; + Box::pin(fut) + } + } diff --git a/network/tests/network_node_test.rs b/network/tests/network_node_test.rs index e17b9e94ae..c70ef5af26 100644 --- a/network/tests/network_node_test.rs +++ b/network/tests/network_node_test.rs @@ -35,7 +35,7 @@ fn test_reconnected_peers() -> anyhow::Result<()> { // stop node2, node1's peers is empty node2.stop()?; - thread::sleep(Duration::from_secs(3)); + thread::sleep(Duration::from_secs(12)); loop { let network_state = block_on(async { node1_network.network_state().await })?; debug!("network_state: {:?}", network_state); diff --git a/node/src/lib.rs b/node/src/lib.rs index 3c52be3b13..e9e44915be 100644 --- a/node/src/lib.rs +++ b/node/src/lib.rs @@ -190,7 +190,7 @@ impl NodeHandle { { //wait for new block event to been processed. Delay::new(Duration::from_millis(100)).await; - event.0.block().clone() + event.executed_block.block().clone() } else { let latest_head = chain_service.main_head_block().await?; debug!( diff --git a/node/src/node.rs b/node/src/node.rs index f237ba9277..5f8b482aa7 100644 --- a/node/src/node.rs +++ b/node/src/node.rs @@ -51,7 +51,8 @@ use starcoin_sync::block_connector::{BlockConnectorService, ExecuteRequest, Rese use starcoin_sync::sync::SyncService; use starcoin_sync::txn_sync::TxnSyncService; use starcoin_sync::verified_rpc_client::VerifiedRpcClient; -use starcoin_txpool::TxPoolActorService; +use starcoin_txpool::{TxPoolActorService, TxPoolService}; +use starcoin_txpool_api::TxPoolSyncService; use starcoin_types::system_events::{SystemShutdown, SystemStarted}; use starcoin_vm_runtime::metrics::VMMetrics; use std::sync::Arc; @@ -133,7 +134,7 @@ impl ServiceHandler for NodeService { .start_service_sync(GenerateBlockEventPacemaker::service_name()), ), NodeRequest::ResetNode(block_hash) => { - let connect_service = ctx.service_ref::()?.clone(); + let connect_service = ctx.service_ref::>()?.clone(); let fut = async move { info!("Prepare to reset node startup info to {}", block_hash); connect_service.send(ResetRequest { block_hash }).await? 
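For orientation on the two ResetNode/ReExecuteBlock hunks above: BlockConnectorService is being made generic over its txpool service in this change, so the `service_ref::()` / `register::()` turbofishes whose type arguments were lost during extraction most likely read `BlockConnectorService<TxPoolService>` in the source. The dependency-free toy sketch below (all names are illustrative stand-ins, not the real starcoin_service_registry API) shows why a caller now has to name the concrete txpool type when it asks for the service and sends it a request such as ResetRequest.

// Toy, self-contained sketch of the request/response pattern; names mirror the
// diff but the traits here are simplified stand-ins, not the real registry API.
trait ServiceRequest {
    type Response;
}

struct ResetRequest {
    block_hash: u64, // stand-in for HashValue
}

impl ServiceRequest for ResetRequest {
    type Response = Result<(), String>;
}

trait TxPoolSyncService {}
struct TxPoolService;
impl TxPoolSyncService for TxPoolService {}

// The connector service is now generic over the txpool implementation.
struct BlockConnectorService<T: TxPoolSyncService> {
    _txpool: T,
    head: u64,
}

impl<T: TxPoolSyncService> BlockConnectorService<T> {
    // Handling a request yields the response type declared by the request.
    fn handle(&mut self, msg: ResetRequest) -> <ResetRequest as ServiceRequest>::Response {
        self.head = msg.block_hash;
        Ok(())
    }
}

fn main() {
    // Callers must spell out the concrete txpool type, which is why every
    // service_ref/register call site in this diff gains a type parameter.
    let mut svc = BlockConnectorService { _txpool: TxPoolService, head: 0 };
    svc.handle(ResetRequest { block_hash: 42 }).unwrap();
    println!("head reset to {}", svc.head);
}

In the real code the pairing is the ServiceRequest/ServiceHandler machinery visible later in this diff: ResetRequest, ExecuteRequest, and the test-only CheckBlockConnectorHashValue all declare a Response type and get a matching handle implementation on BlockConnectorService.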
@@ -147,7 +148,7 @@ impl ServiceHandler for NodeService { .get_shared_sync::>() .expect("Storage must exist."); - let connect_service = ctx.service_ref::()?.clone(); + let connect_service = ctx.service_ref::>()?.clone(); let network = ctx.get_shared::()?; let fut = async move { info!("Prepare to re execute block {}", block_hash); @@ -352,7 +353,7 @@ impl NodeService { registry.register::().await?; - registry.register::().await?; + registry.register::>().await?; registry.register::().await?; let block_relayer = registry.register::().await?; diff --git a/rpc/server/src/module/pubsub/tests.rs b/rpc/server/src/module/pubsub/tests.rs index bcaef73594..a1cfa655d4 100644 --- a/rpc/server/src/module/pubsub/tests.rs +++ b/rpc/server/src/module/pubsub/tests.rs @@ -111,7 +111,9 @@ pub async fn test_subscribe_to_events() -> Result<()> { // send block let block_detail = Arc::new(executed_block); - bus.broadcast(NewHeadBlock(block_detail))?; + bus.broadcast(NewHeadBlock { + executed_block: block_detail.clone(), + })?; let mut receiver = receiver; diff --git a/state/service/src/service.rs b/state/service/src/service.rs index c27431fbe3..57432f9e8e 100644 --- a/state/service/src/service.rs +++ b/state/service/src/service.rs @@ -131,9 +131,7 @@ impl ServiceHandler for ChainStateService { impl EventHandler for ChainStateService { fn handle_event(&mut self, msg: NewHeadBlock, _ctx: &mut ServiceContext) { - let NewHeadBlock(block) = msg; - - let state_root = block.header().state_root(); + let state_root = msg.executed_block.header().state_root(); debug!("ChainStateActor change StateRoot to : {:?}", state_root); self.service.change_root(state_root); } diff --git a/sync/Cargo.toml b/sync/Cargo.toml index 2f3fb662aa..cb402751ce 100644 --- a/sync/Cargo.toml +++ b/sync/Cargo.toml @@ -42,7 +42,11 @@ stest = { workspace = true } stream-task = { workspace = true } sysinfo = { workspace = true } thiserror = { workspace = true } -starcoin-dag ={workspace = true} +starcoin-consensus = { workspace = true } +timeout-join-handler = { workspace = true } +starcoin-flexidag = { workspace = true } +starcoin-dag = { workspace = true } + [dev-dependencies] hex = { workspace = true } starcoin-miner = { workspace = true } @@ -57,6 +61,7 @@ starcoin-txpool-mock-service = { workspace = true } starcoin-executor = { workspace = true } test-helper = { workspace = true } tokio = { features = ["full"], workspace = true } +starcoin-genesis = { workspace = true } [package] authors = { workspace = true } diff --git a/sync/src/block_connector/block_connector_service.rs b/sync/src/block_connector/block_connector_service.rs index 8abcddb732..27667773bf 100644 --- a/sync/src/block_connector/block_connector_service.rs +++ b/sync/src/block_connector/block_connector_service.rs @@ -1,13 +1,18 @@ // Copyright (c) The Starcoin Core Contributors // SPDX-License-Identifier: Apache-2.0 +#[cfg(test)] +use super::CheckBlockConnectorHashValue; use crate::block_connector::{ExecuteRequest, ResetRequest, WriteBlockChainService}; use crate::sync::{CheckSyncEvent, SyncService}; -use crate::tasks::{BlockConnectedEvent, BlockDiskCheckEvent}; -use anyhow::{format_err, Result}; +use crate::tasks::{BlockConnectedEvent, BlockConnectedFinishEvent, BlockDiskCheckEvent}; +#[cfg(test)] +use anyhow::bail; +use anyhow::{format_err, Ok, Result}; use network_api::PeerProvider; -use starcoin_chain_api::{ConnectBlockError, WriteableChainService}; +use starcoin_chain_api::{ChainReader, ConnectBlockError, WriteableChainService}; use starcoin_config::{NodeConfig, 
G_CRATE_VERSION}; +use starcoin_crypto::HashValue; use starcoin_dag::blockdag::BlockDAG; use starcoin_executor::VMMetrics; use starcoin_logger::prelude::*; @@ -18,6 +23,9 @@ use starcoin_service_registry::{ use starcoin_storage::{BlockStore, Storage}; use starcoin_sync_api::PeerNewBlock; use starcoin_txpool::TxPoolService; +use starcoin_txpool_api::TxPoolSyncService; +#[cfg(test)] +use starcoin_txpool_mock_service::MockTxPoolService; use starcoin_types::block::ExecutedBlock; use starcoin_types::sync_status::SyncStatus; use starcoin_types::system_events::{MinedBlock, SyncStatusChangeEvent, SystemShutdown}; @@ -27,15 +35,21 @@ use sysinfo::{DiskExt, System, SystemExt}; const DISK_CHECKPOINT_FOR_PANIC: u64 = 1024 * 1024 * 1024 * 3; const DISK_CHECKPOINT_FOR_WARN: u64 = 1024 * 1024 * 1024 * 5; -pub struct BlockConnectorService { - chain_service: WriteBlockChainService, +pub struct BlockConnectorService +where + TransactionPoolServiceT: TxPoolSyncService + 'static, +{ + chain_service: WriteBlockChainService, sync_status: Option, config: Arc, } -impl BlockConnectorService { +impl BlockConnectorService +where + TransactionPoolServiceT: TxPoolSyncService + 'static, +{ pub fn new( - chain_service: WriteBlockChainService, + chain_service: WriteBlockChainService, config: Arc, ) -> Self { Self { @@ -52,6 +66,10 @@ impl BlockConnectorService { } } + pub fn chain_head_id(&self) -> HashValue { + self.chain_service.get_main().status().head.id() + } + pub fn check_disk_space(&mut self) -> Option> { if System::IS_SUPPORTED { let mut sys = System::new_all(); @@ -98,11 +116,17 @@ impl BlockConnectorService { } } -impl ServiceFactory for BlockConnectorService { - fn create(ctx: &mut ServiceContext) -> Result { +impl ServiceFactory + for BlockConnectorService +where + TransactionPoolServiceT: TxPoolSyncService + 'static, +{ + fn create( + ctx: &mut ServiceContext>, + ) -> Result> { let config = ctx.get_shared::>()?; let bus = ctx.bus_ref().clone(); - let txpool = ctx.get_shared::()?; + let txpool = ctx.get_shared::()?; let storage = ctx.get_shared::>()?; let startup_info = storage .get_startup_info()? @@ -123,7 +147,10 @@ impl ServiceFactory for BlockConnectorService { } } -impl ActorService for BlockConnectorService { +impl ActorService for BlockConnectorService +where + TransactionPoolServiceT: TxPoolSyncService + 'static, +{ fn started(&mut self, ctx: &mut ServiceContext) -> Result<()> { //TODO figure out a more suitable value. ctx.set_mailbox_capacity(1024); @@ -144,15 +171,19 @@ impl ActorService for BlockConnectorService { } } -impl EventHandler for BlockConnectorService { +impl EventHandler + for BlockConnectorService +where + TransactionPoolServiceT: TxPoolSyncService + 'static, +{ fn handle_event( &mut self, _: BlockDiskCheckEvent, - ctx: &mut ServiceContext, + ctx: &mut ServiceContext>, ) { if let Some(res) = self.check_disk_space() { match res { - Ok(available_space) => { + std::result::Result::Ok(available_space) => { warn!("Available diskspace only {}/GB left ", available_space) } Err(e) => { @@ -164,30 +195,80 @@ impl EventHandler for BlockConnectorService { } } -impl EventHandler for BlockConnectorService { +impl EventHandler for BlockConnectorService { fn handle_event( &mut self, msg: BlockConnectedEvent, - _ctx: &mut ServiceContext, + ctx: &mut ServiceContext>, ) { //because this block has execute at sync task, so just try connect to select head chain. 
//TODO refactor connect and execute let block = msg.block; - if let Err(e) = self.chain_service.try_connect(block) { - error!("Process connected block error: {:?}", e); + let feedback = msg.feedback; + + match msg.action { + crate::tasks::BlockConnectAction::ConnectNewBlock => { + if let Err(e) = self.chain_service.try_connect(block) { + error!("Process connected new block from sync error: {:?}", e); + } + } + crate::tasks::BlockConnectAction::ConnectExecutedBlock => { + if let Err(e) = self.chain_service.switch_new_main(block.header().id(), ctx) { + error!("Process connected executed block from sync error: {:?}", e); + } + } } + + feedback.map(|f| f.unbounded_send(BlockConnectedFinishEvent)); } } -impl EventHandler for BlockConnectorService { - fn handle_event(&mut self, msg: MinedBlock, _ctx: &mut ServiceContext) { - let MinedBlock(new_block) = msg; +#[cfg(test)] +impl EventHandler for BlockConnectorService { + fn handle_event( + &mut self, + msg: BlockConnectedEvent, + ctx: &mut ServiceContext>, + ) { + //because this block has execute at sync task, so just try connect to select head chain. + //TODO refactor connect and execute + + let block = msg.block; + let feedback = msg.feedback; + + match msg.action { + crate::tasks::BlockConnectAction::ConnectNewBlock => { + if let Err(e) = self.chain_service.apply_failed(block) { + error!("Process connected new block from sync error: {:?}", e); + } + } + crate::tasks::BlockConnectAction::ConnectExecutedBlock => { + if let Err(e) = self.chain_service.switch_new_main(block.header().id(), ctx) { + error!("Process connected executed block from sync error: {:?}", e); + } + } + } + + feedback.map(|f| f.unbounded_send(BlockConnectedFinishEvent)); + } +} + +impl EventHandler + for BlockConnectorService +where + TransactionPoolServiceT: TxPoolSyncService + 'static, +{ + fn handle_event(&mut self, msg: MinedBlock, ctx: &mut ServiceContext) { + let MinedBlock(new_block) = msg.clone(); + let block_header = new_block.header().clone(); let id = new_block.header().id(); debug!("try connect mined block: {}", id); match self.chain_service.try_connect(new_block.as_ref().clone()) { - Ok(_) => debug!("Process mined block {} success.", id), + std::result::Result::Ok(()) => { + ctx.broadcast(msg) + } Err(e) => { warn!("Process mined block {} fail, error: {:?}", id, e); } @@ -195,13 +276,21 @@ impl EventHandler for BlockConnectorService { } } -impl EventHandler for BlockConnectorService { +impl EventHandler + for BlockConnectorService +where + TransactionPoolServiceT: TxPoolSyncService + 'static, +{ fn handle_event(&mut self, msg: SyncStatusChangeEvent, _ctx: &mut ServiceContext) { self.sync_status = Some(msg.0); } } -impl EventHandler for BlockConnectorService { +impl EventHandler + for BlockConnectorService +where + TransactionPoolServiceT: TxPoolSyncService + 'static, +{ fn handle_event(&mut self, msg: PeerNewBlock, ctx: &mut ServiceContext) { if !self.is_synced() { debug!("[connector] Ignore PeerNewBlock event because the node has not been synchronized yet."); @@ -210,11 +299,13 @@ impl EventHandler for BlockConnectorService { let peer_id = msg.get_peer_id(); if let Err(e) = self.chain_service.try_connect(msg.get_block().clone()) { match e.downcast::() { - Ok(connect_error) => { + std::result::Result::Ok(connect_error) => { match connect_error { ConnectBlockError::FutureBlock(block) => { //TODO cache future block - if let Ok(sync_service) = ctx.service_ref::() { + if let std::result::Result::Ok(sync_service) = + ctx.service_ref::() + { info!( "BlockConnector try 
connect future block ({:?},{}), peer_id:{:?}, notify Sync service check sync.", block.id(), @@ -260,22 +351,51 @@ impl EventHandler for BlockConnectorService { } } -impl ServiceHandler for BlockConnectorService { +impl ServiceHandler + for BlockConnectorService +where + TransactionPoolServiceT: TxPoolSyncService + 'static, +{ fn handle( &mut self, msg: ResetRequest, - _ctx: &mut ServiceContext, + _ctx: &mut ServiceContext>, ) -> Result<()> { self.chain_service.reset(msg.block_hash) } } -impl ServiceHandler for BlockConnectorService { +impl ServiceHandler + for BlockConnectorService +where + TransactionPoolServiceT: TxPoolSyncService + 'static, +{ fn handle( &mut self, msg: ExecuteRequest, - _ctx: &mut ServiceContext, + _ctx: &mut ServiceContext>, ) -> Result { self.chain_service.execute(msg.block) } } + +#[cfg(test)] +impl ServiceHandler + for BlockConnectorService +where + TransactionPoolServiceT: TxPoolSyncService + 'static, +{ + fn handle( + &mut self, + msg: CheckBlockConnectorHashValue, + _ctx: &mut ServiceContext>, + ) -> Result<()> { + if self.chain_service.get_main().status().head().id() == msg.head_hash { + info!("the branch in chain service is the same as target's branch"); + Ok(()) + } else { + info!("mock branch in chain service is not the same as target's branch"); + bail!("blockchain in chain service is not the same as target!"); + } + } +} diff --git a/sync/src/block_connector/mod.rs b/sync/src/block_connector/mod.rs index 05b7cfd2b2..6d362dcf0d 100644 --- a/sync/src/block_connector/mod.rs +++ b/sync/src/block_connector/mod.rs @@ -11,6 +11,8 @@ mod metrics; mod test_illegal_block; #[cfg(test)] mod test_write_block_chain; +#[cfg(test)] +mod test_write_dag_block_chain; mod write_block_chain; pub use block_connector_service::BlockConnectorService; @@ -40,3 +42,15 @@ pub struct ExecuteRequest { impl ServiceRequest for ExecuteRequest { type Response = anyhow::Result; } + +#[cfg(test)] +#[derive(Debug, Clone)] +pub struct CheckBlockConnectorHashValue { + pub head_hash: HashValue, + pub number: u64, +} + +#[cfg(test)] +impl ServiceRequest for CheckBlockConnectorHashValue { + type Response = anyhow::Result<()>; +} diff --git a/sync/src/block_connector/test_illegal_block.rs b/sync/src/block_connector/test_illegal_block.rs index 2572ab0e39..11b572d2f0 100644 --- a/sync/src/block_connector/test_illegal_block.rs +++ b/sync/src/block_connector/test_illegal_block.rs @@ -1,7 +1,6 @@ // Copyright (c) The Starcoin Core Contributors // SPDX-License-Identifier: Apache-2.0 #![allow(clippy::integer_arithmetic)] - use crate::block_connector::{ create_writeable_block_chain, gen_blocks, new_block, WriteBlockChainService, }; diff --git a/sync/src/block_connector/test_write_dag_block_chain.rs b/sync/src/block_connector/test_write_dag_block_chain.rs new file mode 100644 index 0000000000..9d1c483946 --- /dev/null +++ b/sync/src/block_connector/test_write_dag_block_chain.rs @@ -0,0 +1,214 @@ +// Copyright (c) The Starcoin Core Contributors +// SPDX-License-Identifier: Apache-2.0 +#![allow(clippy::integer_arithmetic)] +use crate::block_connector::test_write_block_chain::create_writeable_block_chain; +use crate::block_connector::WriteBlockChainService; +use async_std::path::Path; +use starcoin_account_api::AccountInfo; +use starcoin_chain::{BlockChain, ChainReader}; +use starcoin_chain_service::WriteableChainService; +use starcoin_config::NodeConfig; +use starcoin_consensus::Consensus; +use starcoin_crypto::HashValue; +use starcoin_dag::consensusdb::prelude::FlexiDagStorageConfig; +use 
starcoin_time_service::TimeService; +use starcoin_txpool_mock_service::MockTxPoolService; +use starcoin_types::block::Block; +use std::sync::Arc; + +pub fn gen_dag_blocks( + times: u64, + writeable_block_chain_service: &mut WriteBlockChainService, + time_service: &dyn TimeService, +) -> Option { + let miner_account = AccountInfo::random(); + let mut last_block_hash = None; + if times > 0 { + for i in 0..times { + let block = new_dag_block( + Some(&miner_account), + writeable_block_chain_service, + time_service, + ); + last_block_hash = Some(block.id()); + let e = writeable_block_chain_service.try_connect(block); + println!("try_connect result: {:?}", e); + assert!(e.is_ok()); + if (i + 1) % 3 == 0 { + writeable_block_chain_service.time_sleep(5); + } + } + last_block_hash + } else { + None + } + + // match result { + // super::write_block_chain::ConnectOk::Duplicate(block) + // | super::write_block_chain::ConnectOk::ExeConnectMain(block) + // | super::write_block_chain::ConnectOk::ExeConnectBranch(block) + // | super::write_block_chain::ConnectOk::Connect(block) => Some(block.header().id()), + // super::write_block_chain::ConnectOk::DagConnected + // | super::write_block_chain::ConnectOk::MainDuplicate + // | super::write_block_chain::ConnectOk::DagPending + // | super::write_block_chain::ConnectOk::DagConnectMissingBlock => { + // unreachable!("should not reach here, result: {:?}", result); + // } + // } +} + +pub fn new_dag_block( + miner_account: Option<&AccountInfo>, + writeable_block_chain_service: &mut WriteBlockChainService, + time_service: &dyn TimeService, +) -> Block { + let miner = match miner_account { + Some(m) => m.clone(), + None => AccountInfo::random(), + }; + let miner_address = *miner.address(); + let block_chain = writeable_block_chain_service.get_main(); + let tips = block_chain.current_tips_hash().expect("failed to get tips").map(|tips| tips); + let (block_template, _) = block_chain + .create_block_template(miner_address, None, Vec::new(), vec![], None, tips) + .unwrap(); + block_chain + .consensus() + .create_block(block_template, time_service) + .unwrap() +} + +#[stest::test] +async fn test_dag_block_chain_apply() { + let times = 12; + let (mut writeable_block_chain_service, node_config, _) = create_writeable_block_chain().await; + let net = node_config.net(); + let last_header_id = gen_dag_blocks( + times, + &mut writeable_block_chain_service, + net.time_service().as_ref(), + ); + assert_eq!( + writeable_block_chain_service + .get_main() + .current_header() + .id(), + last_header_id.unwrap() + ); + println!("finish test_block_chain_apply"); +} + +fn gen_fork_dag_block_chain( + fork_number: u64, + node_config: Arc, + times: u64, + writeable_block_chain_service: &mut WriteBlockChainService, +) -> Option { + let miner_account = AccountInfo::random(); + let dag_storage = starcoin_dag::consensusdb::prelude::FlexiDagStorage::create_from_path( + Path::new("dag/db/starcoindb"), + FlexiDagStorageConfig::new(), + ).expect("create dag storage fail"); + let dag = starcoin_dag::blockdag::BlockDAG::new(8, dag_storage); + if let Some(block_header) = writeable_block_chain_service + .get_main() + .get_header_by_number(fork_number) + .unwrap() + { + let mut parent_id = block_header.id(); + let net = node_config.net(); + for _i in 0..times { + let block_chain = BlockChain::new( + net.time_service(), + parent_id, + writeable_block_chain_service.get_main().get_storage(), + None, + dag.clone(), + ) + .unwrap(); + let (block_template, _) = block_chain + 
.create_block_template(*miner_account.address(), None, Vec::new(), vec![], None, None) + .unwrap(); + let block = block_chain + .consensus() + .create_block(block_template, net.time_service().as_ref()) + .unwrap(); + parent_id = block.id(); + + writeable_block_chain_service.try_connect(block).unwrap(); + } + return Some(parent_id); + } + return None; +} + +#[stest::test(timeout = 120)] +async fn test_block_chain_switch_main() { + let times = 12; + let (mut writeable_block_chain_service, node_config, _) = create_writeable_block_chain().await; + let net = node_config.net(); + let mut last_block = gen_dag_blocks( + times, + &mut writeable_block_chain_service, + net.time_service().as_ref(), + ); + assert_eq!( + writeable_block_chain_service + .get_main() + .current_header() + .id(), + last_block.unwrap() + ); + + last_block = gen_fork_dag_block_chain( + 0, + node_config, + 2 * times, + &mut writeable_block_chain_service, + ); + + assert_eq!( + writeable_block_chain_service + .get_main() + .current_header() + .id(), + last_block.unwrap() + ); +} + +#[stest::test] +async fn test_block_chain_reset() -> anyhow::Result<()> { + let times = 10; + let (mut writeable_block_chain_service, node_config, _) = create_writeable_block_chain().await; + let net = node_config.net(); + let mut last_block = gen_dag_blocks( + times, + &mut writeable_block_chain_service, + net.time_service().as_ref(), + ); + assert_eq!( + writeable_block_chain_service + .get_main() + .current_header() + .id(), + last_block.unwrap() + ); + let block = writeable_block_chain_service + .get_main() + .get_block_by_number(3)? + .unwrap(); + writeable_block_chain_service.reset(block.id())?; + assert_eq!( + writeable_block_chain_service + .get_main() + .current_header() + .number(), + 3 + ); + + assert!(writeable_block_chain_service + .get_main() + .get_block_by_number(2)? + .is_some()); + Ok(()) +} diff --git a/sync/src/block_connector/write_block_chain.rs b/sync/src/block_connector/write_block_chain.rs index db94159751..e295aa38d2 100644 --- a/sync/src/block_connector/write_block_chain.rs +++ b/sync/src/block_connector/write_block_chain.rs @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 use crate::block_connector::metrics::ChainMetrics; -use anyhow::{format_err, Result}; +use anyhow::{bail, format_err, Ok, Result}; use starcoin_chain::BlockChain; use starcoin_chain_api::{ChainReader, ChainWriter, ConnectBlockError, WriteableChainService}; use starcoin_config::NodeConfig; @@ -11,7 +11,7 @@ use starcoin_dag::blockdag::BlockDAG; use starcoin_executor::VMMetrics; use starcoin_logger::prelude::*; use starcoin_service_registry::bus::{Bus, BusService}; -use starcoin_service_registry::ServiceRef; +use starcoin_service_registry::{ServiceContext, ServiceRef}; use starcoin_storage::Store; use starcoin_txpool_api::TxPoolSyncService; use starcoin_types::block::BlockInfo; @@ -20,8 +20,9 @@ use starcoin_types::{ startup_info::StartupInfo, system_events::{NewBranch, NewHeadBlock}, }; -use std::fmt::Formatter; -use std::sync::Arc; +use std::{fmt::Formatter, sync::Arc}; + +use super::BlockConnectorService; const MAX_ROLL_BACK_BLOCK: usize = 10; @@ -77,7 +78,7 @@ where if let Some(metrics) = self.metrics.as_ref() { let result = match result.as_ref() { - Ok(connect) => format!("Ok_{}", connect), + std::result::Result::Ok(connect) => format!("Ok_{}", connect), Err(err) => { if let Some(connect_err) = err.downcast_ref::() { format!("Err_{}", connect_err.reason()) @@ -95,15 +96,15 @@ where } } -impl

<P> WriteBlockChainService<P>

+impl WriteBlockChainService where - P: TxPoolSyncService + 'static, + TransactionPoolServiceT: TxPoolSyncService + 'static, { pub fn new( config: Arc, startup_info: StartupInfo, storage: Arc, - txpool: P, + txpool: TransactionPoolServiceT, bus: ServiceRef, vm_metrics: Option, dag: BlockDAG, @@ -176,6 +177,61 @@ where &self.main } + #[cfg(test)] + pub fn time_sleep(&self, sec: u64) { + self.config.net().time_service().sleep(sec * 1000000); + } + + #[cfg(test)] + pub fn apply_failed(&mut self, block: Block) -> Result<()> { + use anyhow::bail; + use starcoin_chain::verifier::FullVerifier; + + // apply but no connection + let verified_block = self.main.verify_with_verifier::(block)?; + let executed_block = self.main.execute(verified_block)?; + let enacted_blocks = vec![executed_block.block().clone()]; + self.do_new_head(executed_block, 1, enacted_blocks, 0, vec![])?; + // bail!("failed to apply for tesing the connection later!"); + Ok(()) + } + + // for sync task to connect to its chain, if chain's total difficulties is larger than the main + // switch by: + // 1, update the startup info + // 2, broadcast the new header + pub fn switch_new_main( + &mut self, + new_head_block: HashValue, + ctx: &mut ServiceContext>, + ) -> Result<()> + where + TransactionPoolServiceT: TxPoolSyncService, + { + let new_branch = BlockChain::new( + self.config.net().time_service(), + new_head_block, + self.storage.clone(), + self.vm_metrics.clone(), + self.main.dag().clone(), + )?; + + let main_total_difficulty = self.main.get_total_difficulty()?; + let branch_total_difficulty = new_branch.get_total_difficulty()?; + if branch_total_difficulty > main_total_difficulty { + // todo: handle StartupInfo.dag_main + self.main = new_branch; + self.update_startup_info(self.main.head_block().header())?; + ctx.broadcast(NewHeadBlock { + executed_block: Arc::new(self.main.head_block()), + // tips: self.main.status().tips_hash.clone(), + }); + Ok(()) + } else { + bail!("no need to switch"); + } + } + pub fn select_head(&mut self, new_branch: BlockChain) -> Result<()> { let executed_block = new_branch.head_block(); let main_total_difficulty = self.main.get_total_difficulty()?; @@ -390,7 +446,10 @@ where .inc() } - if let Err(e) = self.bus.broadcast(NewHeadBlock(Arc::new(block))) { + if let Err(e) = self.bus.broadcast(NewHeadBlock { + executed_block: Arc::new(block), + // tips: self.main.status().tips_hash.clone(), + }) { error!("Broadcast NewHeadBlock error: {:?}", e); } } diff --git a/sync/src/sync.rs b/sync/src/sync.rs index 66b21e03e8..57a900b625 100644 --- a/sync/src/sync.rs +++ b/sync/src/sync.rs @@ -27,10 +27,12 @@ use starcoin_sync_api::{ PeerScoreRequest, PeerScoreResponse, SyncCancelRequest, SyncProgressReport, SyncProgressRequest, SyncServiceHandler, SyncStartRequest, SyncStatusRequest, SyncTarget, }; +use starcoin_txpool::TxPoolService; use starcoin_types::block::BlockIdAndNumber; use starcoin_types::startup_info::ChainStatus; use starcoin_types::sync_status::SyncStatus; use starcoin_types::system_events::{NewHeadBlock, SyncStatusChangeEvent, SystemStarted}; +use std::result::Result::Ok; use std::sync::Arc; use std::time::Duration; use stream_task::{TaskError, TaskEventCounterHandle, TaskHandle}; @@ -99,6 +101,73 @@ impl SyncService { vm_metrics, }) } + + pub async fn create_verified_client( + network: NetworkServiceRef, + config: Arc, + peer_strategy: Option, + peers: Vec, + peer_score_metrics: Option, + ) -> Result> { + let peer_select_strategy = + peer_strategy.unwrap_or_else(|| config.sync.peer_select_strategy()); 
+ + let mut peer_set = network.peer_set().await?; + + loop { + if peer_set.is_empty() || peer_set.len() < (config.net().min_peers() as usize) { + let level = if config.net().is_dev() || config.net().is_test() { + Level::Debug + } else { + Level::Info + }; + log!( + level, + "[sync]Waiting enough peers to sync, current: {:?} peers, min peers: {:?}", + peer_set.len(), + config.net().min_peers() + ); + + Delay::new(Duration::from_secs(1)).await; + peer_set = network.peer_set().await?; + } else { + break; + } + } + + let peer_reputations = network + .reputations(REPUTATION_THRESHOLD) + .await? + .await? + .into_iter() + .map(|(peer, reputation)| { + ( + peer, + (REPUTATION_THRESHOLD.abs().saturating_add(reputation)) as u64, + ) + }) + .collect(); + + let peer_selector = PeerSelector::new_with_reputation( + peer_reputations, + peer_set, + peer_select_strategy, + peer_score_metrics, + ); + + peer_selector.retain_rpc_peers(); + if !peers.is_empty() { + peer_selector.retain(peers.as_ref()) + } + if peer_selector.is_empty() { + return Err(format_err!("[sync] No peers to sync.")); + } + + Ok(Arc::new(VerifiedRpcClient::new( + peer_selector.clone(), + network.clone(), + ))) + } pub fn check_and_start_sync( &mut self, @@ -145,67 +214,15 @@ impl SyncService { let network = ctx.get_shared::()?; let storage = self.storage.clone(); let self_ref = ctx.self_ref(); - let connector_service = ctx.service_ref::()?.clone(); + let connector_service = ctx + .service_ref::>()? + .clone(); let config = self.config.clone(); let peer_score_metrics = self.peer_score_metrics.clone(); let sync_metrics = self.metrics.clone(); let vm_metrics = self.vm_metrics.clone(); let dag = ctx.get_shared::()?; let fut = async move { - let peer_select_strategy = - peer_strategy.unwrap_or_else(|| config.sync.peer_select_strategy()); - - let mut peer_set = network.peer_set().await?; - - loop { - if peer_set.is_empty() || peer_set.len() < (config.net().min_peers() as usize) { - let level = if config.net().is_dev() || config.net().is_test() { - Level::Debug - } else { - Level::Info - }; - log!( - level, - "[sync]Waiting enough peers to sync, current: {:?} peers, min peers: {:?}", - peer_set.len(), - config.net().min_peers() - ); - - Delay::new(Duration::from_secs(1)).await; - peer_set = network.peer_set().await?; - } else { - break; - } - } - - let peer_reputations = network - .reputations(REPUTATION_THRESHOLD) - .await? - .await? - .into_iter() - .map(|(peer, reputation)| { - ( - peer, - (REPUTATION_THRESHOLD.abs().saturating_add(reputation)) as u64, - ) - }) - .collect(); - - let peer_selector = PeerSelector::new_with_reputation( - peer_reputations, - peer_set, - peer_select_strategy, - peer_score_metrics, - ); - - peer_selector.retain_rpc_peers(); - if !peers.is_empty() { - peer_selector.retain(peers.as_ref()) - } - if peer_selector.is_empty() { - return Err(format_err!("[sync] No peers to sync.")); - } - let startup_info = storage .get_startup_info()? .ok_or_else(|| format_err!("Startup info should exist."))?; @@ -215,10 +232,14 @@ impl SyncService { format_err!("Can not find block info by id: {}", current_block_id) })?; - let rpc_client = Arc::new(VerifiedRpcClient::new( - peer_selector.clone(), + let rpc_client = Self::create_verified_client( network.clone(), - )); + config.clone(), + peer_strategy, + peers, + peer_score_metrics, + ) + .await?; if let Some(target) = rpc_client.get_best_target(current_block_info.get_total_difficulty())? 
{ @@ -244,14 +265,14 @@ impl SyncService { target, task_handle, task_event_handle, - peer_selector, + peer_selector: rpc_client.selector().clone(), })?; if let Some(sync_task_total) = sync_task_total.as_ref() { sync_task_total.with_label_values(&["start"]).inc(); } Ok(Some(fut.await?)) } else { - debug!("[sync]No best peer to request, current is beast."); + debug!("[sync]No best peer to request, current is best."); Ok(None) } }; @@ -577,10 +598,9 @@ impl EventHandler for SyncService { impl EventHandler for SyncService { fn handle_event(&mut self, msg: NewHeadBlock, ctx: &mut ServiceContext) { - let NewHeadBlock(block) = msg; if self.sync_status.update_chain_status(ChainStatus::new( - block.header().clone(), - block.block_info.clone(), + msg.executed_block.header().clone(), + msg.executed_block.block_info.clone(), )) { ctx.broadcast(SyncStatusChangeEvent(self.sync_status.clone())); } diff --git a/sync/src/tasks/block_sync_task.rs b/sync/src/tasks/block_sync_task.rs index 57f6703a9d..4899995691 100644 --- a/sync/src/tasks/block_sync_task.rs +++ b/sync/src/tasks/block_sync_task.rs @@ -3,7 +3,7 @@ use crate::tasks::{BlockConnectedEvent, BlockConnectedEventHandle, BlockFetcher, BlockLocalStore}; use crate::verified_rpc_client::RpcVerifyError; -use anyhow::{format_err, Result}; +use anyhow::{bail, format_err, Result}; use futures::future::BoxFuture; use futures::FutureExt; use network_api::PeerId; @@ -12,14 +12,18 @@ use starcoin_accumulator::{Accumulator, MerkleAccumulator}; use starcoin_chain::{verifier::BasicVerifier, BlockChain}; use starcoin_chain_api::{ChainReader, ChainWriter, ConnectBlockError, ExecutedBlock}; use starcoin_config::G_CRATE_VERSION; +use starcoin_crypto::HashValue; use starcoin_logger::prelude::*; -use starcoin_storage::BARNARD_HARD_FORK_HASH; +use starcoin_storage::{Store, BARNARD_HARD_FORK_HASH}; use starcoin_sync_api::SyncTarget; -use starcoin_types::block::{Block, BlockIdAndNumber, BlockInfo, BlockNumber}; +use starcoin_types::block::{Block, BlockHeader, BlockIdAndNumber, BlockInfo, BlockNumber}; use std::collections::HashMap; use std::sync::Arc; +use std::time::Duration; use stream_task::{CollectorState, TaskError, TaskResultCollector, TaskState}; +use super::{BlockConnectAction, BlockConnectedFinishEvent}; + #[derive(Clone, Debug)] pub struct SyncBlockData { pub(crate) block: Block, @@ -187,6 +191,8 @@ pub struct BlockCollector { event_handle: H, peer_provider: N, skip_pow_verify: bool, + local_store: Arc, + fetcher: Arc, } impl BlockCollector @@ -201,6 +207,8 @@ where event_handle: H, peer_provider: N, skip_pow_verify: bool, + local_store: Arc, + fetcher: Arc, ) -> Self { Self { current_block_info, @@ -209,6 +217,8 @@ where event_handle, peer_provider, skip_pow_verify, + local_store, + fetcher, } } @@ -217,6 +227,69 @@ where self.apply_block(block, None) } + fn notify_connected_block( + &mut self, + block: Block, + block_info: BlockInfo, + action: BlockConnectAction, + state: CollectorState, + ) -> Result { + let total_difficulty = block_info.get_total_difficulty(); + + // if the new block's total difficulty is smaller than the current, + // do nothing because we do not need to update the current chain in any other services. + if total_difficulty <= self.current_block_info.total_difficulty { + return Ok(state); // nothing to do + } + + // only try connect block when sync chain total_difficulty > node's current chain. + + // first, create the sender and receiver for ensuring that + // the last block is connected before the next synchronization is triggered. 
+ // if the block is not the last one, we do not want to do this. + let (sender, mut receiver) = match state { + CollectorState::Enough => { + let (s, r) = futures::channel::mpsc::unbounded::(); + (Some(s), Some(r)) + } + CollectorState::Need => (None, None), + }; + + // second, construct the block connect event. + let block_connect_event = BlockConnectedEvent { + block, + feedback: sender, + action, + }; + + // third, broadcast it. + if let Err(e) = self.event_handle.handle(block_connect_event.clone()) { + error!( + "Send BlockConnectedEvent error: {:?}, block_id: {}", + e, + block_info.block_id() + ); + } + + // finally, if it is the last one, wait for the last block to be processed. + if block_connect_event.feedback.is_some() && receiver.is_some() { + let mut count: i32 = 0; + while count < 3 { + count = count.saturating_add(1); + match receiver.as_mut().unwrap().try_next() { + Ok(_) => { + break; + } + Err(_) => { + info!("Waiting for last block to be processed"); + async_std::task::block_on(async_std::task::sleep(Duration::from_secs(10))); + } + } + } + } + Ok(state) + } + fn apply_block(&mut self, block: Block, peer_id: Option) -> Result<()> { if let Some((_failed_block, pre_peer_id, err, version)) = self .chain @@ -282,48 +355,207 @@ where Ok(()) } } -} -impl TaskResultCollector for BlockCollector -where - N: PeerProvider + 'static, - H: BlockConnectedEventHandle + 'static, -{ - type Output = BlockChain; + fn find_absent_parent_dag_blocks( + &self, + block_header: BlockHeader, + ancestors: &mut Vec, + absent_blocks: &mut Vec, + ) -> Result<()> { + let parents = block_header.parents_hash().unwrap_or_default(); + if parents.is_empty() { + return Ok(()); + } + for parent in parents { + if !self.chain.has_dag_block(parent)? { + absent_blocks.push(parent) + } else { + ancestors.push(parent); + } + } + Ok(()) + } - fn collect(&mut self, item: SyncBlockData) -> Result { - let (block, block_info, peer_id) = item.into(); - let block_id = block.id(); - let timestamp = block.header().timestamp(); - let block_info = match block_info { - Some(block_info) => { - //If block_info exists, it means that this block was already executed and try connect in the previous sync, but the sync task was interrupted. 
- //So, we just need to update chain and continue - self.chain.connect(ExecutedBlock { - block, - block_info: block_info.clone(), - })?; - block_info + fn find_absent_parent_dag_blocks_for_blocks( + &self, + block_headers: Vec, + ancestors: &mut Vec, + absent_blocks: &mut Vec, + ) -> Result<()> { + for block_header in block_headers { + self.find_absent_parent_dag_blocks(block_header, ancestors, absent_blocks)?; + } + Ok(()) + } + + async fn fetch_block_headers(&self, absent_blocks: Vec) -> Result)>> { + let mut count: i32 = 20; + while count > 0 { + info!("fetch block header retry count = {}", count); + match self + .fetcher + .fetch_block_headers(absent_blocks.clone()) + .await { + Ok(result) => { + return Ok(result); + } + Err(e) => { + count = count.saturating_sub(1); + if count == 0 { + bail!("failed to fetch block headers due to: {:?}", e); + } + async_std::task::sleep(Duration::from_secs(1)).await; + } + } + } + bail!("failed to fetch block headers"); + } + + async fn find_ancestor_dag_block_header( + &self, + mut block_headers: Vec, + ) -> Result> { + let mut ancestors = vec![]; + loop { + let mut absent_blocks = vec![]; + self.find_absent_parent_dag_blocks_for_blocks( + block_headers, + &mut ancestors, + &mut absent_blocks, + )?; + if absent_blocks.is_empty() { + return Ok(ancestors); } - None => { - self.apply_block(block.clone(), peer_id)?; - self.chain.time_service().adjust(timestamp); - let block_info = self.chain.status().info; - let total_difficulty = block_info.get_total_difficulty(); - // only try connect block when sync chain total_difficulty > node's current chain. - if total_difficulty > self.current_block_info.total_difficulty { - if let Err(e) = self.event_handle.handle(BlockConnectedEvent { block }) { - error!( - "Send BlockConnectedEvent error: {:?}, block_id: {}", - e, block_id - ); + let absent_block_headers = self + .fetch_block_headers(absent_blocks) + .await?; + if absent_block_headers.iter().any(|(id, header)| { + if header.is_none() { + error!( + "fetch absent block header failed, block id: {:?}, it should not be absent!", + id + ); + return true; + } + false + }) { + bail!("fetch absent block header failed, it should not be absent!"); + } + block_headers = absent_block_headers + .into_iter() + .map(|(_, header)| header.expect("block header should not be none!")) + .collect(); + } + } + + pub fn ensure_dag_parent_blocks_exist( + &mut self, + block_header: BlockHeader, + ) -> Result<()> { + if !block_header.is_dag() { + info!("the block is not a dag block, skipping, its id: {:?}, its number {:?}", block_header.id(), block_header.number()); + return Ok(()); + } + if self.chain.has_dag_block(block_header.id())? { + info!("the dag block exists, skipping, its id: {:?}, its number {:?}", block_header.id(), block_header.number()); + return Ok(()); + } + info!("the block is a dag block, its id: {:?}, number: {:?}, its parents: {:?}", block_header.id(), block_header.number(), block_header.parents_hash()); + let fut = async { + let mut dag_ancestors = self + .find_ancestor_dag_block_header(vec![block_header.clone()]) + .await?; + + while !dag_ancestors.is_empty() { + for ancestor_block_header_id in &dag_ancestors { + match self + .local_store + .get_block_info(ancestor_block_header_id.clone())? 
+ { + Some(block_info) => { + let block = self.local_store.get_block_by_hash(ancestor_block_header_id.clone())?.expect("failed to get block by hash"); + info!("connect a dag block: {:?}, number: {:?}", block.id(), block.header().number()); + let executed_block = self.chain.connect(ExecutedBlock { + block, + block_info, + })?; + info!("succeed to connect a dag block: {:?}, number: {:?}", executed_block.block.id(), executed_block.block.header().number()); + self.notify_connected_block(executed_block.block, executed_block.block_info.clone(), BlockConnectAction::ConnectExecutedBlock, self.check_enough_by_info(executed_block.block_info)?)?; + } + None => { + for (block, _peer_id) in self + .fetch_blocks( + vec![ancestor_block_header_id.clone()], + ) + .await? + { + if self.chain.has_dag_block(block.id())? { + continue; + } + info!("now apply for sync after fetching a dag block: {:?}, number: {:?}", block.id(), block.header().number()); + let executed_block = self.chain.apply(block.into())?; + info!("succeed to apply a dag block: {:?}, number: {:?}", executed_block.block.id(), executed_block.block.header().number()); + self.notify_connected_block(executed_block.block, executed_block.block_info.clone(), BlockConnectAction::ConnectNewBlock, self.check_enough_by_info(executed_block.block_info)?)?; + } + } } } - block_info + dag_ancestors = self + .fetch_dag_block_children(dag_ancestors) + .await?; + + info!("next dag children blocks: {:?}", dag_ancestors); } + + Ok(()) }; + async_std::task::block_on(fut) + } - //verify target + async fn fetch_blocks(&self, block_ids: Vec) -> Result)>> { + let mut count: i32 = 20; + while count > 0 { + info!("fetch blocks retry count = {}", count); + match self.fetcher.fetch_blocks(block_ids.clone()).await { + Ok(result) => { + return Ok(result); + } + Err(e) => { + count = count.saturating_sub(1); + if count == 0 { + bail!("failed to fetch blocks due to: {:?}", e); + } + async_std::task::sleep(Duration::from_secs(1)).await; + } + } + } + bail!("failed to fetch blocks"); + } + + async fn fetch_dag_block_children(&self, dag_ancestors: Vec) -> Result> { + let mut count: i32 = 20; + while count > 0 { + info!("fetch block chidlren retry count = {}", count); + match self + .fetcher + .fetch_dag_block_children(dag_ancestors.clone()) + .await { + Ok(result) => { + return Ok(result); + } + Err(e) => { + count = count.saturating_sub(1); + if count == 0 { + bail!("failed to fetch dag block children due to: {:?}", e); + } + async_std::task::sleep(Duration::from_secs(1)).await; + } + } + } + bail!("failed to fetch dag block children"); + } + + pub fn check_enough_by_info(&self, block_info: BlockInfo) -> Result { if block_info.block_accumulator_info.num_leaves == self.target.block_info.block_accumulator_info.num_leaves { @@ -332,10 +564,10 @@ where RpcVerifyError::new_with_peers( self.target.peers.clone(), format!( - "Verify target error, expect target: {:?}, collect target block_info:{:?}", - self.target.block_info, - block_info - ), + "Verify target error, expect target: {:?}, collect target block_info:{:?}", + self.target.block_info, + block_info + ), ) .into(), ) @@ -348,6 +580,62 @@ where } } + pub fn check_enough(&self) -> Result { + if let Some(block_info) = self.local_store.get_block_info(self.chain.current_header().id())? 
{ + self.check_enough_by_info(block_info) + } else { + Ok(CollectorState::Need) + } + } +} + +impl TaskResultCollector for BlockCollector +where + N: PeerProvider + 'static, + H: BlockConnectedEventHandle + 'static, +{ + type Output = BlockChain; + + fn collect(&mut self, item: SyncBlockData) -> Result { + let (block, block_info, peer_id) = item.into(); + + // if it is a dag block, we must ensure that its dag parent blocks exist. + // if it is not, we must pull the dag parent blocks from the peer. + info!("now sync dag block -- ensure_dag_parent_blocks_exist"); + self.ensure_dag_parent_blocks_exist(block.header().clone())?; + let state = self.check_enough(); + if let anyhow::Result::Ok(CollectorState::Enough) = &state { + let header = block.header().clone(); + return self.notify_connected_block(block, self.local_store.get_block_info(header.id())?.expect("block info should exist"), BlockConnectAction::ConnectExecutedBlock, state?); + } + + let timestamp = block.header().timestamp(); + let (block_info, action) = match block_info { + Some(block_info) => { + //If block_info exists, it means that this block was already executed and try connect in the previous sync, but the sync task was interrupted. + //So, we just need to update chain and continue + self.chain.connect(ExecutedBlock { + block: block.clone(), + block_info: block_info.clone(), + })?; + (block_info, BlockConnectAction::ConnectExecutedBlock) + } + None => { + self.apply_block(block.clone(), peer_id)?; + self.chain.time_service().adjust(timestamp); + ( + self.chain.status().info, + BlockConnectAction::ConnectNewBlock, + ) + } + }; + + //verify target + let state: Result = self.check_enough_by_info(block_info.clone()); + + self.notify_connected_block(block, block_info, action, state?) + } + fn finish(self) -> Result { Ok(self.chain) } diff --git a/sync/src/tasks/inner_sync_task.rs b/sync/src/tasks/inner_sync_task.rs index 8367276da5..23e40ab711 100644 --- a/sync/src/tasks/inner_sync_task.rs +++ b/sync/src/tasks/inner_sync_task.rs @@ -1,7 +1,3 @@ -use crate::tasks::{ - AccumulatorCollector, BlockAccumulatorSyncTask, BlockCollector, BlockConnectedEventHandle, - BlockFetcher, BlockIdFetcher, BlockSyncTask, PeerOperator, -}; use anyhow::format_err; use network_api::PeerProvider; use starcoin_accumulator::node::AccumulatorStoreType; @@ -18,6 +14,8 @@ use stream_task::{ CustomErrorHandle, Generator, TaskError, TaskEventHandle, TaskGenerator, TaskHandle, TaskState, }; +use super::{BlockAccumulatorSyncTask, AccumulatorCollector, BlockSyncTask, BlockCollector, PeerOperator, BlockFetcher, BlockIdFetcher, BlockConnectedEventHandle}; + pub struct InnerSyncTask where H: BlockConnectedEventHandle + Sync + 'static, @@ -121,7 +119,7 @@ where ) .and_then(move |(ancestor, accumulator), event_handle| { let check_local_store = - ancestor_block_info.total_difficulty < current_block_info.total_difficulty; + ancestor_block_info.total_difficulty <= current_block_info.total_difficulty; let block_sync_task = BlockSyncTask::new( accumulator, @@ -136,7 +134,7 @@ where ancestor.id, self.storage.clone(), vm_metrics, - self.dag, + self.dag.clone(), )?; let block_collector = BlockCollector::new_with_handle( current_block_info.clone(), @@ -145,6 +143,8 @@ where self.block_event_handle.clone(), self.peer_provider.clone(), skip_pow_verify_when_sync, + self.storage.clone(), + self.fetcher.clone(), ); Ok(TaskGenerator::new( block_sync_task, diff --git a/sync/src/tasks/mock.rs b/sync/src/tasks/mock.rs index 5f5c66034d..45b2a85515 100644 --- a/sync/src/tasks/mock.rs +++ 
b/sync/src/tasks/mock.rs @@ -4,7 +4,8 @@ use crate::tasks::{ BlockConnectedEvent, BlockFetcher, BlockIdFetcher, BlockInfoFetcher, PeerOperator, SyncFetcher, }; -use anyhow::{format_err, Context, Result}; +use anyhow::{format_err, Context, Ok, Result}; +use async_std::path::Path; use async_std::task::JoinHandle; use futures::channel::mpsc::UnboundedReceiver; use futures::future::BoxFuture; @@ -14,15 +15,21 @@ use network_api::messages::NotificationMessage; use network_api::{PeerId, PeerInfo, PeerSelector, PeerStrategy}; use network_p2p_core::{NetRpcError, RpcErrorCode}; use rand::Rng; +use starcoin_account_api::AccountInfo; +use starcoin_accumulator::accumulator_info::AccumulatorInfo; use starcoin_accumulator::{Accumulator, MerkleAccumulator}; use starcoin_chain::BlockChain; use starcoin_chain_api::ChainReader; use starcoin_chain_mock::MockChain; use starcoin_config::ChainNetwork; -use starcoin_crypto::HashValue; +use starcoin_crypto::{HashValue, hash}; +use starcoin_dag::blockdag::BlockDAG; +use starcoin_dag::consensusdb::prelude::FlexiDagStorageConfig; use starcoin_network_rpc_api::G_RPC_INFO; +use starcoin_storage::Storage; use starcoin_sync_api::SyncTarget; use starcoin_types::block::{Block, BlockIdAndNumber, BlockInfo, BlockNumber}; +use starcoin_types::startup_info::ChainInfo; use std::sync::Arc; use std::time::Duration; @@ -162,6 +169,34 @@ impl SyncNodeMocker { )) } + pub fn new_with_storage( + net: ChainNetwork, + storage: Arc, + chain_info: ChainInfo, + miner: AccountInfo, + delay_milliseconds: u64, + random_error_percent: u32, + dag: BlockDAG, + ) -> Result { + let chain = MockChain::new_with_storage(net, storage.clone(), chain_info.head().id(), miner, dag)?; + let peer_id = PeerId::random(); + let peer_info = PeerInfo::new( + peer_id.clone(), + chain.chain_info(), + NotificationMessage::protocols(), + G_RPC_INFO.clone().into_protocols(), + None, + ); + let peer_selector = PeerSelector::new(vec![peer_info], PeerStrategy::default(), None); + Ok(Self::new_inner( + peer_id, + chain, + ErrorStrategy::Timeout(delay_milliseconds), + random_error_percent, + peer_selector, + )) + } + pub fn new_with_strategy( net: ChainNetwork, error_strategy: ErrorStrategy, @@ -250,10 +285,19 @@ impl SyncNodeMocker { self.chain_mocker.head() } + pub fn get_storage(&self) -> Arc { + self.chain_mocker.get_storage() + } + pub fn produce_block(&mut self, times: u64) -> Result<()> { self.chain_mocker.produce_and_apply_times(times) } + pub fn produce_block_and_create_dag(&mut self, times: u64) -> Result<()> { + self.chain_mocker.produce_and_apply_times(times)?; + Ok(()) + } + pub fn select_head(&mut self, block: Block) -> Result<()> { self.chain_mocker.select_head(block) } @@ -278,6 +322,10 @@ impl SyncNodeMocker { .select_peer() .ok_or_else(|| format_err!("No peers for send request.")) } + + pub fn get_dag_targets(&self) -> Result> { + Ok(vec![]) + } } impl PeerOperator for SyncNodeMocker { @@ -313,7 +361,7 @@ impl BlockFetcher for SyncNodeMocker { .into_iter() .map(|block_id| { if let Some(block) = self.chain().get_block(block_id)? 
{ - Ok((block, None)) + Ok((block, Some(PeerId::random()))) } else { Err(format_err!("Can not find block by id: {}", block_id)) } @@ -326,6 +374,35 @@ impl BlockFetcher for SyncNodeMocker { } .boxed() } + + fn fetch_block_headers( + &self, + block_ids: Vec, + ) -> BoxFuture)>>> { + async move { + let blocks = self.fetch_blocks(block_ids).await?; + blocks + .into_iter() + .map(|(block, _)| Ok((block.id(), Some(block.header().clone())))) + .collect() + } + .boxed() + } + + fn fetch_dag_block_children( + &self, + block_ids: Vec, + ) -> BoxFuture>> { + async move { + let blocks = self.fetch_blocks(block_ids).await?; + let mut result = vec![]; + for block in blocks { + result.extend(self.chain().dag().get_children(block.0.id())?); + } + Ok(result) + } + .boxed() + } } impl BlockInfoFetcher for SyncNodeMocker { @@ -339,8 +416,8 @@ impl BlockInfoFetcher for SyncNodeMocker { result.push(self.chain().get_block_info(Some(hash)).unwrap()); }); async move { - let _ = self.select_a_peer()?; - self.err_mocker.random_err().await?; + // let _ = self.select_a_peer()?; + // self.err_mocker.random_err().await?; Ok(result) } .boxed() diff --git a/sync/src/tasks/mod.rs b/sync/src/tasks/mod.rs index a628205dec..ce947a924d 100644 --- a/sync/src/tasks/mod.rs +++ b/sync/src/tasks/mod.rs @@ -1,6 +1,7 @@ // Copyright (c) The Starcoin Core Contributors // SPDX-License-Identifier: Apache-2.0 +use crate::block_connector::BlockConnectorService; use crate::tasks::block_sync_task::SyncBlockData; use crate::tasks::inner_sync_task::InnerSyncTask; use crate::verified_rpc_client::{RpcVerifyError, VerifiedRpcClient}; @@ -14,12 +15,16 @@ use starcoin_accumulator::node::AccumulatorStoreType; use starcoin_accumulator::MerkleAccumulator; use starcoin_chain::{BlockChain, ChainReader}; use starcoin_crypto::HashValue; +use starcoin_dag::blockdag::BlockDAG; use starcoin_logger::prelude::*; use starcoin_service_registry::{ActorService, EventHandler, ServiceRef}; use starcoin_storage::Store; use starcoin_sync_api::SyncTarget; use starcoin_time_service::TimeService; -use starcoin_types::block::{Block, BlockIdAndNumber, BlockInfo, BlockNumber}; +use starcoin_txpool::TxPoolService; +#[cfg(test)] +use starcoin_txpool_mock_service::MockTxPoolService; +use starcoin_types::block::{Block, BlockHeader, BlockIdAndNumber, BlockInfo, BlockNumber, LegacyBlock}; use starcoin_types::startup_info::ChainStatus; use starcoin_types::U256; use std::str::FromStr; @@ -32,7 +37,10 @@ use stream_task::{ }; pub trait SyncFetcher: PeerOperator + BlockIdFetcher + BlockFetcher + BlockInfoFetcher { - fn get_best_target(&self, min_difficulty: U256) -> Result> { + fn get_best_target( + &self, + min_difficulty: U256, + ) -> Result> { if let Some(best_peers) = self.peer_selector().bests(min_difficulty) { //TODO fast verify best peers by accumulator let mut chain_statuses: Vec<(ChainStatus, Vec)> = @@ -76,7 +84,7 @@ pub trait SyncFetcher: PeerOperator + BlockIdFetcher + BlockFetcher + BlockInfoF min_difficulty ); Ok(None) - } + } } fn get_better_target( @@ -280,6 +288,16 @@ pub trait BlockFetcher: Send + Sync { &self, block_ids: Vec, ) -> BoxFuture)>>>; + + fn fetch_block_headers( + &self, + block_ids: Vec, + ) -> BoxFuture)>>>; + + fn fetch_dag_block_children( + &self, + block_ids: Vec, + ) -> BoxFuture>>; } impl BlockFetcher for Arc @@ -292,6 +310,20 @@ where ) -> BoxFuture<'_, Result)>>> { BlockFetcher::fetch_blocks(self.as_ref(), block_ids) } + + fn fetch_block_headers( + &self, + block_ids: Vec, + ) -> BoxFuture)>>> { + 
BlockFetcher::fetch_block_headers(self.as_ref(), block_ids) + } + + fn fetch_dag_block_children( + &self, + block_ids: Vec, + ) -> BoxFuture>> { + BlockFetcher::fetch_dag_block_children(self.as_ref(), block_ids) + } } impl BlockFetcher for VerifiedRpcClient { @@ -301,7 +333,7 @@ impl BlockFetcher for VerifiedRpcClient { ) -> BoxFuture<'_, Result)>>> { self.get_blocks(block_ids.clone()) .and_then(|blocks| async move { - let results: Result)>> = block_ids + let results = block_ids .iter() .zip(blocks) .map(|(id, block)| { @@ -309,11 +341,29 @@ impl BlockFetcher for VerifiedRpcClient { format_err!("Get block by id: {} failed, remote node return None", id) }) }) - .collect(); + .collect::>>(); results.map_err(fetcher_err_map) }) .boxed() } + + fn fetch_block_headers( + &self, + block_ids: Vec, + ) -> BoxFuture)>>> { + self.get_block_headers_by_hash(block_ids.clone()) + .map_err(fetcher_err_map) + .boxed() + } + + fn fetch_dag_block_children( + &self, + block_ids: Vec, + ) -> BoxFuture>> { + self.get_dag_block_children(block_ids) + .map_err(fetcher_err_map) + .boxed() + } } pub trait BlockInfoFetcher: Send + Sync { @@ -372,6 +422,7 @@ impl BlockLocalStore for Arc { Some(block) => { let id = block.id(); let block_info = self.get_block_info(id)?; + Ok(Some(SyncBlockData::new(block, block_info, None))) } None => Ok(None), @@ -380,11 +431,22 @@ impl BlockLocalStore for Arc { } } +#[derive(Clone, Debug)] +pub enum BlockConnectAction { + ConnectNewBlock, + ConnectExecutedBlock, +} + #[derive(Clone, Debug)] pub struct BlockConnectedEvent { pub block: Block, + pub feedback: Option>, + pub action: BlockConnectAction, } +#[derive(Clone, Debug)] +pub struct BlockConnectedFinishEvent; + #[derive(Clone, Debug)] pub struct BlockDiskCheckEvent {} @@ -392,10 +454,15 @@ pub trait BlockConnectedEventHandle: Send + Clone + std::marker::Unpin { fn handle(&mut self, event: BlockConnectedEvent) -> Result<()>; } -impl BlockConnectedEventHandle for ServiceRef -where - S: ActorService + EventHandler, -{ +impl BlockConnectedEventHandle for ServiceRef> { + fn handle(&mut self, event: BlockConnectedEvent) -> Result<()> { + self.notify(event)?; + Ok(()) + } +} + +#[cfg(test)] +impl BlockConnectedEventHandle for ServiceRef> { fn handle(&mut self, event: BlockConnectedEvent) -> Result<()> { self.notify(event)?; Ok(()) @@ -459,6 +526,24 @@ impl BlockConnectedEventHandle for UnboundedSender { } } +#[derive(Debug, Clone)] +pub struct BlockConnectEventHandleMock { + sender: UnboundedSender, +} + +impl BlockConnectEventHandleMock { + pub fn new(sender: UnboundedSender) -> Result { + Ok(Self { sender }) + } +} + +impl BlockConnectedEventHandle for BlockConnectEventHandleMock { + fn handle(&mut self, event: BlockConnectedEvent) -> Result<()> { + self.sender.start_send(event)?; + Ok(()) + } +} + pub struct ExtSyncTaskErrorHandle where F: SyncFetcher + 'static, @@ -515,7 +600,6 @@ use crate::sync_metrics::SyncMetrics; pub use accumulator_sync_task::{AccumulatorCollector, BlockAccumulatorSyncTask}; pub use block_sync_task::{BlockCollector, BlockSyncTask}; pub use find_ancestor_task::{AncestorCollector, FindAncestorTask}; -use starcoin_dag::blockdag::BlockDAG; use starcoin_executor::VMMetrics; pub fn full_sync_task( diff --git a/sync/src/tasks/tests.rs b/sync/src/tasks/tests.rs index 3d1a3311c8..36aa97af22 100644 --- a/sync/src/tasks/tests.rs +++ b/sync/src/tasks/tests.rs @@ -2,6 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 #![allow(clippy::integer_arithmetic)] +use crate::block_connector::{BlockConnectorService, 
CheckBlockConnectorHashValue}; use crate::tasks::block_sync_task::SyncBlockData; use crate::tasks::mock::{ErrorStrategy, MockBlockIdFetcher, SyncNodeMocker}; use crate::tasks::{ @@ -9,48 +10,62 @@ use crate::tasks::{ BlockCollector, BlockFetcher, BlockLocalStore, BlockSyncTask, FindAncestorTask, SyncFetcher, }; use crate::verified_rpc_client::RpcVerifyError; -use anyhow::Context; -use anyhow::{format_err, Result}; +use anyhow::{anyhow, format_err, Result}; +use anyhow::{Context, Ok}; use futures::channel::mpsc::unbounded; use futures::future::BoxFuture; use futures::FutureExt; use futures_timer::Delay; use network_api::{PeerId, PeerInfo, PeerSelector, PeerStrategy}; use pin_utils::core_reexport::time::Duration; +use starcoin_account_api::AccountInfo; use starcoin_accumulator::accumulator_info::AccumulatorInfo; use starcoin_accumulator::tree_store::mock::MockAccumulatorStore; use starcoin_accumulator::{Accumulator, MerkleAccumulator}; use starcoin_chain::BlockChain; use starcoin_chain_api::ChainReader; use starcoin_chain_mock::MockChain; -use starcoin_config::{BuiltinNetworkID, ChainNetwork}; +use starcoin_config::{BuiltinNetworkID, ChainNetwork, ChainNetworkID, NodeConfig, temp_dir, RocksdbConfig}; use starcoin_crypto::HashValue; +use starcoin_dag::blockdag::BlockDAG; +use starcoin_dag::consensusdb::prelude::FlexiDagStorageConfig; use starcoin_genesis::Genesis; +use starcoin_genesis::Genesis as StarcoinGenesis; use starcoin_logger::prelude::*; -use starcoin_storage::BlockStore; +use starcoin_service_registry::{RegistryAsyncService, RegistryService, ServiceRef}; +use starcoin_storage::db_storage::DBStorage; +use starcoin_storage::storage::StorageInstance; +use starcoin_storage::{BlockStore, Storage}; use starcoin_sync_api::SyncTarget; +use starcoin_txpool_mock_service::MockTxPoolService; use starcoin_types::{ block::{Block, BlockBody, BlockHeaderBuilder, BlockIdAndNumber, BlockInfo}, U256, }; use std::collections::HashMap; +use std::fs; +use std::path::{PathBuf, Path}; use std::sync::{Arc, Mutex}; +use stest::actix_export::System; +use stream_task::TaskHandle; use stream_task::{ DefaultCustomErrorHandle, Generator, TaskError, TaskEventCounterHandle, TaskGenerator, }; use test_helper::DummyNetworkService; +use super::BlockConnectedEvent; + #[stest::test(timeout = 120)] pub async fn test_full_sync_new_node() -> Result<()> { let net1 = ChainNetwork::new_builtin(BuiltinNetworkID::Test); - let mut node1 = SyncNodeMocker::new(net1, 1, 50)?; + let mut node1 = SyncNodeMocker::new(net1, 300, 50)?; node1.produce_block(10)?; let mut arc_node1 = Arc::new(node1); let net2 = ChainNetwork::new_builtin(BuiltinNetworkID::Test); - let node2 = SyncNodeMocker::new(net2.clone(), 1, 50)?; + let node2 = SyncNodeMocker::new(net2.clone(), 300, 50)?; let target = arc_node1.sync_target(); @@ -125,14 +140,14 @@ pub async fn test_full_sync_new_node() -> Result<()> { #[stest::test] pub async fn test_sync_invalid_target() -> Result<()> { let net1 = ChainNetwork::new_builtin(BuiltinNetworkID::Test); - let mut node1 = SyncNodeMocker::new(net1, 1, 0)?; + let mut node1 = SyncNodeMocker::new(net1, 300, 50)?; node1.produce_block(10)?; let arc_node1 = Arc::new(node1); let net2 = ChainNetwork::new_builtin(BuiltinNetworkID::Test); - let node2 = SyncNodeMocker::new(net2.clone(), 1, 0)?; + let node2 = SyncNodeMocker::new(net2.clone(), 300, 50)?; let dag = node2.chain().dag(); let mut target = arc_node1.sync_target(); @@ -187,6 +202,7 @@ pub async fn test_failed_block() -> Result<()> { None, dag, )?; + let fetcher = 
MockBlockFetcher::new(); let (sender, _) = unbounded(); let chain_status = chain.status(); let target = SyncTarget { @@ -201,6 +217,8 @@ pub async fn test_failed_block() -> Result<()> { sender, DummyNetworkService::default(), true, + storage.clone(), + Arc::new(fetcher), ); let header = BlockHeaderBuilder::random().with_number(1).build(); let body = BlockBody::new(Vec::new(), None); @@ -217,14 +235,14 @@ pub async fn test_failed_block() -> Result<()> { #[stest::test(timeout = 120)] pub async fn test_full_sync_fork() -> Result<()> { let net1 = ChainNetwork::new_builtin(BuiltinNetworkID::Test); - let mut node1 = SyncNodeMocker::new(net1, 1, 50)?; + let mut node1 = SyncNodeMocker::new(net1, 300, 50)?; node1.produce_block(10)?; let mut arc_node1 = Arc::new(node1); let net2 = ChainNetwork::new_builtin(BuiltinNetworkID::Test); - let node2 = SyncNodeMocker::new(net2.clone(), 1, 50)?; + let node2 = SyncNodeMocker::new(net2.clone(), 300, 50)?; let target = arc_node1.sync_target(); @@ -299,7 +317,7 @@ pub async fn test_full_sync_fork() -> Result<()> { #[stest::test(timeout = 120)] pub async fn test_full_sync_fork_from_genesis() -> Result<()> { let net1 = ChainNetwork::new_builtin(BuiltinNetworkID::Test); - let mut node1 = SyncNodeMocker::new(net1, 1, 50)?; + let mut node1 = SyncNodeMocker::new(net1, 300, 50)?; node1.produce_block(10)?; let arc_node1 = Arc::new(node1); @@ -307,7 +325,7 @@ pub async fn test_full_sync_fork_from_genesis() -> Result<()> { let net2 = ChainNetwork::new_builtin(BuiltinNetworkID::Test); //fork from genesis - let mut node2 = SyncNodeMocker::new(net2.clone(), 1, 50)?; + let mut node2 = SyncNodeMocker::new(net2.clone(), 300, 50)?; node2.produce_block(5)?; let target = arc_node1.sync_target(); @@ -352,14 +370,15 @@ pub async fn test_full_sync_fork_from_genesis() -> Result<()> { #[stest::test(timeout = 120)] pub async fn test_full_sync_continue() -> Result<()> { - let net1 = ChainNetwork::new_builtin(BuiltinNetworkID::Test); - let mut node1 = SyncNodeMocker::new(net1, 10, 50)?; + // let net1 = ChainNetwork::new_builtin(BuiltinNetworkID::Test); + let test_system = SyncTestSystem::initialize_sync_system().await?; + let mut node1 = test_system.target_node;// SyncNodeMocker::new(net1, 10, 50)?; let dag = node1.chain().dag(); node1.produce_block(10)?; let arc_node1 = Arc::new(node1); let net2 = ChainNetwork::new_builtin(BuiltinNetworkID::Test); //fork from genesis - let mut node2 = SyncNodeMocker::new(net2.clone(), 1, 50)?; + let mut node2 = test_system.local_node;// SyncNodeMocker::new(net2.clone(), 1, 50)?; node2.produce_block(7)?; // first set target to 5. 
@@ -443,7 +462,7 @@ pub async fn test_full_sync_continue() -> Result<()> { #[stest::test] pub async fn test_full_sync_cancel() -> Result<()> { let net1 = ChainNetwork::new_builtin(BuiltinNetworkID::Test); - let mut node1 = SyncNodeMocker::new(net1, 1, 50)?; + let mut node1 = SyncNodeMocker::new(net1, 300, 50)?; node1.produce_block(10)?; let arc_node1 = Arc::new(node1); @@ -529,7 +548,7 @@ async fn test_accumulator_sync_by_stream_task() -> Result<()> { task_state, 5, 3, - 1, + 300, collector, event_handle.clone(), Arc::new(DefaultCustomErrorHandle), @@ -565,7 +584,7 @@ pub async fn test_find_ancestor_same_number() -> Result<()> { task_state, 5, 3, - 1, + 300, collector, event_handle.clone(), Arc::new(DefaultCustomErrorHandle), @@ -605,7 +624,7 @@ pub async fn test_find_ancestor_block_number_behind() -> Result<()> { task_state, 5, 3, - 1, + 300, collector, event_handle.clone(), Arc::new(DefaultCustomErrorHandle), @@ -654,7 +673,7 @@ pub async fn test_find_ancestor_chain_fork() -> Result<()> { task_state, 5, 3, - 1, + 300, collector, event_handle.clone(), Arc::new(DefaultCustomErrorHandle), @@ -695,7 +714,7 @@ impl BlockFetcher for MockBlockFetcher { .iter() .map(|block_id| { if let Some(block) = blocks.get(block_id).cloned() { - Ok((block, None)) + Ok((block, Some(PeerId::random()))) } else { Err(format_err!("Can not find block by id: {:?}", block_id)) } @@ -707,6 +726,58 @@ impl BlockFetcher for MockBlockFetcher { } .boxed() } + + fn fetch_block_headers( + &self, + block_ids: Vec, + ) -> BoxFuture)>>> { + let blocks = self.blocks.lock().unwrap(); + let result = block_ids + .iter() + .map(|block_id| { + if let Some(block) = blocks.get(block_id).cloned() { + Ok((block.id(), Some(block.header().clone()))) + } else { + Err(format_err!("Can not find block by id: {:?}", block_id)) + } + }) + .collect(); + async { + Delay::new(Duration::from_millis(100)).await; + result + } + .boxed() + } + + fn fetch_dag_block_children( + &self, + block_ids: Vec, + ) -> BoxFuture>> { + let blocks = self.blocks.lock().unwrap(); + let mut result = vec![]; + block_ids + .iter() + .map(|block_id| { + if let Some(block) = blocks.get(block_id).cloned() { + for hashes in block.header().parents_hash() { + for hash in hashes { + if result.contains(&hash) { + continue; + } + result.push(hash); + } + } + Ok(()) + } else { + Err(format_err!("Can not find block by id: {:?}", block_id)) + } + }); + async { + Delay::new(Duration::from_millis(100)).await; + Ok(result) + } + .boxed() + } } fn build_block_fetcher(total_blocks: u64) -> (MockBlockFetcher, MerkleAccumulator) { @@ -744,7 +815,7 @@ impl MockLocalBlockStore { ); self.store.lock().unwrap().insert( block.id(), - SyncBlockData::new(block.clone(), Some(block_info), None), + SyncBlockData::new(block.clone(), Some(block_info), Some(PeerId::random())), ); } } @@ -782,7 +853,7 @@ async fn block_sync_task_test(total_blocks: u64, ancestor_number: u64) -> Result block_sync_state, 5, 3, - 1, + 300, vec![], event_handle.clone(), Arc::new(DefaultCustomErrorHandle), @@ -850,7 +921,7 @@ async fn test_block_sync_with_local() -> Result<()> { block_sync_state, 5, 3, - 1, + 300, vec![], event_handle.clone(), Arc::new(DefaultCustomErrorHandle), @@ -945,7 +1016,7 @@ async fn test_err_context() -> Result<()> { async fn test_sync_target() { let mut peer_infos = vec![]; let net1 = ChainNetwork::new_builtin(BuiltinNetworkID::Test); - let mut node1 = SyncNodeMocker::new(net1, 1, 0).unwrap(); + let mut node1 = SyncNodeMocker::new(net1, 300, 50).unwrap(); node1.produce_block(10).unwrap(); let 
low_chain_info = node1.peer_info().chain_info().clone(); peer_infos.push(PeerInfo::new( @@ -971,6 +1042,7 @@ async fn test_sync_target() { let mock_chain = MockChain::new_with_chain( net2, node1.chain().fork(high_chain_info.head().id()).unwrap(), + node1.get_storage(), ) .unwrap(); @@ -978,8 +1050,8 @@ async fn test_sync_target() { let node2 = Arc::new(SyncNodeMocker::new_with_chain_selector( PeerId::random(), mock_chain, - 1, - 0, + 300, + 50, peer_selector, )); let full_target = node2 @@ -994,3 +1066,397 @@ async fn test_sync_target() { assert_eq!(target.target_id.number(), low_chain_info.head().number()); assert_eq!(target.target_id.id(), low_chain_info.head().id()); } + +fn sync_block_in_async_connection( + mut target_node: Arc, + local_node: Arc, + storage: Arc, + block_count: u64, + dag: BlockDAG, +) -> Result> { + Arc::get_mut(&mut target_node) + .unwrap() + .produce_block(block_count)?; + let target = target_node.sync_target(); + let target_id = target.target_id.id(); + + let (sender, mut receiver) = futures::channel::mpsc::unbounded::(); + let thread_local_node = local_node.clone(); + + let inner_dag = dag.clone(); + let process_block = move || { + let mut chain = MockChain::new_with_storage( + thread_local_node.chain_mocker.net().clone(), + storage.clone(), + thread_local_node.chain_mocker.head().status().head.id(), + thread_local_node.chain_mocker.miner().clone(), + inner_dag, + ) + .unwrap(); + loop { + if let std::result::Result::Ok(result) = receiver.try_next() { + match result { + Some(event) => { + chain + .select_head(event.block) + .expect("select head must be successful"); + if event.feedback.is_some() { + event + .feedback + .unwrap() + .unbounded_send(super::BlockConnectedFinishEvent) + .unwrap(); + assert_eq!(target_id, chain.head().status().head.id()); + break; + } + } + None => break, + } + } + } + }; + let handle = std::thread::spawn(process_block); + + let current_block_header = local_node.chain().current_header(); + let storage = local_node.chain().get_storage(); + + let local_net = local_node.chain_mocker.net(); + let (local_ancestor_sender, _local_ancestor_receiver) = unbounded(); + + let (sync_task, _task_handle, task_event_counter) = full_sync_task( + current_block_header.id(), + target.clone(), + false, + local_net.time_service(), + storage.clone(), + sender, + target_node.clone(), + local_ancestor_sender, + DummyNetworkService::default(), + 15, + None, + None, + dag.clone(), + )?; + let branch = async_std::task::block_on(sync_task)?; + assert_eq!(branch.current_header().id(), target.target_id.id()); + + handle.join().unwrap(); + + let reports = task_event_counter.get_reports(); + reports + .iter() + .for_each(|report| debug!("reports: {}", report)); + + Ok(target_node) +} + +#[stest::test] +async fn test_sync_block_in_async_connection() -> Result<()> { + let net = ChainNetwork::new_builtin(BuiltinNetworkID::Test); + let test_system = SyncTestSystem::initialize_sync_system().await?; + let mut target_node = Arc::new(test_system.target_node); + + // let (storage, chain_info, _, _) = + // Genesis::init_storage_for_test(&net).expect("init storage by genesis fail."); + + let local_node = Arc::new(test_system.local_node); + + // let dag_storage = starcoin_dag::consensusdb::prelude::FlexiDagStorage::create_from_path( + // Path::new("."), + // FlexiDagStorageConfig::new(), + // )?; + // let dag = starcoin_dag::blockdag::BlockDAG::new(8, dag_storage); + + target_node = + sync_block_in_async_connection(target_node, local_node.clone(), 
local_node.chain_mocker.get_storage(), 10, local_node.chain().dag().clone())?; + _ = sync_block_in_async_connection(target_node, local_node.clone(), local_node.chain_mocker.get_storage(), 20, local_node.chain().dag().clone())?; + + Ok(()) +} + +#[cfg(test)] +async fn sync_block_in_block_connection_service_mock( + mut target_node: Arc, + local_node: Arc, + registry: &ServiceRef, + block_count: u64, +) -> Result> { + Arc::get_mut(&mut target_node) + .unwrap() + .produce_block(block_count)?; + loop { + let target = target_node.sync_target(); + + let storage = local_node.chain().get_storage(); + let startup_info = storage + .get_startup_info()? + .ok_or_else(|| format_err!("Startup info should exist."))?; + let current_block_id = startup_info.main; + + let local_net = local_node.chain_mocker.net(); + let (local_ancestor_sender, _local_ancestor_receiver) = unbounded(); + + let block_chain_service = async_std::task::block_on( + registry.service_ref::>(), + )?; + + let (sync_task, _task_handle, task_event_counter) = full_sync_task( + current_block_id, + target.clone(), + false, + local_net.time_service(), + storage.clone(), + block_chain_service, + target_node.clone(), + local_ancestor_sender, + DummyNetworkService::default(), + 15, + None, + None, + local_node.chain().dag().clone(), + )?; + let branch = sync_task.await?; + info!("checking branch in sync service is the same as target's branch"); + assert_eq!(branch.current_header().id(), target.target_id.id()); + + let block_connector_service = registry + .service_ref::>() + .await? + .clone(); + let result = block_connector_service + .send(CheckBlockConnectorHashValue { + head_hash: target.target_id.id(), + number: target.target_id.number(), + }) + .await?; + if result.is_ok() { + break; + } + let reports = task_event_counter.get_reports(); + reports + .iter() + .for_each(|report| debug!("reports: {}", report)); + } + + Ok(target_node) +} + +#[cfg(test)] +// async fn sync_dag_chain( +// mut target_node: Arc, +// local_node: Arc, +// registry: &ServiceRef, +// ) -> Result<()> { +// Arc::get_mut(&mut target_node) +// .unwrap() +// .produce_block_and_create_dag(21)?; +// Ok(()) + + // let flexidag_service = registry.service_ref::().await?; + // let local_dag_accumulator_info = flexidag_service.send(GetDagAccumulatorInfo).await??.ok_or(anyhow!("dag accumulator is none"))?; + + // let result = sync_dag_full_task( + // local_dag_accumulator_info, + // target_accumulator_info, + // target_node.clone(), + // accumulator_store, + // accumulator_snapshot, + // local_store, + // local_net.time_service(), + // None, + // connector_service, + // network, + // false, + // dag, + // block_chain_service, + // flexidag_service, + // local_net.id().clone(), + // )?; + + // Ok(result) +// } + +// #[cfg(test)] +// async fn sync_dag_block_from_single_chain( +// mut target_node: Arc, +// local_node: Arc, +// registry: &ServiceRef, +// block_count: u64, +// ) -> Result> { +// use starcoin_consensus::BlockDAG; + +// Arc::get_mut(&mut target_node) +// .unwrap() +// .produce_block(block_count)?; +// loop { +// let target = target_node.sync_target(); + +// let storage = local_node.chain().get_storage(); +// let startup_info = storage +// .get_startup_info()? 
+// .ok_or_else(|| format_err!("Startup info should exist."))?; +// let current_block_id = startup_info.main; + +// let local_net = local_node.chain_mocker.net(); +// let (local_ancestor_sender, _local_ancestor_receiver) = unbounded(); + +// let block_chain_service = async_std::task::block_on( +// registry.service_ref::>(), +// )?; + +// let (sync_task, _task_handle, task_event_counter) = if local_node.chain().head_block().block.header().number() +// > BlockDAG::dag_fork_height_with_net(local_net.id().clone()) { + +// } else { +// full_sync_task( +// current_block_id, +// target.clone(), +// false, +// local_net.time_service(), +// storage.clone(), +// block_chain_service, +// target_node.clone(), +// local_ancestor_sender, +// DummyNetworkService::default(), +// 15, +// ChainNetworkID::TEST, +// None, +// None, +// )? +// }; + +// let branch = sync_task.await?; +// info!("checking branch in sync service is the same as target's branch"); +// assert_eq!(branch.current_header().id(), target.target_id.id()); + +// let block_connector_service = registry +// .service_ref::>() +// .await? +// .clone(); +// let result = block_connector_service +// .send(CheckBlockConnectorHashValue { +// head_hash: target.target_id.id(), +// number: target.target_id.number(), +// }) +// .await?; +// if result.is_ok() { +// break; +// } +// let reports = task_event_counter.get_reports(); +// reports +// .iter() +// .for_each(|report| debug!("reports: {}", report)); +// } + +// Ok(target_node) +// } + +#[cfg(test)] +struct SyncTestSystem { + pub target_node: SyncNodeMocker, + pub local_node: SyncNodeMocker, + pub registry: ServiceRef, +} + +#[cfg(test)] +impl SyncTestSystem { + async fn initialize_sync_system() -> Result { + let config = Arc::new(NodeConfig::random_for_test()); + + // let (storage, chain_info, _, _) = StarcoinGenesis::init_storage_for_test(config.net()) + // .expect("init storage by genesis fail."); + + let temp_path = PathBuf::from(starcoin_config::temp_dir().as_ref()) ; + let storage_path = temp_path.join(Path::new("local/storage")); + let dag_path = temp_path.join(Path::new("local/dag")); + fs::create_dir_all(storage_path.clone())?; + fs::create_dir_all(dag_path.clone())?; + let storage = Arc::new(Storage::new(StorageInstance::new_db_instance( + DBStorage::new( + storage_path.as_path(), + RocksdbConfig::default(), + None, + ) + .unwrap(), + )) + .unwrap()); + let genesis = Genesis::load_or_build(config.net())?; + // init dag + let dag_storage = starcoin_dag::consensusdb::prelude::FlexiDagStorage::create_from_path( + dag_path.as_path(), + FlexiDagStorageConfig::new(), + ).expect("init dag storage fail."); + let dag = starcoin_dag::blockdag::BlockDAG::new(8, dag_storage); // local dag + + let chain_info = genesis.execute_genesis_block(config.net(), storage.clone(), dag.clone())?; + + let target_node = SyncNodeMocker::new(config.net().clone(), 300, 50)?; + let local_node = SyncNodeMocker::new_with_storage( + config.net().clone(), + storage.clone(), + chain_info.clone(), + AccountInfo::random(), + 300, + 50, + dag.clone(), + )?; + + let (registry_sender, registry_receiver) = async_std::channel::unbounded(); + + info!( + "in test_sync_block_apply_failed_but_connect_success, start tokio runtime for main thread" + ); + + let _handle = timeout_join_handler::spawn(move || { + let system = System::with_tokio_rt(|| { + tokio::runtime::Builder::new_multi_thread() + .enable_all() + .on_thread_stop(|| debug!("main thread stopped")) + .thread_name("main") + .build() + .expect("failed to create tokio runtime 
for main") + }); + async_std::task::block_on(async { + let registry = RegistryService::launch(); + + registry.put_shared(config.clone()).await.unwrap(); + registry.put_shared(storage.clone()).await.unwrap(); + registry.put_shared(dag).await.expect("failed to put dag in registry"); + registry.put_shared(MockTxPoolService::new()).await.unwrap(); + + Delay::new(Duration::from_secs(2)).await; + + registry + .register::>() + .await + .unwrap(); + + registry_sender.send(registry).await.unwrap(); + }); + + system.run().unwrap(); + }); + + let registry = registry_receiver.recv().await.unwrap(); + + Ok(SyncTestSystem { + target_node, + local_node, + registry, + }) + } +} + +#[stest::test(timeout = 600)] +async fn test_sync_single_chain_to_dag_chain() -> Result<()> { + let test_system = SyncTestSystem::initialize_sync_system().await?; + let _target_node = sync_block_in_block_connection_service_mock( + Arc::new(test_system.target_node), + Arc::new(test_system.local_node), + &test_system.registry, + 18, + ) + .await?; + Ok(()) +} diff --git a/sync/src/verified_rpc_client.rs b/sync/src/verified_rpc_client.rs index e756e67f60..1f56337d4c 100644 --- a/sync/src/verified_rpc_client.rs +++ b/sync/src/verified_rpc_client.rs @@ -6,6 +6,7 @@ use network_api::peer_score::{InverseScore, Score}; use network_api::PeerId; use network_api::PeerInfo; use network_api::PeerSelector; +use network_api::PeerStrategy; use starcoin_accumulator::node::AccumulatorStoreType; use starcoin_accumulator::AccumulatorNode; use starcoin_crypto::hash::HashValue; @@ -123,6 +124,10 @@ impl VerifiedRpcClient { } } + pub fn switch_strategy(&mut self, strategy: PeerStrategy) { + self.peer_selector.switch_strategy(strategy) + } + pub fn selector(&self) -> &PeerSelector { &self.peer_selector } @@ -377,6 +382,17 @@ impl VerifiedRpcClient { self.client.get_block_ids(peer_id, request).await } + pub async fn get_block_headers_by_hash( + &self, + ids: Vec, + ) -> Result)>> { + let block_headers = self + .client + .get_headers_by_hash(self.select_a_peer()?, ids.clone()) + .await?; + Ok(ids.into_iter().zip(block_headers.into_iter()).collect()) + } + pub async fn get_blocks( &self, ids: Vec, @@ -426,4 +442,11 @@ impl VerifiedRpcClient { }) .collect()) } + + pub async fn get_dag_block_children( + &self, + req: Vec, + ) -> Result> { + Ok(self.client.get_dag_block_children(self.select_a_peer()?, req).await?) 
+ } } diff --git a/types/src/block/legacy.rs b/types/src/block/legacy.rs index a346d6f925..2c808628db 100644 --- a/types/src/block/legacy.rs +++ b/types/src/block/legacy.rs @@ -239,6 +239,10 @@ impl Block { pub fn id(&self) -> HashValue { self.header.id() } + + pub fn header(&self) -> &BlockHeader { + &self.header + } } impl From for crate::block::Block { diff --git a/types/src/block/mod.rs b/types/src/block/mod.rs index 25975584de..4fbff1934a 100644 --- a/types/src/block/mod.rs +++ b/types/src/block/mod.rs @@ -35,12 +35,13 @@ pub type BlockNumber = u64; //TODO: make sure height pub type ParentsHash = Option>; -pub static DEV_FLEXIDAG_FORK_HEIGHT: BlockNumber = 100000; +pub static DEV_FLEXIDAG_FORK_HEIGHT: BlockNumber = 2; pub static TEST_FLEXIDAG_FORK_HEIGHT: BlockNumber = 2; pub static PROXIMA_FLEXIDAG_FORK_HEIGHT: BlockNumber = 10000; pub static HALLEY_FLEXIDAG_FORK_HEIGHT: BlockNumber = 10000; pub static BARNARD_FLEXIDAG_FORK_HEIGHT: BlockNumber = 10000; pub static MAIN_FLEXIDAG_FORK_HEIGHT: BlockNumber = 1000000; +pub static CUSTOM_FLEXIDAG_FORK_HEIGHT: BlockNumber = 3; /// Type for block header extra #[derive(Clone, Default, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, JsonSchema)] @@ -346,7 +347,7 @@ impl BlockHeader { } else if self.chain_id.is_main() { MAIN_FLEXIDAG_FORK_HEIGHT } else { - DEV_FLEXIDAG_FORK_HEIGHT + CUSTOM_FLEXIDAG_FORK_HEIGHT } } diff --git a/types/src/system_events.rs b/types/src/system_events.rs index 0a84fe1a2d..138a3948c6 100644 --- a/types/src/system_events.rs +++ b/types/src/system_events.rs @@ -10,7 +10,10 @@ use starcoin_crypto::HashValue; use starcoin_vm_types::genesis_config::ConsensusStrategy; use std::sync::Arc; #[derive(Clone, Debug)] -pub struct NewHeadBlock(pub Arc); +pub struct NewHeadBlock { + pub executed_block: Arc, + // pub tips: Option>, +} /// may be uncle block #[derive(Clone, Debug)]
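Note on the `system_events.rs` hunk above: `NewHeadBlock` changes from a tuple struct to a struct with a named `executed_block` field, so every publisher and subscriber has to move from positional to named construction and destructuring. Below is a minimal sketch of that call-site change; it uses stand-in types rather than the real `starcoin_types` definitions, purely so the example compiles on its own.

```rust
use std::sync::Arc;

// Stand-in for the real executed-block payload (not the starcoin_types definition).
#[derive(Clone, Debug)]
struct ExecutedBlock {
    number: u64,
}

// Shape of the event after this patch: a named field instead of a tuple element.
#[derive(Clone, Debug)]
struct NewHeadBlock {
    executed_block: Arc<ExecutedBlock>,
}

fn main() {
    let executed = Arc::new(ExecutedBlock { number: 10 });

    // Before the patch a publisher would write `NewHeadBlock(executed)`;
    // afterwards the payload field is named explicitly.
    let event = NewHeadBlock {
        executed_block: executed,
    };

    // Subscribers likewise destructure by field name now.
    let NewHeadBlock { executed_block } = event;
    println!("new head block number: {}", executed_block.number);
}
```

Naming the field also leaves room to extend the event later (the commented-out `tips` field in the hunk hints at such an addition) without breaking positional patterns a second time.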