From aa6236ca132865c13f3ba856227edb43093b3b21 Mon Sep 17 00:00:00 2001
From: Ori Newman
Date: Thu, 31 Aug 2023 14:17:33 +0000
Subject: [PATCH] Address review comments

---
 consensus/src/consensus/mod.rs                         |  5 +++--
 consensus/src/pipeline/virtual_processor/processor.rs  | 10 ++++++----
 consensus/src/processes/pruning.rs                     | 11 ++++-------
 protocol/flows/src/v5/ibd/flow.rs                      |  6 +++---
 4 files changed, 16 insertions(+), 16 deletions(-)

diff --git a/consensus/src/consensus/mod.rs b/consensus/src/consensus/mod.rs
index 38e0d3268..e920b034d 100644
--- a/consensus/src/consensus/mod.rs
+++ b/consensus/src/consensus/mod.rs
@@ -519,12 +519,13 @@ impl ConsensusApi for Consensus {
     }
 
     fn validate_pruning_points(&self) -> ConsensusResult<()> {
+        let hst = self.storage.headers_selected_tip_store.read().get().unwrap().hash;
         let pp_info = self.pruning_point_store.read().get().unwrap();
-        if !self.services.pruning_point_manager.is_valid_pruning_point(pp_info.pruning_point) {
+        if !self.services.pruning_point_manager.is_valid_pruning_point(pp_info.pruning_point, hst) {
             return Err(ConsensusError::General("invalid pruning point candidate"));
         }
 
-        if !self.services.pruning_point_manager.are_pruning_points_in_valid_chain(pp_info) {
+        if !self.services.pruning_point_manager.are_pruning_points_in_valid_chain(pp_info, hst) {
             return Err(ConsensusError::General("past pruning points do not form a valid chain"));
         }
 
diff --git a/consensus/src/pipeline/virtual_processor/processor.rs b/consensus/src/pipeline/virtual_processor/processor.rs
index 162f842c0..af0bd171c 100644
--- a/consensus/src/pipeline/virtual_processor/processor.rs
+++ b/consensus/src/pipeline/virtual_processor/processor.rs
@@ -938,10 +938,12 @@ impl VirtualStateProcessor {
         // Ideally we would want to check if the last known pruning point has the finality point
         // in its chain, but in some cases it's impossible: let `lkp` be the last known pruning
         // point from the list, and `fup` be the first unknown pruning point (the one following `lkp`).
-        // fup.blue_score - lkp.blue_score ∈ [finality_depth, finality_depth + k], so it's possible for
-        // `lkp` not to have the finality point in its past (if `fup` is close to the sink). So we have
-        // no choice but to check if `lkp` has `finality_point.finality_point` in its chain, meaning
-        // this function can only detect finality violations in depth of 2*finality_depth.
+        // fup.blue_score - lkp.blue_score ≈ finality_depth (±k), so it's possible for `lkp` not to
+        // have the finality point in its past. So we have no choice but to check if `lkp`
+        // has `finality_point.finality_point` in its chain (in the worst case `fup` is one block
+        // above the current finality point, and in this case `lkp` will be a few blocks above the
+        // finality_point.finality_point), meaning this function can only detect finality violations
+        // in depth of 2*finality_depth, and can give false negatives for smaller finality violations.
         let current_pp = self.pruning_point_store.read().pruning_point().unwrap();
         let vf = self.virtual_finality_point(&self.virtual_stores.read().state.get().unwrap().ghostdag_data, current_pp);
         let vff = self.depth_manager.calc_finality_point(&self.ghostdag_primary_store.get_data(vf).unwrap(), current_pp);
diff --git a/consensus/src/processes/pruning.rs b/consensus/src/processes/pruning.rs
index 5f4be43e6..1d419c063 100644
--- a/consensus/src/processes/pruning.rs
+++ b/consensus/src/processes/pruning.rs
@@ -13,6 +13,7 @@ use crate::model::{
     },
 };
 use kaspa_hashes::Hash;
+use kaspa_utils::option::OptionExtensions;
 use parking_lot::RwLock;
 
 #[derive(Clone)]
@@ -181,13 +182,10 @@ impl<
         pov_blue_score >= pp_bs + self.pruning_depth
     }
 
-    pub fn is_valid_pruning_point(&self, pp_candidate: Hash) -> bool {
+    pub fn is_valid_pruning_point(&self, pp_candidate: Hash, hst: Hash) -> bool {
         if pp_candidate == self.genesis_hash {
             return true;
         }
-
-        let hst = self.header_selected_tip_store.read().get().unwrap().hash;
-
         if !self.reachability_service.is_chain_ancestor_of(pp_candidate, hst) {
             return false;
         }
@@ -196,7 +194,7 @@ impl<
         self.is_pruning_point_in_pruning_depth(hst_bs, pp_candidate)
     }
 
-    pub fn are_pruning_points_in_valid_chain(&self, pruning_info: PruningPointInfo) -> bool {
+    pub fn are_pruning_points_in_valid_chain(&self, pruning_info: PruningPointInfo, hst: Hash) -> bool {
         // We want to validate that the past pruning points form a chain to genesis. Since
         // each pruning point's header doesn't point to the previous pruning point, but to
         // the pruning point from its POV, we can't just traverse from one pruning point to
@@ -213,10 +211,9 @@ impl<
         // any other pruning point in the list, so we are compelled to check if it's refereced by
         // the selected chain.
         let mut expected_pps_queue = VecDeque::new();
-        let hst = self.header_selected_tip_store.read().get().unwrap().hash;
         for current in self.reachability_service.backward_chain_iterator(hst, pruning_info.pruning_point, false) {
             let current_header = self.headers_store.get_header(current).unwrap();
-            if expected_pps_queue.is_empty() || *expected_pps_queue.back().unwrap() != current_header.pruning_point {
+            if expected_pps_queue.back().is_none_or(|&&h| h != current_header.pruning_point) {
                 expected_pps_queue.push_back(current_header.pruning_point);
             }
         }
diff --git a/protocol/flows/src/v5/ibd/flow.rs b/protocol/flows/src/v5/ibd/flow.rs
index 350dad835..df416117a 100644
--- a/protocol/flows/src/v5/ibd/flow.rs
+++ b/protocol/flows/src/v5/ibd/flow.rs
@@ -150,7 +150,7 @@ impl IbdFlow {
             // means it's in its antichain (because if `highest_known_syncer_chain_hash` was in
             // the pruning point's past the pruning point itself would be
             // `highest_known_syncer_chain_hash`). So it means there's a finality conflict.
-            // TODO: Find a better way to handle finality conflicts.
+            // TODO: consider performing additional actions on finality conflicts in addition to disconnecting from the peer (e.g., banning, rpc notification)
             return Ok(IbdType::None);
         }
 
@@ -165,7 +165,7 @@ impl IbdFlow {
                 // We reject the headers proof if the node has a relatively up-to-date finality point and current
                 // consensus has matured for long enough (and not recently synced). This is mostly a spam-protector
                 // since subsequent checks identify these violations as well
-                // TODO: Find a better way to handle finality conflicts.
+                // TODO: consider performing additional actions on finality conflicts in addition to disconnecting from the peer (e.g., banning, rpc notification)
                 return Ok(IbdType::None);
             }
         }
@@ -231,7 +231,7 @@ impl IbdFlow {
         }
 
         if consensus.async_are_pruning_points_violating_finality(pruning_points.clone()).await {
-            // TODO: Find a better way to deal with finality conflicts
+            // TODO: consider performing additional actions on finality conflicts in addition to disconnecting from the peer (e.g., banning, rpc notification)
             return Err(ProtocolError::Other("pruning points are violating finality"));
         }
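As a side note on the processor.rs comment above, the following tiny self-contained illustration spells out the 2*finality_depth bound; the depth and blue-score constants are assumed placeholders rather than kaspad parameters, and only the arithmetic is the point:

// Illustration only; finality_depth and the blue scores are assumed values.
fn main() {
    let finality_depth: u64 = 86_400;
    let virtual_blue_score: u64 = 1_000_000;
    // The current finality point sits ~finality_depth below virtual (by blue score),
    // and finality_point.finality_point sits ~finality_depth below that.
    let finality_point_bs = virtual_blue_score - finality_depth;
    let ff_point_bs = finality_point_bs - finality_depth;
    // So checking `lkp` against finality_point.finality_point reaches back roughly
    // 2*finality_depth, which is why shallower finality violations can go undetected.
    assert_eq!(virtual_blue_score - ff_point_bs, 2 * finality_depth);
}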
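Similarly, a minimal sketch (not the kaspad implementation) of the deduplication that are_pruning_points_in_valid_chain performs while walking the selected chain backwards: a header's pruning point is queued as an expected pruning point only if it differs from the last queued one. Here Hash is stood in by u64, and std's map_or is used in place of the is_none_or helper imported from kaspa_utils::option::OptionExtensions.

use std::collections::VecDeque;

type Hash = u64; // stand-in for kaspa_hashes::Hash

// Collect the distinct pruning points seen along a backward chain walk,
// pushing a hash only when it differs from the most recently pushed one.
fn collect_expected_pruning_points(chain_pruning_points: impl IntoIterator<Item = Hash>) -> VecDeque<Hash> {
    let mut expected_pps_queue = VecDeque::new();
    for pp in chain_pruning_points {
        // Mirrors the patch's `expected_pps_queue.back().is_none_or(|&&h| h != ...)` condition.
        if expected_pps_queue.back().map_or(true, |&h| h != pp) {
            expected_pps_queue.push_back(pp);
        }
    }
    expected_pps_queue
}

fn main() {
    // Consecutive headers along the chain usually repeat the same pruning point.
    let expected = collect_expected_pruning_points([7, 7, 7, 5, 5, 3]);
    assert_eq!(expected, VecDeque::from(vec![7, 5, 3]));
}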