Skip to content

Commit

Permalink
Address review comments
Browse files Browse the repository at this point in the history
  • Loading branch information
someone235 committed Aug 31, 2023
1 parent edffc3f commit aa6236c
Show file tree
Hide file tree
Showing 4 changed files with 16 additions and 16 deletions.
5 changes: 3 additions & 2 deletions consensus/src/consensus/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -519,12 +519,13 @@ impl ConsensusApi for Consensus {
}

fn validate_pruning_points(&self) -> ConsensusResult<()> {
let hst = self.storage.headers_selected_tip_store.read().get().unwrap().hash;
let pp_info = self.pruning_point_store.read().get().unwrap();
if !self.services.pruning_point_manager.is_valid_pruning_point(pp_info.pruning_point) {
if !self.services.pruning_point_manager.is_valid_pruning_point(pp_info.pruning_point, hst) {
return Err(ConsensusError::General("invalid pruning point candidate"));
}

if !self.services.pruning_point_manager.are_pruning_points_in_valid_chain(pp_info) {
if !self.services.pruning_point_manager.are_pruning_points_in_valid_chain(pp_info, hst) {
return Err(ConsensusError::General("past pruning points do not form a valid chain"));
}

Expand Down
10 changes: 6 additions & 4 deletions consensus/src/pipeline/virtual_processor/processor.rs
Original file line number Diff line number Diff line change
Expand Up @@ -938,10 +938,12 @@ impl VirtualStateProcessor {
// Ideally we would want to check if the last known pruning point has the finality point
// in its chain, but in some cases it's impossible: let `lkp` be the last known pruning
// point from the list, and `fup` be the first unknown pruning point (the one following `lkp`).
// fup.blue_score - lkp.blue_score ∈ [finality_depth, finality_depth + k], so it's possible for
// `lkp` not to have the finality point in its past (if `fup` is close to the sink). So we have
// no choice but to check if `lkp` has `finality_point.finality_point` in its chain, meaning
// this function can only detect finality violations in depth of 2*finality_depth.
// fup.blue_score - lkp.blue_score ≈ finality_depth (±k), so it's possible for `lkp` not to
// have the finality point in its past. So we have no choice but to check if `lkp`
// has `finality_point.finality_point` in its chain (in the worst case `fup` is one block
// above the current finality point, and in this case `lkp` will be a few blocks above the
// finality_point.finality_point), meaning this function can only detect finality violations
// in depth of 2*finality_depth, and can give false negatives for smaller finality violations.
let current_pp = self.pruning_point_store.read().pruning_point().unwrap();
let vf = self.virtual_finality_point(&self.virtual_stores.read().state.get().unwrap().ghostdag_data, current_pp);
let vff = self.depth_manager.calc_finality_point(&self.ghostdag_primary_store.get_data(vf).unwrap(), current_pp);
Expand Down
11 changes: 4 additions & 7 deletions consensus/src/processes/pruning.rs
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@ use crate::model::{
},
};
use kaspa_hashes::Hash;
use kaspa_utils::option::OptionExtensions;
use parking_lot::RwLock;

#[derive(Clone)]
Expand Down Expand Up @@ -181,13 +182,10 @@ impl<
pov_blue_score >= pp_bs + self.pruning_depth
}

pub fn is_valid_pruning_point(&self, pp_candidate: Hash) -> bool {
pub fn is_valid_pruning_point(&self, pp_candidate: Hash, hst: Hash) -> bool {
if pp_candidate == self.genesis_hash {
return true;
}

let hst = self.header_selected_tip_store.read().get().unwrap().hash;

if !self.reachability_service.is_chain_ancestor_of(pp_candidate, hst) {
return false;
}
Expand All @@ -196,7 +194,7 @@ impl<
self.is_pruning_point_in_pruning_depth(hst_bs, pp_candidate)
}

pub fn are_pruning_points_in_valid_chain(&self, pruning_info: PruningPointInfo) -> bool {
pub fn are_pruning_points_in_valid_chain(&self, pruning_info: PruningPointInfo, hst: Hash) -> bool {
// We want to validate that the past pruning points form a chain to genesis. Since
// each pruning point's header doesn't point to the previous pruning point, but to
// the pruning point from its POV, we can't just traverse from one pruning point to
Expand All @@ -213,10 +211,9 @@ impl<
any other pruning point in the list, so we are compelled to check if it's referenced by
// the selected chain.
let mut expected_pps_queue = VecDeque::new();
let hst = self.header_selected_tip_store.read().get().unwrap().hash;
for current in self.reachability_service.backward_chain_iterator(hst, pruning_info.pruning_point, false) {
let current_header = self.headers_store.get_header(current).unwrap();
if expected_pps_queue.is_empty() || *expected_pps_queue.back().unwrap() != current_header.pruning_point {
if expected_pps_queue.back().is_none_or(|&&h| h != current_header.pruning_point) {
expected_pps_queue.push_back(current_header.pruning_point);
}
}
Expand Down
6 changes: 3 additions & 3 deletions protocol/flows/src/v5/ibd/flow.rs
Original file line number Diff line number Diff line change
Expand Up @@ -150,7 +150,7 @@ impl IbdFlow {
// means it's in its antichain (because if `highest_known_syncer_chain_hash` was in
// the pruning point's past the pruning point itself would be
// `highest_known_syncer_chain_hash`). So it means there's a finality conflict.
// TODO: Find a better way to handle finality conflicts.
// TODO: consider performing additional actions on finality conflicts in addition to disconnecting from the peer (e.g., banning, rpc notification)
return Ok(IbdType::None);
}

Expand All @@ -165,7 +165,7 @@ impl IbdFlow {
// We reject the headers proof if the node has a relatively up-to-date finality point and current
// consensus has matured for long enough (and not recently synced). This is mostly a spam-protector
// since subsequent checks identify these violations as well
// TODO: Find a better way to handle finality conflicts.
// TODO: consider performing additional actions on finality conflicts in addition to disconnecting from the peer (e.g., banning, rpc notification)
return Ok(IbdType::None);
}
}
Expand Down Expand Up @@ -231,7 +231,7 @@ impl IbdFlow {
}

if consensus.async_are_pruning_points_violating_finality(pruning_points.clone()).await {
// TODO: Find a better way to deal with finality conflicts
// TODO: consider performing additional actions on finality conflicts in addition to disconnecting from the peer (e.g., banning, rpc notification)
return Err(ProtocolError::Other("pruning points are violating finality"));
}

Expand Down

0 comments on commit aa6236c

Please sign in to comment.