Merge branch 'tomas/pos-fix-rewards-boundary' (#1729)
* origin/tomas/pos-fix-rewards-boundary:
  changelog: add #1729
  test/ledger/finalize_block: fix slashing tests
  pos/slash: fix the validator state update to be in sync with set changes
  app/ledger/finalize_block: log block rewards before recording slashes
  test/e2e/slashing: extend the test to discover rewards issues
  pos: error out if validator is not in expected state for rewards
Fraccaman committed Jul 21, 2023
2 parents f1c396b + 4e52936 commit d5af059
Showing 4 changed files with 90 additions and 43 deletions.
@@ -0,0 +1,3 @@
- PoS: Fixed an epoch boundary issue in which a validator who is being slashed
at the start of a new epoch is disregarded during the processing of block votes.
([\#1729](https://github.com/anoma/namada/pull/1729))
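
For orientation before the diff itself: the first block of a new epoch carries the votes for the last block of the previous epoch, so rewards for those votes have to be logged while the previous epoch's validator state is still readable. The toy sketch below (illustrative names only, not the actual Namada shell API) shows why the relative order of rewards logging and slash recording matters.

// Toy model of the finalize-block ordering this commit fixes; not real Namada code.
#[derive(Clone, Copy, Debug, PartialEq)]
enum State {
    Consensus,
    Jailed,
}

struct Validator {
    state: State,
    rewarded: bool,
}

// Jailing overwrites the state that rewards logging needs to read.
fn record_slashes(v: &mut Validator) {
    v.state = State::Jailed;
}

// Rewards are only credited to validators still seen as `Consensus`.
fn log_block_rewards(v: &mut Validator) {
    if v.state == State::Consensus {
        v.rewarded = true;
    }
}

fn main() {
    // Old order: slashes recorded first, so the validator that signed the
    // epoch-boundary block is disregarded for rewards.
    let mut v = Validator { state: State::Consensus, rewarded: false };
    record_slashes(&mut v);
    log_block_rewards(&mut v);
    assert!(!v.rewarded);

    // New order (this commit): rewards are logged first, then the slash is
    // recorded, so the validator is both rewarded and jailed.
    let mut v = Validator { state: State::Consensus, rewarded: false };
    log_block_rewards(&mut v);
    record_slashes(&mut v);
    assert!(v.rewarded && v.state == State::Jailed);
}
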
97 changes: 59 additions & 38 deletions apps/src/lib/node/ledger/shell/finalize_block.rs
@@ -114,6 +114,14 @@ where
)?;
}

// Invariant: This has to be applied before `record_slashes_from_evidence`
// because it may need to read the validator state from the previous epoch,
// and jailing a validator removes that historical state
self.log_block_rewards(&req.votes, height, current_epoch, new_epoch)?;
if new_epoch {
self.apply_inflation(current_epoch)?;
}

// Invariant: This has to be applied after
// `copy_validator_sets_and_positions` and before `self.update_epoch`.
self.record_slashes_from_evidence();
@@ -526,42 +534,6 @@ where
self.update_eth_oracle();
}

// Read the block proposer of the previously committed block in storage
// (n-1 if we are in the process of finalizing n right now).
match read_last_block_proposer_address(&self.wl_storage)? {
Some(proposer_address) => {
tracing::debug!(
"Found last block proposer: {proposer_address}"
);
let votes = pos_votes_from_abci(&self.wl_storage, &req.votes);
namada_proof_of_stake::log_block_rewards(
&mut self.wl_storage,
if new_epoch {
current_epoch.prev()
} else {
current_epoch
},
&proposer_address,
votes,
)?;
}
None => {
if height > BlockHeight::default().next_height() {
tracing::error!(
"Can't find the last block proposer at height {height}"
);
} else {
tracing::debug!(
"No last block proposer at height {height}"
);
}
}
}

if new_epoch {
self.apply_inflation(current_epoch)?;
}

if !req.proposer_address.is_empty() {
let tm_raw_hash_string =
tm_raw_hash_to_string(req.proposer_address);
@@ -847,6 +819,48 @@ where

Ok(())
}

// Process the proposer and votes in the block to assign their PoS rewards.
fn log_block_rewards(
&mut self,
votes: &[VoteInfo],
height: BlockHeight,
current_epoch: Epoch,
new_epoch: bool,
) -> Result<()> {
// Read the block proposer of the previously committed block in storage
// (n-1 if we are in the process of finalizing n right now).
match read_last_block_proposer_address(&self.wl_storage)? {
Some(proposer_address) => {
tracing::debug!(
"Found last block proposer: {proposer_address}"
);
let votes = pos_votes_from_abci(&self.wl_storage, votes);
namada_proof_of_stake::log_block_rewards(
&mut self.wl_storage,
if new_epoch {
current_epoch.prev()
} else {
current_epoch
},
&proposer_address,
votes,
)?;
}
None => {
if height > BlockHeight::default().next_height() {
tracing::error!(
"Can't find the last block proposer at height {height}"
);
} else {
tracing::debug!(
"No last block proposer at height {height}"
);
}
}
}
Ok(())
}
}
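
The epoch choice in the call above is the subtle part: on the first block of a new epoch the recorded votes are for the last block of the previous epoch, so rewards are attributed to `current_epoch.prev()`. A self-contained sketch of just that selection, with a hypothetical `Epoch` stand-in for the real type:

// Hypothetical stand-in for the real `Epoch` type; illustration only.
#[derive(Clone, Copy, Debug, PartialEq)]
struct Epoch(u64);

impl Epoch {
    fn prev(self) -> Epoch {
        // The genesis epoch never logs rewards, so underflow is not expected.
        Epoch(self.0.checked_sub(1).expect("no epoch before genesis"))
    }
}

// Votes in the first block of a new epoch refer to the last block of the
// previous epoch, so that is the epoch the rewards are logged against.
fn rewards_epoch(current_epoch: Epoch, new_epoch: bool) -> Epoch {
    if new_epoch {
        current_epoch.prev()
    } else {
        current_epoch
    }
}

fn main() {
    assert_eq!(rewards_epoch(Epoch(7), true), Epoch(6));
    assert_eq!(rewards_epoch(Epoch(7), false), Epoch(7));
}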

/// Convert ABCI vote info to PoS vote info. Any info which fails the conversion
@@ -2400,8 +2414,11 @@ mod test_finalize_block {
);

// Advance to the processing epoch
let votes = get_default_true_votes(&shell.wl_storage, Epoch::default());
loop {
let votes = get_default_true_votes(
&shell.wl_storage,
shell.wl_storage.storage.block.epoch,
);
next_block_for_inflation(
&mut shell,
pkh1.clone(),
@@ -2940,10 +2957,14 @@ mod test_finalize_block {
total_voting_power: Default::default(),
},
];
let votes = get_default_true_votes(
&shell.wl_storage,
shell.wl_storage.storage.block.epoch,
);
next_block_for_inflation(
&mut shell,
pkh1.clone(),
votes.clone(),
votes,
Some(misbehaviors),
);
assert_eq!(current_epoch.0, 7_u64);
12 changes: 10 additions & 2 deletions proof_of_stake/src/lib.rs
@@ -101,6 +101,8 @@ pub enum GenesisError {
pub enum InflationError {
#[error("Error in calculating rewards: {0}")]
Rewards(rewards::RewardsError),
#[error("Expected validator {0} to be in consensus set but got: {1:?}")]
ExpectedValidatorInConsensus(Address, Option<ValidatorState>),
}

#[allow(missing_docs)]
@@ -3281,7 +3283,11 @@ where
let state = validator_state_handle(&validator_address)
.get(storage, epoch, &params)?;
if state != Some(ValidatorState::Consensus) {
continue;
return Err(InflationError::ExpectedValidatorInConsensus(
validator_address,
state,
))
.into_storage_result();
}

let stake_from_deltas =
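
The change above replaces a silent `continue` with a hard error: if a validator that earned votes is not in the consensus set for the rewards epoch, the whole rewards computation fails instead of quietly dropping that validator. A simplified, std-only sketch of the pattern (the real code uses an `#[error(...)]`-annotated `InflationError` variant and `into_storage_result()`):

// Simplified illustration; not the real namada_proof_of_stake types.
#[derive(Clone, Copy, Debug, PartialEq)]
enum ValidatorState {
    Consensus,
    Jailed,
}

#[derive(Debug)]
enum InflationError {
    ExpectedValidatorInConsensus(String, Option<ValidatorState>),
}

fn check_in_consensus(
    addr: &str,
    state: Option<ValidatorState>,
) -> Result<(), InflationError> {
    if state != Some(ValidatorState::Consensus) {
        // Previously this case was skipped with `continue`, silently dropping
        // the validator's rewards; now the inconsistency is surfaced.
        return Err(InflationError::ExpectedValidatorInConsensus(
            addr.to_string(),
            state,
        ));
    }
    Ok(())
}

fn main() {
    assert!(check_in_consensus("validator-a", Some(ValidatorState::Consensus)).is_ok());
    assert!(check_in_consensus("validator-b", Some(ValidatorState::Jailed)).is_err());
}
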
@@ -3580,8 +3586,10 @@ where
}
}
}
// Safe subtraction because `validator_set_update_epoch > current_epoch`
let start_offset = validator_set_update_epoch.0 - current_epoch.0;
// Set the validator state to `Jailed` through the pipeline epoch
for offset in 1..=params.pipeline_len {
for offset in start_offset..=params.pipeline_len {
validator_state_handle(validator).set(
storage,
ValidatorState::Jailed,
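
The loop bound change above (starting at `start_offset` instead of `1`) begins marking the validator `Jailed` at the epoch where the validator-set change actually takes effect, keeping the recorded state in sync with the set updates. A small numeric sketch with assumed values (pipeline length 2):

// Illustration of the offset change only, with the storage handles omitted.
fn jailed_epochs(
    current_epoch: u64,
    validator_set_update_epoch: u64,
    pipeline_len: u64,
) -> Vec<u64> {
    // Safe subtraction because `validator_set_update_epoch > current_epoch`.
    let start_offset = validator_set_update_epoch - current_epoch;
    (start_offset..=pipeline_len)
        .map(|offset| current_epoch + offset)
        .collect()
}

fn main() {
    // Set change scheduled two epochs ahead: only epoch 7 is marked Jailed,
    // whereas the old `1..=pipeline_len` loop would also have jailed epoch 6.
    assert_eq!(jailed_epochs(5, 7, 2), vec![7]);
    // Set change at the very next epoch: matches the old behaviour.
    assert_eq!(jailed_epochs(5, 6, 2), vec![6, 7]);
}
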
21 changes: 18 additions & 3 deletions tests/src/e2e/ledger_tests.rs
@@ -4309,6 +4309,7 @@ fn test_genesis_validators() -> Result<()> {
/// 4. Run it to get it to double vote and sign blocks
/// 5. Submit a valid token transfer tx to validator 0
/// 6. Wait for double signing evidence
/// 7. Make sure the first validator can proceed to the next epoch
#[test]
fn double_signing_gets_slashed() -> Result<()> {
use std::net::SocketAddr;
@@ -4330,7 +4331,11 @@ fn double_signing_gets_slashed() -> Result<()> {
genesis.pos_params.unbonding_len,
genesis.pos_params.cubic_slashing_window_length,
);
setup::set_validators(4, genesis, default_port_offset)
let mut genesis =
setup::set_validators(4, genesis, default_port_offset);
// Use faster epochs to make boundary issues more likely to surface
genesis.parameters.min_num_of_blocks = 2;
genesis
},
None,
)?;
@@ -4492,14 +4497,13 @@ fn double_signing_gets_slashed() -> Result<()> {
// 6. Wait for double signing evidence
let mut validator_1 = bg_validator_1.foreground();
validator_1.exp_string("Processing evidence")?;
// validator_1.exp_string("Slashing")?;

println!("\nPARSING SLASH MESSAGE\n");
let (_, res) = validator_1
.exp_regex(r"Slashing [a-z0-9]+ for Duplicate vote in epoch [0-9]+")
.unwrap();
println!("\n{res}\n");
let _bg_validator_1 = validator_1.background();
let bg_validator_1 = validator_1.background();

let exp_processing_epoch = Epoch::from_str(res.split(' ').last().unwrap())
.unwrap()
@@ -4605,6 +4609,17 @@ fn double_signing_gets_slashed() -> Result<()> {
.exp_regex(r"Validator [a-z0-9]+ is in the .* set")
.unwrap();

// 7. Make sure the first validator can proceed to the next epoch
epoch_sleep(&test, &validator_one_rpc, 120)?;

// Make sure there are no errors
let mut validator_1 = bg_validator_1.foreground();
validator_1.interrupt()?;
// Wait for the node to stop running to finish writing the state and tx
// queue
validator_1.exp_string("Namada ledger node has shut down.")?;
validator_1.assert_success();

Ok(())
}

