diff --git a/Cargo.lock b/Cargo.lock index 75ef8f54db7..068e8d32d94 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8371,6 +8371,34 @@ dependencies = [ "tokio", ] +[[package]] +name = "spl-slashing" +version = "0.1.0" +dependencies = [ + "bincode", + "bitflags 2.6.0", + "bytemuck", + "generic-array 0.14.7", + "lazy_static", + "num-derive", + "num-traits", + "num_enum", + "rand 0.8.5", + "serde", + "serde_bytes", + "serde_derive", + "serde_with", + "solana-client", + "solana-entry", + "solana-ledger", + "solana-program", + "solana-program-test", + "solana-sdk", + "spl-pod 0.5.0", + "spl-record", + "thiserror 1.0.68", +] + [[package]] name = "spl-stake-pool" version = "2.0.1" diff --git a/Cargo.toml b/Cargo.toml index 3aa974f74d1..df80da2c754 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -50,6 +50,7 @@ members = [ "shared-memory/program", "single-pool/cli", "single-pool/program", + "slashing/program", "stake-pool/cli", "stake-pool/program", "stateless-asks/program", diff --git a/slashing/README.md b/slashing/README.md new file mode 100644 index 00000000000..e69de29bb2d diff --git a/slashing/program/Cargo.toml b/slashing/program/Cargo.toml new file mode 100644 index 00000000000..a8e47e5b022 --- /dev/null +++ b/slashing/program/Cargo.toml @@ -0,0 +1,48 @@ +[package] +name = "spl-slashing" +version = "0.1.0" +description = "Solana Program Library Slashing" +authors = ["Solana Labs Maintainers "] +repository = "https://github.com/solana-labs/solana-program-library" +license = "Apache-2.0" +edition = "2021" + +[features] +no-entrypoint = [] +test-sbf = [] + +[dependencies] +bitflags = { version = "2.6.0", features = ["serde"] } +bytemuck = { version = "1.19.0", features = ["derive"] } +num_enum = "0.7.3" +generic-array = { version = "0.14.7", features = ["serde"], default-features = false } +bincode = "1.3.3" +num-derive = "0.4" +num-traits = "0.2" +solana-program = "2.1.0" +serde = "1.0.210" # must match the serde_derive version, see 
https://github.com/serde-rs/serde/issues/2584#issuecomment-1685252251 +serde_bytes = "0.11.15" +serde_derive = "1.0.210" # must match the serde version, see https://github.com/serde-rs/serde/issues/2584#issuecomment-1685252251 +serde_with = { version = "3.11.0", default-features = false } + +thiserror = "1.0" +spl-pod = { version = "0.5.0", path = "../../libraries/pod" } + +[dev-dependencies] +lazy_static = "1.5.0" +solana-program-test = "2.1.0" +solana-sdk = "2.1.0" +solana-ledger = "2.1.0" +solana-entry = "2.1.0" +solana-client = "2.1.0" +spl-record = { version = "0.3.0", path = "../../record/program" } +rand = "0.8.5" + +[lib] +crate-type = ["cdylib", "lib"] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[lints] +workspace = true diff --git a/slashing/program/program-id.md b/slashing/program/program-id.md new file mode 100644 index 00000000000..116e8d6bcc1 --- /dev/null +++ b/slashing/program/program-id.md @@ -0,0 +1 @@ +S1ashing11111111111111111111111111111111111 diff --git a/slashing/program/src/duplicate_block_proof.rs b/slashing/program/src/duplicate_block_proof.rs new file mode 100644 index 00000000000..8a093f6ee51 --- /dev/null +++ b/slashing/program/src/duplicate_block_proof.rs @@ -0,0 +1,872 @@ +//! 
Duplicate block proof data and verification +use { + crate::{ + error::SlashingError, + shred::{Shred, ShredType}, + state::{ProofType, SlashingProofData}, + }, + bytemuck::try_from_bytes, + solana_program::{clock::Slot, msg, pubkey::Pubkey}, + spl_pod::primitives::PodU32, +}; + +/// Proof of a duplicate block violation +pub struct DuplicateBlockProofData<'a> { + /// Shred signed by a leader + pub shred1: &'a [u8], + /// Conflicting shred signed by the same leader + pub shred2: &'a [u8], +} + +impl<'a> DuplicateBlockProofData<'a> { + const LENGTH_SIZE: usize = std::mem::size_of::(); + + /// Packs proof data to write in account for + /// `SlashingInstruction::DuplicateBlockProof` + pub fn pack(self) -> Vec { + let mut buf = vec![]; + buf.extend_from_slice(&(self.shred1.len() as u32).to_le_bytes()); + buf.extend_from_slice(self.shred1); + buf.extend_from_slice(&(self.shred2.len() as u32).to_le_bytes()); + buf.extend_from_slice(self.shred2); + buf + } + + /// Given the maximum size of a shred as `shred_size` this returns + /// the maximum size of the account needed to store a + /// `DuplicateBlockProofData` + pub const fn size_of(shred_size: usize) -> usize { + 2usize + .wrapping_mul(shred_size) + .saturating_add(2 * Self::LENGTH_SIZE) + } +} + +impl<'a> SlashingProofData<'a> for DuplicateBlockProofData<'a> { + const PROOF_TYPE: ProofType = ProofType::DuplicateBlockProof; + + fn verify_proof(self, slot: Slot, _node_pubkey: &Pubkey) -> Result<(), SlashingError> { + // TODO: verify through instruction inspection that the shreds were sigverified + // earlier in this transaction. + // Ed25519 Singature verification is performed on the merkle root: + // node_pubkey.verify_strict(merkle_root, signature). + // We will verify that the pubkey merkle root and signature match the shred and + // that the verification was successful. 
+ let shred1 = Shred::new_from_payload(self.shred1)?; + let shred2 = Shred::new_from_payload(self.shred2)?; + check_shreds(slot, &shred1, &shred2) + } + + fn unpack(data: &'a [u8]) -> Result + where + Self: Sized, + { + if data.len() < Self::LENGTH_SIZE { + return Err(SlashingError::ProofBufferTooSmall); + } + let (length1, data) = data.split_at(Self::LENGTH_SIZE); + let shred1_length = try_from_bytes::(length1) + .map_err(|_| SlashingError::ProofBufferDeserializationError)?; + let shred1_length = u32::from(*shred1_length) as usize; + + if data.len() < shred1_length { + return Err(SlashingError::ProofBufferTooSmall); + } + let (shred1, data) = data.split_at(shred1_length); + + if data.len() < Self::LENGTH_SIZE { + return Err(SlashingError::ProofBufferTooSmall); + } + let (length2, shred2) = data.split_at(Self::LENGTH_SIZE); + let shred2_length = try_from_bytes::(length2) + .map_err(|_| SlashingError::ProofBufferDeserializationError)?; + let shred2_length = u32::from(*shred2_length) as usize; + + if shred2.len() < shred2_length { + return Err(SlashingError::ProofBufferTooSmall); + } + + Ok(Self { shred1, shred2 }) + } +} + +/// Check that `shred1` and `shred2` indicate a valid duplicate proof +/// - Must be for the same slot `slot` +/// - Must be for the same shred version +/// - Must have a merkle root conflict, otherwise `shred1` and `shred2` must +/// have the same `shred_type` +/// - If `shred1` and `shred2` share the same index they must be not have +/// equal payloads excluding the retransmitter signature +/// - If `shred1` and `shred2` do not share the same index and are data +/// shreds verify that they indicate an index conflict. One of them must +/// be the LAST_SHRED_IN_SLOT, however the other shred must have a higher +/// index. 
+/// - If `shred1` and `shred2` do not share the same index and are coding +/// shreds verify that they have conflicting erasure metas +fn check_shreds(slot: Slot, shred1: &Shred, shred2: &Shred) -> Result<(), SlashingError> { + if shred1.slot()? != slot { + msg!( + "Invalid proof for different slots {} vs {}", + shred1.slot()?, + slot, + ); + return Err(SlashingError::SlotMismatch); + } + + if shred2.slot()? != slot { + msg!( + "Invalid proof for different slots {} vs {}", + shred1.slot()?, + slot, + ); + return Err(SlashingError::SlotMismatch); + } + + if shred1.version()? != shred2.version()? { + msg!( + "Invalid proof for different shred versions {} vs {}", + shred1.version()?, + shred2.version()?, + ); + return Err(SlashingError::InvalidShredVersion); + } + + // Merkle root conflict check + if shred1.fec_set_index()? == shred2.fec_set_index()? + && shred1.merkle_root()? != shred2.merkle_root()? + { + // Legacy shreds are discarded by validators and already filtered out + // above during proof deserialization, so any valid proof should have + // merkle roots. + msg!( + "Valid merkle root conflict for fec set {}, {:?} vs {:?}", + shred1.fec_set_index()?, + shred1.merkle_root()?, + shred2.merkle_root()? + ); + return Ok(()); + } + + // Overlapping fec set check + if shred1.shred_type() == ShredType::Code && shred1.fec_set_index()? < shred2.fec_set_index()? { + let next_fec_set_index = shred1.next_fec_set_index()?; + if next_fec_set_index > shred2.fec_set_index()? { + msg!( + "Valid overlapping fec set conflict. fec set {}'s next set is {} \ + however we observed a shred with fec set index {}", + shred1.fec_set_index()?, + next_fec_set_index, + shred2.fec_set_index()? + ); + return Ok(()); + } + } + + if shred2.shred_type() == ShredType::Code && shred1.fec_set_index()? > shred2.fec_set_index()? { + let next_fec_set_index = shred2.next_fec_set_index()?; + if next_fec_set_index > shred1.fec_set_index()? { + msg!( + "Valid overlapping fec set conflict. 
fec set {}'s next set is {} \ + however we observed a shred with fec set index {}", + shred2.fec_set_index()?, + next_fec_set_index, + shred1.fec_set_index()? + ); + return Ok(()); + } + } + + if shred1.shred_type() != shred2.shred_type() { + msg!( + "Invalid proof for different shred types {:?} vs {:?}", + shred1.shred_type(), + shred2.shred_type() + ); + return Err(SlashingError::ShredTypeMismatch); + } + + if shred1.index()? == shred2.index()? { + if shred1.is_shred_duplicate(shred2) { + msg!("Valid payload mismatch for shred index {}", shred1.index()?); + return Ok(()); + } + msg!( + "Invalid proof, payload matches for index {}", + shred1.index()? + ); + return Err(SlashingError::InvalidPayloadProof); + } + + if shred1.shred_type() == ShredType::Data { + if shred1.last_in_slot()? && shred2.index()? > shred1.index()? { + msg!( + "Valid last in slot conflict last index {} but shred with index {} is present", + shred1.index()?, + shred2.index()? + ); + return Ok(()); + } + if shred2.last_in_slot()? && shred1.index()? > shred2.index()? { + msg!( + "Valid last in slot conflict last index {} but shred with index {} is present", + shred2.index()?, + shred1.index()? + ); + return Ok(()); + } + msg!( + "Invalid proof, no last in shred conflict for data shreds {} and {}", + shred1.index()?, + shred2.index()? + ); + return Err(SlashingError::InvalidLastIndexConflict); + } + + if shred1.fec_set_index() == shred2.fec_set_index() + && !shred1.check_erasure_consistency(shred2)? 
+ { + msg!( + "Valid erasure meta conflict in fec set {}, config {:?} vs {:?}", + shred1.fec_set_index()?, + shred1.erasure_meta()?, + shred2.erasure_meta()?, + ); + return Ok(()); + } + msg!( + "Invalid proof, no erasure meta conflict for coding shreds set {} idx {} and set {} idx {}", + shred1.fec_set_index()?, + shred1.index()?, + shred2.fec_set_index()?, + shred2.index()?, + ); + Err(SlashingError::InvalidErasureMetaConflict) +} + +#[cfg(test)] +mod tests { + use { + super::*, + crate::shred::{ + tests::{new_rand_coding_shreds, new_rand_data_shred, new_rand_shreds}, + SIZE_OF_SIGNATURE, + }, + rand::Rng, + solana_ledger::shred::{Shred as SolanaShred, Shredder}, + solana_sdk::signature::{Keypair, Signature, Signer}, + std::sync::Arc, + }; + + const SLOT: Slot = 53084024; + const PARENT_SLOT: Slot = SLOT - 1; + const REFERENCE_TICK: u8 = 0; + const VERSION: u16 = 0; + + fn generate_proof_data<'a>( + shred1: &'a SolanaShred, + shred2: &'a SolanaShred, + ) -> DuplicateBlockProofData<'a> { + DuplicateBlockProofData { + shred1: shred1.payload().as_slice(), + shred2: shred2.payload().as_slice(), + } + } + + #[test] + fn test_legacy_shreds_invalid() { + let mut rng = rand::thread_rng(); + let leader = Arc::new(Keypair::new()); + let shredder = Shredder::new(SLOT, PARENT_SLOT, REFERENCE_TICK, VERSION).unwrap(); + let next_shred_index = rng.gen_range(0..32_000); + let legacy_data_shred = + new_rand_data_shred(&mut rng, next_shred_index, &shredder, &leader, false, false); + let legacy_coding_shred = + new_rand_coding_shreds(&mut rng, next_shred_index, 5, &shredder, &leader, false)[0] + .clone(); + let data_shred = + new_rand_data_shred(&mut rng, next_shred_index, &shredder, &leader, true, false); + let coding_shred = + new_rand_coding_shreds(&mut rng, next_shred_index, 5, &shredder, &leader, true)[0] + .clone(); + + let test_cases = [ + (legacy_data_shred.clone(), legacy_data_shred.clone()), + (legacy_coding_shred.clone(), legacy_coding_shred.clone()), + 
(legacy_data_shred.clone(), legacy_coding_shred.clone()), + // Mix of legacy and merkle + (legacy_data_shred.clone(), data_shred.clone()), + (legacy_coding_shred.clone(), coding_shred.clone()), + (legacy_data_shred.clone(), coding_shred.clone()), + (data_shred.clone(), legacy_coding_shred.clone()), + ]; + for (shred1, shred2) in test_cases.iter().flat_map(|(a, b)| [(a, b), (b, a)]) { + let proof_data = generate_proof_data(shred1, shred2); + assert_eq!( + proof_data.verify_proof(SLOT, &leader.pubkey()).unwrap_err(), + SlashingError::LegacyShreds, + ); + } + } + + #[test] + fn test_slot_invalid() { + let mut rng = rand::thread_rng(); + let leader = Arc::new(Keypair::new()); + let shredder_slot = Shredder::new(SLOT, PARENT_SLOT, REFERENCE_TICK, VERSION).unwrap(); + let shredder_bad_slot = + Shredder::new(SLOT + 1, PARENT_SLOT, REFERENCE_TICK, VERSION).unwrap(); + let next_shred_index = rng.gen_range(0..32_000); + let data_shred = new_rand_data_shred( + &mut rng, + next_shred_index, + &shredder_slot, + &leader, + true, + false, + ); + let data_shred_bad_slot = new_rand_data_shred( + &mut rng, + next_shred_index, + &shredder_bad_slot, + &leader, + true, + false, + ); + let coding_shred = + new_rand_coding_shreds(&mut rng, next_shred_index, 5, &shredder_slot, &leader, true)[0] + .clone(); + + let coding_shred_bad_slot = new_rand_coding_shreds( + &mut rng, + next_shred_index, + 5, + &shredder_bad_slot, + &leader, + true, + )[0] + .clone(); + + let test_cases = vec![ + (data_shred_bad_slot.clone(), data_shred_bad_slot.clone()), + (coding_shred_bad_slot.clone(), coding_shred_bad_slot.clone()), + (data_shred_bad_slot.clone(), coding_shred_bad_slot.clone()), + (data_shred.clone(), data_shred_bad_slot.clone()), + (coding_shred.clone(), coding_shred_bad_slot.clone()), + (data_shred.clone(), coding_shred_bad_slot.clone()), + (data_shred_bad_slot.clone(), coding_shred.clone()), + ]; + + for (shred1, shred2) in test_cases.iter().flat_map(|(a, b)| [(a, b), (b, a)]) { + let 
proof_data = generate_proof_data(shred1, shred2); + assert_eq!( + proof_data.verify_proof(SLOT, &leader.pubkey()).unwrap_err(), + SlashingError::SlotMismatch + ); + } + } + + #[test] + fn test_payload_proof_valid() { + let mut rng = rand::thread_rng(); + let leader = Arc::new(Keypair::new()); + let shredder = Shredder::new(SLOT, PARENT_SLOT, REFERENCE_TICK, VERSION).unwrap(); + let next_shred_index = rng.gen_range(0..32_000); + let shred1 = + new_rand_data_shred(&mut rng, next_shred_index, &shredder, &leader, true, true); + let shred2 = + new_rand_data_shred(&mut rng, next_shred_index, &shredder, &leader, true, true); + let proof_data = generate_proof_data(&shred1, &shred2); + proof_data.verify_proof(SLOT, &leader.pubkey()).unwrap(); + } + + #[test] + fn test_payload_proof_invalid() { + let mut rng = rand::thread_rng(); + let leader = Arc::new(Keypair::new()); + let shredder = Shredder::new(SLOT, PARENT_SLOT, REFERENCE_TICK, VERSION).unwrap(); + let next_shred_index = rng.gen_range(0..32_000); + let data_shred = + new_rand_data_shred(&mut rng, next_shred_index, &shredder, &leader, true, true); + let coding_shreds = + new_rand_coding_shreds(&mut rng, next_shred_index, 10, &shredder, &leader, true); + let test_cases = vec![ + // Same data_shred + (data_shred.clone(), data_shred), + // Same coding_shred + (coding_shreds[0].clone(), coding_shreds[0].clone()), + ]; + + for (shred1, shred2) in test_cases.into_iter() { + let proof_data = generate_proof_data(&shred1, &shred2); + assert_eq!( + proof_data.verify_proof(SLOT, &leader.pubkey()).unwrap_err(), + SlashingError::InvalidPayloadProof + ); + } + } + + #[test] + fn test_merkle_root_proof_valid() { + let mut rng = rand::thread_rng(); + let leader = Arc::new(Keypair::new()); + let shredder = Shredder::new(SLOT, PARENT_SLOT, REFERENCE_TICK, VERSION).unwrap(); + let next_shred_index = rng.gen_range(0..32_000); + let (data_shreds, coding_shreds) = new_rand_shreds( + &mut rng, + next_shred_index, + next_shred_index, + 10, + 
true, /* merkle_variant */ + &shredder, + &leader, + false, + ); + + let (diff_data_shreds, diff_coding_shreds) = new_rand_shreds( + &mut rng, + next_shred_index, + next_shred_index, + 10, + true, /* merkle_variant */ + &shredder, + &leader, + false, + ); + + let test_cases = vec![ + (data_shreds[0].clone(), diff_data_shreds[1].clone()), + (coding_shreds[0].clone(), diff_coding_shreds[1].clone()), + (data_shreds[0].clone(), diff_coding_shreds[0].clone()), + (coding_shreds[0].clone(), diff_data_shreds[0].clone()), + ]; + + for (shred1, shred2) in test_cases.iter().flat_map(|(a, b)| [(a, b), (b, a)]) { + let proof_data = generate_proof_data(shred1, shred2); + proof_data.verify_proof(SLOT, &leader.pubkey()).unwrap(); + } + } + + #[test] + fn test_merkle_root_proof_invalid() { + let mut rng = rand::thread_rng(); + let leader = Arc::new(Keypair::new()); + let shredder = Shredder::new(SLOT, PARENT_SLOT, REFERENCE_TICK, VERSION).unwrap(); + let next_shred_index = rng.gen_range(0..32_000); + let (data_shreds, coding_shreds) = new_rand_shreds( + &mut rng, + next_shred_index, + next_shred_index, + 10, + true, + &shredder, + &leader, + true, + ); + + let (next_data_shreds, next_coding_shreds) = new_rand_shreds( + &mut rng, + next_shred_index + 33, + next_shred_index + 33, + 10, + true, + &shredder, + &leader, + true, + ); + + let test_cases = vec![ + // Same fec set same merkle root + (coding_shreds[0].clone(), data_shreds[0].clone()), + // Different FEC set different merkle root + (coding_shreds[0].clone(), next_data_shreds[0].clone()), + (next_coding_shreds[0].clone(), data_shreds[0].clone()), + ]; + + for (shred1, shred2) in test_cases.iter().flat_map(|(a, b)| [(a, b), (b, a)]) { + let proof_data = generate_proof_data(shred1, shred2); + assert_eq!( + proof_data.verify_proof(SLOT, &leader.pubkey()).unwrap_err(), + SlashingError::ShredTypeMismatch + ); + } + } + + #[test] + fn test_last_index_conflict_valid() { + let mut rng = rand::thread_rng(); + let leader = 
Arc::new(Keypair::new()); + let shredder = Shredder::new(SLOT, PARENT_SLOT, REFERENCE_TICK, VERSION).unwrap(); + let next_shred_index = rng.gen_range(0..32_000); + let test_cases = vec![ + ( + new_rand_data_shred(&mut rng, next_shred_index, &shredder, &leader, true, true), + new_rand_data_shred( + &mut rng, + // With Merkle shreds, last erasure batch is padded with + // empty data shreds. + next_shred_index + 30, + &shredder, + &leader, + true, + false, + ), + ), + ( + new_rand_data_shred( + &mut rng, + next_shred_index + 100, + &shredder, + &leader, + true, + true, + ), + new_rand_data_shred(&mut rng, next_shred_index, &shredder, &leader, true, true), + ), + ]; + + for (shred1, shred2) in test_cases.iter().flat_map(|(a, b)| [(a, b), (b, a)]) { + let proof_data = generate_proof_data(shred1, shred2); + proof_data.verify_proof(SLOT, &leader.pubkey()).unwrap(); + } + } + + #[test] + fn test_last_index_conflict_invalid() { + let mut rng = rand::thread_rng(); + let leader = Arc::new(Keypair::new()); + let shredder = Shredder::new(SLOT, PARENT_SLOT, REFERENCE_TICK, VERSION).unwrap(); + let next_shred_index = rng.gen_range(0..32_000); + let test_cases = vec![ + ( + new_rand_data_shred(&mut rng, next_shred_index, &shredder, &leader, true, false), + new_rand_data_shred( + &mut rng, + next_shred_index + 1, + &shredder, + &leader, + true, + true, + ), + ), + ( + new_rand_data_shred( + &mut rng, + next_shred_index + 1, + &shredder, + &leader, + true, + true, + ), + new_rand_data_shred(&mut rng, next_shred_index, &shredder, &leader, true, false), + ), + ( + new_rand_data_shred( + &mut rng, + next_shred_index + 100, + &shredder, + &leader, + true, + false, + ), + new_rand_data_shred(&mut rng, next_shred_index, &shredder, &leader, true, false), + ), + ( + new_rand_data_shred(&mut rng, next_shred_index, &shredder, &leader, true, false), + new_rand_data_shred( + &mut rng, + next_shred_index + 100, + &shredder, + &leader, + true, + false, + ), + ), + ]; + + for (shred1, shred2) in 
test_cases.iter().flat_map(|(a, b)| [(a, b), (b, a)]) { + let proof_data = generate_proof_data(shred1, shred2); + assert_eq!( + proof_data.verify_proof(SLOT, &leader.pubkey()).unwrap_err(), + SlashingError::InvalidLastIndexConflict + ); + } + } + + #[test] + fn test_erasure_meta_conflict_valid() { + let mut rng = rand::thread_rng(); + let leader = Arc::new(Keypair::new()); + let shredder = Shredder::new(SLOT, PARENT_SLOT, REFERENCE_TICK, VERSION).unwrap(); + let next_shred_index = rng.gen_range(0..32_000); + let coding_shreds = + new_rand_coding_shreds(&mut rng, next_shred_index, 10, &shredder, &leader, true); + let coding_shreds_bigger = + new_rand_coding_shreds(&mut rng, next_shred_index, 13, &shredder, &leader, true); + let coding_shreds_smaller = + new_rand_coding_shreds(&mut rng, next_shred_index, 7, &shredder, &leader, true); + + // Same fec-set, different index, different erasure meta + let test_cases = vec![ + (coding_shreds[0].clone(), coding_shreds_bigger[1].clone()), + (coding_shreds[0].clone(), coding_shreds_smaller[1].clone()), + ]; + for (shred1, shred2) in test_cases.iter().flat_map(|(a, b)| [(a, b), (b, a)]) { + let proof_data = generate_proof_data(shred1, shred2); + proof_data.verify_proof(SLOT, &leader.pubkey()).unwrap(); + } + } + + #[test] + fn test_erasure_meta_conflict_invalid() { + let mut rng = rand::thread_rng(); + let leader = Arc::new(Keypair::new()); + let shredder = Shredder::new(SLOT, PARENT_SLOT, REFERENCE_TICK, VERSION).unwrap(); + let next_shred_index = rng.gen_range(0..32_000); + let coding_shreds = + new_rand_coding_shreds(&mut rng, next_shred_index, 10, &shredder, &leader, true); + let coding_shreds_different_fec = new_rand_coding_shreds( + &mut rng, + next_shred_index + 100, + 10, + &shredder, + &leader, + true, + ); + let coding_shreds_different_fec_and_size = new_rand_coding_shreds( + &mut rng, + next_shred_index + 100, + 13, + &shredder, + &leader, + true, + ); + + let test_cases = vec![ + // Different index, different fec 
set, same erasure meta + ( + coding_shreds[0].clone(), + coding_shreds_different_fec[1].clone(), + ), + // Different index, different fec set, different erasure meta + ( + coding_shreds[0].clone(), + coding_shreds_different_fec_and_size[1].clone(), + ), + // Different index, same fec set, same erasure meta + (coding_shreds[0].clone(), coding_shreds[1].clone()), + ( + coding_shreds_different_fec[0].clone(), + coding_shreds_different_fec[1].clone(), + ), + ( + coding_shreds_different_fec_and_size[0].clone(), + coding_shreds_different_fec_and_size[1].clone(), + ), + ]; + + for (shred1, shred2) in test_cases.iter().flat_map(|(a, b)| [(a, b), (b, a)]) { + let proof_data = generate_proof_data(shred1, shred2); + assert_eq!( + proof_data.verify_proof(SLOT, &leader.pubkey()).unwrap_err(), + SlashingError::InvalidErasureMetaConflict + ); + } + } + + #[test] + fn test_shred_version_invalid() { + let mut rng = rand::thread_rng(); + let leader = Arc::new(Keypair::new()); + let shredder = Shredder::new(SLOT, PARENT_SLOT, REFERENCE_TICK, VERSION).unwrap(); + let next_shred_index = rng.gen_range(0..32_000); + let (data_shreds, coding_shreds) = new_rand_shreds( + &mut rng, + next_shred_index, + next_shred_index, + 10, + true, + &shredder, + &leader, + true, + ); + + // Wrong shred VERSION + let shredder = Shredder::new(SLOT, PARENT_SLOT, REFERENCE_TICK, VERSION + 1).unwrap(); + let (wrong_data_shreds, wrong_coding_shreds) = new_rand_shreds( + &mut rng, + next_shred_index, + next_shred_index, + 10, + true, + &shredder, + &leader, + true, + ); + let test_cases = vec![ + // One correct shred VERSION, one wrong + (coding_shreds[0].clone(), wrong_coding_shreds[0].clone()), + (coding_shreds[0].clone(), wrong_data_shreds[0].clone()), + (data_shreds[0].clone(), wrong_coding_shreds[0].clone()), + (data_shreds[0].clone(), wrong_data_shreds[0].clone()), + ]; + + for (shred1, shred2) in test_cases.iter().flat_map(|(a, b)| [(a, b), (b, a)]) { + let proof_data = generate_proof_data(shred1, 
shred2); + assert_eq!( + proof_data.verify_proof(SLOT, &leader.pubkey()).unwrap_err(), + SlashingError::InvalidShredVersion + ); + } + } + + #[test] + fn test_retransmitter_signature_payload_proof_invalid() { + // TODO: change visbility of shred::layout::set_retransmitter_signature. + // Hardcode offsets for now; + const DATA_SHRED_OFFSET: usize = 1139; + const CODING_SHRED_OFFSET: usize = 1164; + + let mut rng = rand::thread_rng(); + let leader = Arc::new(Keypair::new()); + let shredder = Shredder::new(SLOT, PARENT_SLOT, REFERENCE_TICK, VERSION).unwrap(); + let next_shred_index = rng.gen_range(0..32_000); + let data_shred = + new_rand_data_shred(&mut rng, next_shred_index, &shredder, &leader, true, true); + let coding_shred = + new_rand_coding_shreds(&mut rng, next_shred_index, 10, &shredder, &leader, true)[0] + .clone(); + + let mut data_shred_different_retransmitter_payload = data_shred.clone().into_payload(); + let buffer = data_shred_different_retransmitter_payload + .get_mut(DATA_SHRED_OFFSET..DATA_SHRED_OFFSET + SIZE_OF_SIGNATURE) + .unwrap(); + buffer.copy_from_slice(Signature::new_unique().as_ref()); + let data_shred_different_retransmitter = + SolanaShred::new_from_serialized_shred(data_shred_different_retransmitter_payload) + .unwrap(); + + let mut coding_shred_different_retransmitter_payload = coding_shred.clone().into_payload(); + let buffer = coding_shred_different_retransmitter_payload + .get_mut(CODING_SHRED_OFFSET..CODING_SHRED_OFFSET + SIZE_OF_SIGNATURE) + .unwrap(); + buffer.copy_from_slice(Signature::new_unique().as_ref()); + let coding_shred_different_retransmitter = + SolanaShred::new_from_serialized_shred(coding_shred_different_retransmitter_payload) + .unwrap(); + + let test_cases = vec![ + // Same data shred from different retransmitter + (data_shred, data_shred_different_retransmitter), + // Same coding shred from different retransmitter + (coding_shred, coding_shred_different_retransmitter), + ]; + for (shred1, shred2) in 
test_cases.iter().flat_map(|(a, b)| [(a, b), (b, a)]) { + let proof_data = generate_proof_data(shred1, shred2); + assert_eq!( + proof_data.verify_proof(SLOT, &leader.pubkey()).unwrap_err(), + SlashingError::InvalidPayloadProof + ); + } + } + + #[test] + fn test_overlapping_erasure_meta_proof_valid() { + let mut rng = rand::thread_rng(); + let leader = Arc::new(Keypair::new()); + let shredder = Shredder::new(SLOT, PARENT_SLOT, REFERENCE_TICK, VERSION).unwrap(); + let next_shred_index = rng.gen_range(0..32_000); + let coding_shreds = + new_rand_coding_shreds(&mut rng, next_shred_index, 10, &shredder, &leader, true); + let (data_shred_next, coding_shred_next) = new_rand_shreds( + &mut rng, + next_shred_index + 1, + next_shred_index + 33, + 10, + true, + &shredder, + &leader, + true, + ); + + // Fec set is overlapping + let test_cases = vec![ + (coding_shreds[0].clone(), coding_shred_next[0].clone()), + (coding_shreds[0].clone(), data_shred_next[0].clone()), + ( + coding_shreds[2].clone(), + coding_shred_next.last().unwrap().clone(), + ), + ( + coding_shreds[2].clone(), + data_shred_next.last().unwrap().clone(), + ), + ]; + for (shred1, shred2) in test_cases.iter().flat_map(|(a, b)| [(a, b), (b, a)]) { + let proof_data = generate_proof_data(shred1, shred2); + proof_data.verify_proof(SLOT, &leader.pubkey()).unwrap(); + } + } + + #[test] + fn test_overlapping_erasure_meta_proof_invalid() { + let mut rng = rand::thread_rng(); + let leader = Arc::new(Keypair::new()); + let shredder = Shredder::new(SLOT, PARENT_SLOT, REFERENCE_TICK, VERSION).unwrap(); + let next_shred_index = rng.gen_range(0..32_000); + let (data_shred, coding_shred) = new_rand_shreds( + &mut rng, + next_shred_index, + next_shred_index, + 10, + true, + &shredder, + &leader, + true, + ); + let next_shred_index = next_shred_index + data_shred.len() as u32; + let next_code_index = next_shred_index + coding_shred.len() as u32; + let (data_shred_next, coding_shred_next) = new_rand_shreds( + &mut rng, + 
next_shred_index, + next_code_index, + 10, + true, + &shredder, + &leader, + true, + ); + let test_cases = vec![ + ( + coding_shred[0].clone(), + data_shred_next[0].clone(), + SlashingError::ShredTypeMismatch, + ), + ( + coding_shred[0].clone(), + coding_shred_next[0].clone(), + SlashingError::InvalidErasureMetaConflict, + ), + ( + coding_shred[0].clone(), + data_shred_next.last().unwrap().clone(), + SlashingError::ShredTypeMismatch, + ), + ( + coding_shred[0].clone(), + coding_shred_next.last().unwrap().clone(), + SlashingError::InvalidErasureMetaConflict, + ), + ]; + + for (shred1, shred2, expected) in test_cases + .iter() + .flat_map(|(a, b, c)| [(a, b, c), (b, a, c)]) + { + let proof_data = generate_proof_data(shred1, shred2); + assert_eq!( + proof_data.verify_proof(SLOT, &leader.pubkey()).unwrap_err(), + *expected, + ); + } + } +} diff --git a/slashing/program/src/entrypoint.rs b/slashing/program/src/entrypoint.rs new file mode 100644 index 00000000000..62a02f2a465 --- /dev/null +++ b/slashing/program/src/entrypoint.rs @@ -0,0 +1,14 @@ +//! Program entrypoint + +#![cfg(all(target_os = "solana", not(feature = "no-entrypoint")))] + +use solana_program::{account_info::AccountInfo, entrypoint::ProgramResult, pubkey::Pubkey}; + +solana_program::entrypoint!(process_instruction); +fn process_instruction( + program_id: &Pubkey, + accounts: &[AccountInfo], + instruction_data: &[u8], +) -> ProgramResult { + crate::processor::process_instruction(program_id, accounts, instruction_data) +} diff --git a/slashing/program/src/error.rs b/slashing/program/src/error.rs new file mode 100644 index 00000000000..0c6d2b4ec3b --- /dev/null +++ b/slashing/program/src/error.rs @@ -0,0 +1,83 @@ +//! Error types + +use { + num_derive::FromPrimitive, + solana_program::{decode_error::DecodeError, program_error::ProgramError}, + thiserror::Error, +}; + +/// Errors that may be returned by the program. 
+#[derive(Clone, Copy, Debug, Eq, Error, FromPrimitive, PartialEq)] +pub enum SlashingError { + /// Violation is too old for statue of limitations + #[error("Exceeds statue of limitations")] + ExceedsStatueOfLimitations, + + /// Invalid shred variant + #[error("Invalid shred variant")] + InvalidShredVariant, + + /// Invalid merkle shred + #[error("Invalid Merkle shred")] + InvalidMerkleShred, + + /// Invalid duplicate block payload proof + #[error("Invalid payload proof")] + InvalidPayloadProof, + + /// Invalid duplicate block erasure meta proof + #[error("Invalid erasure meta conflict")] + InvalidErasureMetaConflict, + + /// Invalid instruction + #[error("Invalid instruction")] + InvalidInstruction, + + /// Invalid duplicate block last index proof + #[error("Invalid last index conflict")] + InvalidLastIndexConflict, + + /// Invalid shred version on duplicate block proof shreds + #[error("Invalid shred version")] + InvalidShredVersion, + + /// Invalid signature on duplicate block proof shreds + #[error("Invalid signature")] + InvalidSignature, + + /// Legacy shreds are not supported + #[error("Legacy shreds are not eligible for slashing")] + LegacyShreds, + + /// Unable to deserialize proof buffer + #[error("Proof buffer deserialization error")] + ProofBufferDeserializationError, + + /// Proof buffer is too small + #[error("Proof buffer too small")] + ProofBufferTooSmall, + + /// Shred deserialization error + #[error("Deserialization error")] + ShredDeserializationError, + + /// Invalid shred type on duplicate block proof shreds + #[error("Shred type mismatch")] + ShredTypeMismatch, + + /// Invalid slot on duplicate block proof shreds + #[error("Slot mismatch")] + SlotMismatch, +} + +impl From for ProgramError { + fn from(e: SlashingError) -> Self { + ProgramError::Custom(e as u32) + } +} + +impl DecodeError for SlashingError { + fn type_of() -> &'static str { + "Slashing Error" + } +} diff --git a/slashing/program/src/instruction.rs 
b/slashing/program/src/instruction.rs new file mode 100644 index 00000000000..d31eacf9a8e --- /dev/null +++ b/slashing/program/src/instruction.rs @@ -0,0 +1,144 @@ +//! Program instructions + +use { + crate::{error::SlashingError, id}, + bytemuck::{Pod, Zeroable}, + num_enum::{IntoPrimitive, TryFromPrimitive}, + solana_program::{ + clock::Slot, + instruction::{AccountMeta, Instruction}, + program_error::ProgramError, + pubkey::Pubkey, + }, + spl_pod::{ + bytemuck::{pod_from_bytes, pod_get_packed_len}, + primitives::PodU64, + }, +}; + +/// Instructions supported by the program +#[repr(u8)] +#[derive(Clone, Copy, Debug, PartialEq, TryFromPrimitive, IntoPrimitive)] +pub enum SlashingInstruction { + /// Submit a slashable violation proof for `node_pubkey`, which indicates + /// that they submitted a duplicate block to the network + /// + /// + /// Accounts expected by this instruction: + /// 0. `[]` Proof account, must be previously initialized with the proof + /// data. + /// + /// We expect the proof account to be properly sized as to hold a duplicate + /// block proof. See [ProofType] for sizing requirements. 
+ /// + /// Deserializing the proof account from `offset` should result in a + /// [DuplicateBlockProofData] + /// + /// Data expected by this instruction: + /// DuplicateBlockProofInstructionData + DuplicateBlockProof, +} + +/// Data expected by +/// `SlashingInstruction::DuplicateBlockProof` +#[repr(C)] +#[derive(Clone, Copy, Debug, PartialEq, Pod, Zeroable)] +pub struct DuplicateBlockProofInstructionData { + /// Offset into the proof account to begin reading, expressed as `u64` + pub(crate) offset: PodU64, + /// Slot for which the violation occured + pub(crate) slot: PodU64, + /// Identity pubkey of the Node that signed the duplicate block + pub(crate) node_pubkey: Pubkey, +} + +/// Utility function for encoding instruction data +pub(crate) fn encode_instruction( + accounts: Vec, + instruction: SlashingInstruction, + instruction_data: &D, +) -> Instruction { + let mut data = vec![u8::from(instruction)]; + data.extend_from_slice(bytemuck::bytes_of(instruction_data)); + Instruction { + program_id: id(), + accounts, + data, + } +} + +/// Utility function for decoding just the instruction type +pub(crate) fn decode_instruction_type(input: &[u8]) -> Result { + if input.is_empty() { + Err(ProgramError::InvalidInstructionData) + } else { + SlashingInstruction::try_from(input[0]) + .map_err(|_| SlashingError::InvalidInstruction.into()) + } +} + +/// Utility function for decoding instruction data +pub(crate) fn decode_instruction_data(input_with_type: &[u8]) -> Result<&T, ProgramError> { + if input_with_type.len() != pod_get_packed_len::().saturating_add(1) { + Err(ProgramError::InvalidInstructionData) + } else { + pod_from_bytes(&input_with_type[1..]) + } +} + +/// Create a `SlashingInstruction::DuplicateBlockProof` instruction +pub fn duplicate_block_proof( + proof_account: &Pubkey, + offset: u64, + slot: Slot, + node_pubkey: Pubkey, +) -> Instruction { + encode_instruction( + vec![AccountMeta::new_readonly(*proof_account, false)], + 
SlashingInstruction::DuplicateBlockProof, + &DuplicateBlockProofInstructionData { + offset: PodU64::from(offset), + slot: PodU64::from(slot), + node_pubkey, + }, + ) +} + +#[cfg(test)] +mod tests { + use {super::*, solana_program::program_error::ProgramError}; + + const TEST_BYTES: [u8; 8] = [42; 8]; + + #[test] + fn serialize_duplicate_block_proof() { + let offset = 34; + let slot = 42; + let node_pubkey = Pubkey::new_unique(); + let instruction = duplicate_block_proof(&Pubkey::new_unique(), offset, slot, node_pubkey); + let mut expected = vec![0]; + expected.extend_from_slice(&offset.to_le_bytes()); + expected.extend_from_slice(&slot.to_le_bytes()); + expected.extend_from_slice(&node_pubkey.to_bytes()); + assert_eq!(instruction.data, expected); + + assert_eq!( + SlashingInstruction::DuplicateBlockProof, + decode_instruction_type(&instruction.data).unwrap() + ); + let instruction_data: &DuplicateBlockProofInstructionData = + decode_instruction_data(&instruction.data).unwrap(); + + assert_eq!(instruction_data.offset, offset.into()); + assert_eq!(instruction_data.slot, slot.into()); + assert_eq!(instruction_data.node_pubkey, node_pubkey); + } + + #[test] + fn deserialize_invalid_instruction() { + let mut expected = vec![12]; + expected.extend_from_slice(&TEST_BYTES); + let err: ProgramError = decode_instruction_type(&expected).unwrap_err(); + assert_eq!(err, SlashingError::InvalidInstruction.into()); + } +} diff --git a/slashing/program/src/lib.rs b/slashing/program/src/lib.rs new file mode 100644 index 00000000000..24b7343ab09 --- /dev/null +++ b/slashing/program/src/lib.rs @@ -0,0 +1,16 @@ +//! 
Slashing program +#![deny(missing_docs)] + +pub mod duplicate_block_proof; +mod entrypoint; +pub mod error; +pub mod instruction; +pub mod processor; +mod shred; +pub mod state; + +// Export current SDK types for downstream users building with a different SDK +// version +pub use solana_program; + +solana_program::declare_id!("S1ashing11111111111111111111111111111111111"); diff --git a/slashing/program/src/processor.rs b/slashing/program/src/processor.rs new file mode 100644 index 00000000000..07efa42ccd9 --- /dev/null +++ b/slashing/program/src/processor.rs @@ -0,0 +1,166 @@ +//! Program state processor + +use { + crate::{ + duplicate_block_proof::DuplicateBlockProofData, + error::SlashingError, + instruction::{ + decode_instruction_data, decode_instruction_type, DuplicateBlockProofInstructionData, + SlashingInstruction, + }, + state::SlashingProofData, + }, + solana_program::{ + account_info::{next_account_info, AccountInfo}, + clock::Slot, + entrypoint::ProgramResult, + msg, + program_error::ProgramError, + pubkey::Pubkey, + sysvar::{clock::Clock, epoch_schedule::EpochSchedule, Sysvar}, + }, +}; + +fn verify_proof_data<'a, T>(slot: Slot, pubkey: &Pubkey, proof_data: &'a [u8]) -> ProgramResult +where + T: SlashingProofData<'a>, +{ + // Statue of limitations is 1 epoch + let clock = Clock::get()?; + let Some(elapsed) = clock.slot.checked_sub(slot) else { + return Err(ProgramError::ArithmeticOverflow); + }; + let epoch_schedule = EpochSchedule::get()?; + if elapsed > epoch_schedule.slots_per_epoch { + return Err(SlashingError::ExceedsStatueOfLimitations.into()); + } + + let proof_data: T = + T::unpack(proof_data).map_err(|_| SlashingError::ShredDeserializationError)?; + + SlashingProofData::verify_proof(proof_data, slot, pubkey)?; + + // TODO: follow up PR will record this violation in context state account. just + // log for now. + msg!( + "{} violation verified in slot {}. 
This incident will be recorded", + T::PROOF_TYPE.violation_str(), + slot + ); + Ok(()) +} + +/// Instruction processor +pub fn process_instruction( + _program_id: &Pubkey, + accounts: &[AccountInfo], + input: &[u8], +) -> ProgramResult { + let instruction_type = decode_instruction_type(input)?; + let account_info_iter = &mut accounts.iter(); + let proof_data_info = next_account_info(account_info_iter); + + match instruction_type { + SlashingInstruction::DuplicateBlockProof => { + let data = decode_instruction_data::(input)?; + let proof_data = &proof_data_info?.data.borrow()[u64::from(data.offset) as usize..]; + verify_proof_data::( + data.slot.into(), + &data.node_pubkey, + proof_data, + )?; + Ok(()) + } + } +} + +#[cfg(test)] +mod tests { + use { + super::verify_proof_data, + crate::{ + duplicate_block_proof::DuplicateBlockProofData, error::SlashingError, + shred::tests::new_rand_data_shred, + }, + rand::Rng, + solana_ledger::shred::Shredder, + solana_sdk::{ + clock::{Clock, Slot, DEFAULT_SLOTS_PER_EPOCH}, + epoch_schedule::EpochSchedule, + program_error::ProgramError, + signature::Keypair, + signer::Signer, + }, + std::sync::{Arc, RwLock}, + }; + + const SLOT: Slot = 53084024; + lazy_static::lazy_static! 
{ + static ref CLOCK_SLOT: Arc> = Arc::new(RwLock::new(SLOT)); + } + + fn generate_proof_data(leader: Arc) -> Vec { + let mut rng = rand::thread_rng(); + let (slot, parent_slot, reference_tick, version) = (SLOT, SLOT - 1, 0, 0); + let shredder = Shredder::new(slot, parent_slot, reference_tick, version).unwrap(); + let next_shred_index = rng.gen_range(0..32_000); + let shred1 = + new_rand_data_shred(&mut rng, next_shred_index, &shredder, &leader, true, true); + let shred2 = + new_rand_data_shred(&mut rng, next_shred_index, &shredder, &leader, true, true); + let proof = DuplicateBlockProofData { + shred1: shred1.payload().as_slice(), + shred2: shred2.payload().as_slice(), + }; + proof.pack() + } + + #[test] + fn test_statue_of_limitations() { + *CLOCK_SLOT.write().unwrap() = SLOT + 5; + verify_with_clock().unwrap(); + + *CLOCK_SLOT.write().unwrap() = SLOT - 1; + assert_eq!( + verify_with_clock().unwrap_err(), + ProgramError::ArithmeticOverflow + ); + + *CLOCK_SLOT.write().unwrap() = SLOT + DEFAULT_SLOTS_PER_EPOCH + 1; + assert_eq!( + verify_with_clock().unwrap_err(), + SlashingError::ExceedsStatueOfLimitations.into() + ); + } + + fn verify_with_clock() -> Result<(), ProgramError> { + struct SyscallStubs {} + impl solana_sdk::program_stubs::SyscallStubs for SyscallStubs { + fn sol_get_clock_sysvar(&self, var_addr: *mut u8) -> u64 { + unsafe { + let clock = Clock { + slot: *CLOCK_SLOT.read().unwrap(), + ..Clock::default() + }; + *(var_addr as *mut _ as *mut Clock) = clock; + } + solana_program::entrypoint::SUCCESS + } + + fn sol_get_epoch_schedule_sysvar(&self, var_addr: *mut u8) -> u64 { + unsafe { + *(var_addr as *mut _ as *mut EpochSchedule) = EpochSchedule::default(); + } + solana_program::entrypoint::SUCCESS + } + } + + solana_sdk::program_stubs::set_syscall_stubs(Box::new(SyscallStubs {})); + let leader = Arc::new(Keypair::new()); + verify_proof_data::( + SLOT, + &leader.pubkey(), + &generate_proof_data(leader), + ) + } +} diff --git 
a/slashing/program/src/shred.rs b/slashing/program/src/shred.rs new file mode 100644 index 00000000000..47e4308e086 --- /dev/null +++ b/slashing/program/src/shred.rs @@ -0,0 +1,564 @@ +//! Shred representation +use { + crate::error::SlashingError, + bitflags::bitflags, + bytemuck::Pod, + generic_array::{typenum::U64, GenericArray}, + num_enum::{IntoPrimitive, TryFromPrimitive}, + serde_derive::Deserialize, + solana_program::{ + clock::Slot, + hash::{hashv, Hash}, + }, + spl_pod::primitives::{PodU16, PodU32, PodU64}, +}; + +pub(crate) const SIZE_OF_SIGNATURE: usize = 64; +const SIZE_OF_SHRED_VARIANT: usize = 1; +const SIZE_OF_SLOT: usize = 8; +const SIZE_OF_INDEX: usize = 4; +const SIZE_OF_VERSION: usize = 2; +const SIZE_OF_FEC_SET_INDEX: usize = 4; +const SIZE_OF_PARENT_OFFSET: usize = 2; +const SIZE_OF_NUM_DATA_SHREDS: usize = 2; +const SIZE_OF_NUM_CODING_SHREDS: usize = 2; +const SIZE_OF_POSITION: usize = 2; + +const SIZE_OF_MERKLE_ROOT: usize = 32; +const SIZE_OF_MERKLE_PROOF_ENTRY: usize = 20; + +const OFFSET_OF_SHRED_VARIANT: usize = SIZE_OF_SIGNATURE; +const OFFSET_OF_SLOT: usize = SIZE_OF_SIGNATURE + SIZE_OF_SHRED_VARIANT; +const OFFSET_OF_INDEX: usize = OFFSET_OF_SLOT + SIZE_OF_SLOT; +const OFFSET_OF_VERSION: usize = OFFSET_OF_INDEX + SIZE_OF_INDEX; +const OFFSET_OF_FEC_SET_INDEX: usize = OFFSET_OF_VERSION + SIZE_OF_VERSION; + +const OFFSET_OF_DATA_PARENT_OFFSET: usize = OFFSET_OF_FEC_SET_INDEX + SIZE_OF_FEC_SET_INDEX; +const OFFSET_OF_DATA_SHRED_FLAGS: usize = OFFSET_OF_DATA_PARENT_OFFSET + SIZE_OF_PARENT_OFFSET; + +const OFFSET_OF_CODING_NUM_DATA_SHREDS: usize = OFFSET_OF_FEC_SET_INDEX + SIZE_OF_FEC_SET_INDEX; +const OFFSET_OF_CODING_NUM_CODING_SHREDS: usize = + OFFSET_OF_CODING_NUM_DATA_SHREDS + SIZE_OF_NUM_DATA_SHREDS; +const OFFSET_OF_CODING_POSITION: usize = + OFFSET_OF_CODING_NUM_CODING_SHREDS + SIZE_OF_NUM_CODING_SHREDS; + +type MerkleProofEntry = [u8; 20]; +const MERKLE_HASH_PREFIX_LEAF: &[u8] = b"\x00SOLANA_MERKLE_SHREDS_LEAF"; +const 
MERKLE_HASH_PREFIX_NODE: &[u8] = b"\x01SOLANA_MERKLE_SHREDS_NODE"; + +#[repr(transparent)] +#[derive(Clone, Copy, Default, Eq, PartialEq, Ord, PartialOrd, Hash, Deserialize)] +pub(crate) struct Signature(GenericArray); + +bitflags! { + #[derive(Clone, Copy, Debug, Default, Eq, PartialEq, Deserialize)] + pub struct ShredFlags:u8 { + const SHRED_TICK_REFERENCE_MASK = 0b0011_1111; + const DATA_COMPLETE_SHRED = 0b0100_0000; + const LAST_SHRED_IN_SLOT = 0b1100_0000; + } +} + +#[repr(u8)] +#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, IntoPrimitive, TryFromPrimitive)] +pub(crate) enum ShredType { + Data = 0b1010_0101, + Code = 0b0101_1010, +} + +#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)] +enum ShredVariant { + LegacyCode, + LegacyData, + MerkleCode { + proof_size: u8, + chained: bool, + resigned: bool, + }, + MerkleData { + proof_size: u8, + chained: bool, + resigned: bool, + }, +} + +#[derive(Clone, Debug, Eq, Hash, PartialEq)] +pub(crate) struct ErasureMeta { + num_data_shreds: usize, + num_coding_shreds: usize, + first_coding_index: u32, +} + +#[derive(Clone, PartialEq, Eq)] +pub(crate) struct Shred<'a> { + shred_type: ShredType, + proof_size: u8, + chained: bool, + resigned: bool, + payload: &'a [u8], +} + +impl<'a> Shred<'a> { + const SIZE_OF_CODING_PAYLOAD: usize = 1228; + const SIZE_OF_DATA_PAYLOAD: usize = + Self::SIZE_OF_CODING_PAYLOAD - Self::SIZE_OF_CODING_HEADERS + SIZE_OF_SIGNATURE; + const SIZE_OF_CODING_HEADERS: usize = 89; + const SIZE_OF_DATA_HEADERS: usize = 88; + + pub(crate) fn new_from_payload(payload: &'a [u8]) -> Result { + match Self::get_shred_variant(payload)? 
{ + ShredVariant::LegacyCode | ShredVariant::LegacyData => Err(SlashingError::LegacyShreds), + ShredVariant::MerkleCode { + proof_size, + chained, + resigned, + } => Ok(Self { + shred_type: ShredType::Code, + proof_size, + chained, + resigned, + payload, + }), + ShredVariant::MerkleData { + proof_size, + chained, + resigned, + } => Ok(Self { + shred_type: ShredType::Data, + proof_size, + chained, + resigned, + payload, + }), + } + } + + fn pod_from_bytes( + &self, + ) -> Result<&T, SlashingError> { + let end_index: usize = OFFSET + .checked_add(SIZE) + .ok_or(SlashingError::ShredDeserializationError)?; + bytemuck::try_from_bytes( + self.payload + .get(OFFSET..end_index) + .ok_or(SlashingError::ShredDeserializationError)?, + ) + .map_err(|_| SlashingError::ShredDeserializationError) + } + + fn get_shred_variant(payload: &'a [u8]) -> Result { + let Some(&shred_variant) = payload.get(OFFSET_OF_SHRED_VARIANT) else { + return Err(SlashingError::ShredDeserializationError); + }; + ShredVariant::try_from(shred_variant).map_err(|_| SlashingError::InvalidShredVariant) + } + + pub(crate) fn slot(&self) -> Result { + self.pod_from_bytes::() + .map(|x| u64::from(*x)) + } + + pub(crate) fn index(&self) -> Result { + self.pod_from_bytes::() + .map(|x| u32::from(*x)) + } + + pub(crate) fn version(&self) -> Result { + self.pod_from_bytes::() + .map(|x| u16::from(*x)) + } + + pub(crate) fn fec_set_index(&self) -> Result { + self.pod_from_bytes::() + .map(|x| u32::from(*x)) + } + + pub(crate) fn shred_type(&self) -> ShredType { + self.shred_type + } + + pub(crate) fn last_in_slot(&self) -> Result { + debug_assert!(self.shred_type == ShredType::Data); + let Some(&flags) = self.payload.get(OFFSET_OF_DATA_SHRED_FLAGS) else { + return Err(SlashingError::ShredDeserializationError); + }; + + let flags: ShredFlags = + bincode::deserialize(&[flags]).map_err(|_| SlashingError::InvalidShredVariant)?; + Ok(flags.contains(ShredFlags::LAST_SHRED_IN_SLOT)) + } + + fn num_data_shreds(&self) -> 
Result { + debug_assert!(self.shred_type == ShredType::Code); + self.pod_from_bytes::() + .map(|x| u16::from(*x) as usize) + } + + fn num_coding_shreds(&self) -> Result { + debug_assert!(self.shred_type == ShredType::Code); + self.pod_from_bytes::() + .map(|x| u16::from(*x) as usize) + } + + fn position(&self) -> Result { + debug_assert!(self.shred_type == ShredType::Code); + self.pod_from_bytes::() + .map(|x| u16::from(*x) as usize) + } + + pub(crate) fn next_fec_set_index(&self) -> Result { + debug_assert!(self.shred_type == ShredType::Code); + let num_data = u32::try_from(self.num_data_shreds()?) + .map_err(|_| SlashingError::ShredDeserializationError)?; + self.fec_set_index()? + .checked_add(num_data) + .ok_or(SlashingError::ShredDeserializationError) + } + + pub(crate) fn erasure_meta(&self) -> Result { + debug_assert!(self.shred_type == ShredType::Code); + let num_data_shreds = self.num_data_shreds()?; + let num_coding_shreds = self.num_coding_shreds()?; + let first_coding_index = self + .index()? + .checked_sub( + u32::try_from(self.position()?) + .map_err(|_| SlashingError::ShredDeserializationError)?, + ) + .ok_or(SlashingError::ShredDeserializationError)?; + Ok(ErasureMeta { + num_data_shreds, + num_coding_shreds, + first_coding_index, + }) + } + + fn erasure_batch_index(&self) -> Result { + match self.shred_type { + ShredType::Data => self + .index()? + .checked_sub(self.fec_set_index()?) + .and_then(|x| usize::try_from(x).ok()) + .ok_or(SlashingError::ShredDeserializationError), + ShredType::Code => self + .num_data_shreds()? + .checked_add(self.position()?) 
+ .ok_or(SlashingError::ShredDeserializationError), + } + } + + pub(crate) fn merkle_root(&self) -> Result { + let (proof_offset, proof_size) = self.get_proof_offset_and_size()?; + let proof_end = proof_offset + .checked_add(proof_size) + .ok_or(SlashingError::ShredDeserializationError)?; + let index = self.erasure_batch_index()?; + + let proof = self + .payload + .get(proof_offset..proof_end) + .ok_or(SlashingError::InvalidMerkleShred)? + .chunks(SIZE_OF_MERKLE_PROOF_ENTRY) + .map(<&MerkleProofEntry>::try_from) + .map(Result::unwrap); + let node = self + .payload + .get(SIZE_OF_SIGNATURE..proof_offset) + .ok_or(SlashingError::InvalidMerkleShred)?; + let node = hashv(&[MERKLE_HASH_PREFIX_LEAF, node]); + + Self::get_merkle_root(index, node, proof) + } + + fn get_proof_offset_and_size(&self) -> Result<(usize, usize), SlashingError> { + let (header_size, payload_size) = match self.shred_type { + ShredType::Code => (Self::SIZE_OF_CODING_HEADERS, Self::SIZE_OF_CODING_PAYLOAD), + ShredType::Data => (Self::SIZE_OF_DATA_HEADERS, Self::SIZE_OF_DATA_PAYLOAD), + }; + let proof_size = usize::from(self.proof_size) + .checked_mul(SIZE_OF_MERKLE_PROOF_ENTRY) + .ok_or(SlashingError::ShredDeserializationError)?; + let bytes_past_end = header_size + .checked_add(if self.chained { SIZE_OF_MERKLE_ROOT } else { 0 }) + .and_then(|x| x.checked_add(proof_size)) + .and_then(|x| x.checked_add(if self.resigned { SIZE_OF_SIGNATURE } else { 0 })) + .ok_or(SlashingError::ShredDeserializationError)?; + + let capacity = payload_size + .checked_sub(bytes_past_end) + .ok_or(SlashingError::ShredDeserializationError)?; + let proof_offset = header_size + .checked_add(capacity) + .and_then(|x| x.checked_add(if self.chained { SIZE_OF_MERKLE_ROOT } else { 0 })) + .ok_or(SlashingError::ShredDeserializationError)?; + Ok((proof_offset, proof_size)) + } + + // Obtains parent's hash by joining two sibiling nodes in merkle tree. 
+ fn join_nodes, T: AsRef<[u8]>>(node: S, other: T) -> Hash { + let node = &node.as_ref()[..SIZE_OF_MERKLE_PROOF_ENTRY]; + let other = &other.as_ref()[..SIZE_OF_MERKLE_PROOF_ENTRY]; + hashv(&[MERKLE_HASH_PREFIX_NODE, node, other]) + } + + // Recovers root of the merkle tree from a leaf node + // at the given index and the respective proof. + fn get_merkle_root<'b, I>(index: usize, node: Hash, proof: I) -> Result + where + I: IntoIterator, + { + let (index, root) = proof + .into_iter() + .fold((index, node), |(index, node), other| { + let parent = if index % 2 == 0 { + Self::join_nodes(node, other) + } else { + Self::join_nodes(other, node) + }; + (index >> 1, parent) + }); + (index == 0) + .then_some(root) + .ok_or(SlashingError::InvalidMerkleShred) + } + + /// Returns true if the other shred has the same (slot, index, + /// shred-type), but different payload. + /// Retransmitter's signature is ignored when comparing payloads. + pub(crate) fn is_shred_duplicate(&self, other: &Shred) -> bool { + if (self.slot(), self.index(), self.shred_type()) + != (other.slot(), other.index(), other.shred_type()) + { + return false; + } + fn get_payload<'a>(shred: &Shred<'a>) -> &'a [u8] { + let Ok((proof_offset, proof_size)) = shred.get_proof_offset_and_size() else { + return shred.payload; + }; + if !shred.resigned { + return shred.payload; + } + let Some(offset) = proof_offset.checked_add(proof_size) else { + return shred.payload; + }; + shred.payload.get(..offset).unwrap_or(shred.payload) + } + get_payload(self) != get_payload(other) + } + + /// Returns true if the erasure metas of the other shred matches ours. + /// Assumes that other shred has the same fec set index as ours. + pub(crate) fn check_erasure_consistency(&self, other: &Shred) -> Result { + debug_assert!(self.fec_set_index() == other.fec_set_index()); + debug_assert!(self.shred_type == ShredType::Code); + debug_assert!(other.shred_type == ShredType::Code); + Ok(self.erasure_meta()? == other.erasure_meta()?) 
+ } +} + +impl TryFrom for ShredVariant { + type Error = SlashingError; + fn try_from(shred_variant: u8) -> Result { + if shred_variant == u8::from(ShredType::Code) { + Ok(ShredVariant::LegacyCode) + } else if shred_variant == u8::from(ShredType::Data) { + Ok(ShredVariant::LegacyData) + } else { + let proof_size = shred_variant & 0x0F; + match shred_variant & 0xF0 { + 0x40 => Ok(ShredVariant::MerkleCode { + proof_size, + chained: false, + resigned: false, + }), + 0x60 => Ok(ShredVariant::MerkleCode { + proof_size, + chained: true, + resigned: false, + }), + 0x70 => Ok(ShredVariant::MerkleCode { + proof_size, + chained: true, + resigned: true, + }), + 0x80 => Ok(ShredVariant::MerkleData { + proof_size, + chained: false, + resigned: false, + }), + 0x90 => Ok(ShredVariant::MerkleData { + proof_size, + chained: true, + resigned: false, + }), + 0xb0 => Ok(ShredVariant::MerkleData { + proof_size, + chained: true, + resigned: true, + }), + _ => Err(SlashingError::InvalidShredVariant), + } + } + } +} + +#[cfg(test)] +pub(crate) mod tests { + use { + super::Shred, + crate::shred::ShredType, + rand::Rng, + solana_entry::entry::Entry, + solana_ledger::shred::{ + ProcessShredsStats, ReedSolomonCache, Shred as SolanaShred, Shredder, + }, + solana_sdk::{hash::Hash, pubkey::Pubkey, signature::Keypair, system_transaction}, + std::sync::Arc, + }; + + pub(crate) fn new_rand_data_shred( + rng: &mut R, + next_shred_index: u32, + shredder: &Shredder, + keypair: &Keypair, + merkle_variant: bool, + is_last_in_slot: bool, + ) -> SolanaShred { + let (mut data_shreds, _) = new_rand_shreds( + rng, + next_shred_index, + next_shred_index, + 5, + merkle_variant, + shredder, + keypair, + is_last_in_slot, + ); + data_shreds.pop().unwrap() + } + + pub(crate) fn new_rand_coding_shreds( + rng: &mut R, + next_shred_index: u32, + num_entries: usize, + shredder: &Shredder, + keypair: &Keypair, + merkle_variant: bool, + ) -> Vec { + let (_, coding_shreds) = new_rand_shreds( + rng, + next_shred_index, + 
next_shred_index, + num_entries, + merkle_variant, + shredder, + keypair, + true, + ); + coding_shreds + } + + #[allow(clippy::too_many_arguments)] + pub(crate) fn new_rand_shreds( + rng: &mut R, + next_shred_index: u32, + next_code_index: u32, + num_entries: usize, + merkle_variant: bool, + shredder: &Shredder, + keypair: &Keypair, + is_last_in_slot: bool, + ) -> (Vec, Vec) { + let entries: Vec<_> = std::iter::repeat_with(|| { + let tx = system_transaction::transfer( + &Keypair::new(), // from + &Pubkey::new_unique(), // to + rng.gen(), // lamports + Hash::new_unique(), // recent blockhash + ); + Entry::new( + &Hash::new_unique(), // prev_hash + 1, // num_hashes, + vec![tx], // transactions + ) + }) + .take(num_entries) + .collect(); + shredder.entries_to_shreds( + keypair, + &entries, + is_last_in_slot, + // chained_merkle_root + Some(Hash::new_from_array(rng.gen())), + next_shred_index, + next_code_index, // next_code_index + merkle_variant, + &ReedSolomonCache::default(), + &mut ProcessShredsStats::default(), + ) + } + + #[test] + fn test_solana_shred_parity() { + // Verify that the deserialization functions match solana shred format + for _ in 0..300 { + let mut rng = rand::thread_rng(); + let leader = Arc::new(Keypair::new()); + let slot = rng.gen_range(1..u64::MAX); + let parent_slot = slot - 1; + let reference_tick = 0; + let version = rng.gen_range(0..u16::MAX); + let shredder = Shredder::new(slot, parent_slot, reference_tick, version).unwrap(); + let next_shred_index = rng.gen_range(0..671); + let next_code_index = rng.gen_range(0..781); + let is_last_in_slot = rng.gen_bool(0.5); + let (data_solana_shreds, coding_solana_shreds) = new_rand_shreds( + &mut rng, + next_shred_index, + next_code_index, + 10, + true, + &shredder, + &leader, + is_last_in_slot, + ); + + for solana_shred in data_solana_shreds + .into_iter() + .chain(coding_solana_shreds.into_iter()) + { + let payload = solana_shred.payload().as_slice(); + let shred = 
Shred::new_from_payload(payload).unwrap(); + + assert_eq!(shred.slot().unwrap(), solana_shred.slot()); + assert_eq!(shred.index().unwrap(), solana_shred.index()); + assert_eq!(shred.version().unwrap(), solana_shred.version()); + assert_eq!( + u8::from(shred.shred_type()), + u8::from(solana_shred.shred_type()) + ); + if shred.shred_type() == ShredType::Data { + assert_eq!(shred.last_in_slot().unwrap(), solana_shred.last_in_slot()); + } else { + let erasure_meta = shred.erasure_meta().unwrap(); + assert_eq!( + erasure_meta.num_data_shreds, + shred.num_data_shreds().unwrap() + ); + assert_eq!( + erasure_meta.num_coding_shreds, + shred.num_coding_shreds().unwrap() + ); + // We cannot verify first_coding_index until visibility is + // changed in agave + } + assert_eq!( + shred.merkle_root().unwrap(), + solana_shred.merkle_root().unwrap() + ); + assert_eq!(&shred.payload, solana_shred.payload()); + } + } + } +} diff --git a/slashing/program/src/state.rs b/slashing/program/src/state.rs new file mode 100644 index 00000000000..addd3f50449 --- /dev/null +++ b/slashing/program/src/state.rs @@ -0,0 +1,82 @@ +//! Program state +use { + crate::{duplicate_block_proof::DuplicateBlockProofData, error::SlashingError}, + solana_program::{clock::Slot, pubkey::Pubkey}, +}; + +const PACKET_DATA_SIZE: usize = 1232; + +/// Types of slashing proofs +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub enum ProofType { + /// Invalid proof type + InvalidType, + /// Proof consisting of 2 shreds signed by the leader indicating the leader + /// submitted a duplicate block. + DuplicateBlockProof, +} + +impl ProofType { + /// Size of the proof account to create in order to hold the proof data + /// header and contents + pub const fn proof_account_length(&self) -> usize { + match self { + Self::InvalidType => panic!("Cannot determine size of invalid proof type"), + Self::DuplicateBlockProof => { + // Duplicate block proof consists of 2 shreds that can be `PACKET_DATA_SIZE`. 
+ DuplicateBlockProofData::size_of(PACKET_DATA_SIZE) + } + } + } + + /// Display string for this proof type's violation + pub fn violation_str(&self) -> &str { + match self { + Self::InvalidType => "invalid", + Self::DuplicateBlockProof => "duplicate block", + } + } +} + +impl From for u8 { + fn from(value: ProofType) -> Self { + match value { + ProofType::InvalidType => 0, + ProofType::DuplicateBlockProof => 1, + } + } +} + +impl From for ProofType { + fn from(value: u8) -> Self { + match value { + 1 => Self::DuplicateBlockProof, + _ => Self::InvalidType, + } + } +} + +/// Trait that proof accounts must satisfy in order to verify via the slashing +/// program +pub trait SlashingProofData<'a> { + /// The type of proof this data represents + const PROOF_TYPE: ProofType; + + /// Zero copy from raw data buffer + fn unpack(data: &'a [u8]) -> Result + where + Self: Sized; + + /// Verification logic for this type of proof data + fn verify_proof(self, slot: Slot, pubkey: &Pubkey) -> Result<(), SlashingError>; +} + +#[cfg(test)] +mod tests { + use crate::state::PACKET_DATA_SIZE; + + #[test] + fn test_packet_size_parity() { + assert_eq!(PACKET_DATA_SIZE, solana_sdk::packet::PACKET_DATA_SIZE); + } +} diff --git a/slashing/program/tests/duplicate_block_proof.rs b/slashing/program/tests/duplicate_block_proof.rs new file mode 100644 index 00000000000..bab00c51015 --- /dev/null +++ b/slashing/program/tests/duplicate_block_proof.rs @@ -0,0 +1,390 @@ +#![cfg(feature = "test-sbf")] + +use { + rand::Rng, + solana_entry::entry::Entry, + solana_ledger::{ + blockstore_meta::ErasureMeta, + shred::{ProcessShredsStats, ReedSolomonCache, Shred, Shredder}, + }, + solana_program::pubkey::Pubkey, + solana_program_test::*, + solana_sdk::{ + clock::{Clock, Slot}, + decode_error::DecodeError, + hash::Hash, + instruction::InstructionError, + rent::Rent, + signature::{Keypair, Signer}, + system_instruction, system_transaction, + transaction::{Transaction, TransactionError}, + }, + 
spl_pod::bytemuck::pod_get_packed_len, + spl_record::{instruction as record, state::RecordData}, + spl_slashing::{ + duplicate_block_proof::DuplicateBlockProofData, error::SlashingError, id, instruction, + processor::process_instruction, state::ProofType, + }, + std::sync::Arc, +}; + +const SLOT: Slot = 53084024; + +fn program_test() -> ProgramTest { + let mut program_test = ProgramTest::new("spl_slashing", id(), processor!(process_instruction)); + program_test.add_program( + "spl_record", + spl_record::id(), + processor!(spl_record::processor::process_instruction), + ); + program_test +} + +async fn setup_clock(context: &mut ProgramTestContext) { + let clock: Clock = context.banks_client.get_sysvar().await.unwrap(); + let mut new_clock = clock.clone(); + new_clock.slot = SLOT; + context.set_sysvar(&new_clock); +} + +async fn initialize_duplicate_proof_account( + context: &mut ProgramTestContext, + authority: &Keypair, + account: &Keypair, +) { + let account_length = ProofType::DuplicateBlockProof + .proof_account_length() + .saturating_add(pod_get_packed_len::()); + println!("Creating account of size {account_length}"); + let transaction = Transaction::new_signed_with_payer( + &[ + system_instruction::create_account( + &context.payer.pubkey(), + &account.pubkey(), + 1.max(Rent::default().minimum_balance(account_length)), + account_length as u64, + &spl_record::id(), + ), + record::initialize(&account.pubkey(), &authority.pubkey()), + ], + Some(&context.payer.pubkey()), + &[&context.payer, account], + context.last_blockhash, + ); + context + .banks_client + .process_transaction(transaction) + .await + .unwrap(); +} + +async fn write_proof( + context: &mut ProgramTestContext, + authority: &Keypair, + account: &Keypair, + proof: &[u8], +) { + let mut offset = 0; + let proof_len = proof.len(); + let chunk_size = 800; + println!("Writing a proof of size {proof_len}"); + while offset < proof_len { + let end = std::cmp::min(offset.checked_add(chunk_size).unwrap(), 
proof_len);
        // (continuation of `write_proof`: the surrounding loop walks the
        // serialized proof in `chunk_size` pieces and appends each piece to
        // the record account via `record::write` at the current `offset`)
        let transaction = Transaction::new_signed_with_payer(
            &[record::write(
                &account.pubkey(),
                &authority.pubkey(),
                offset as u64,
                &proof[offset..end],
            )],
            Some(&context.payer.pubkey()),
            &[&context.payer, authority],
            context.last_blockhash,
        );
        context
            .banks_client
            .process_transaction(transaction)
            .await
            .unwrap();

        // checked_add: overflow here would indicate a bug in chunking, panic loudly
        offset = offset.checked_add(chunk_size).unwrap();
    }
}

/// Returns one random data shred for `next_shred_index`, produced by
/// `shredder` and signed by `keypair`.
///
/// Builds a small batch from 5 random entries and pops the last data shred,
/// so `is_last_in_slot` is reflected in the returned shred.
pub fn new_rand_data_shred<R: Rng>(
    rng: &mut R,
    next_shred_index: u32,
    shredder: &Shredder,
    keypair: &Keypair,
    is_last_in_slot: bool,
) -> Shred {
    let (mut data_shreds, _) = new_rand_shreds(
        rng,
        next_shred_index,
        next_shred_index,
        5, // num_entries
        shredder,
        keypair,
        is_last_in_slot,
    );
    data_shreds.pop().unwrap()
}

/// Returns the coding shreds generated from `num_entries` random entries,
/// with both data and coding indices starting at `next_shred_index`.
pub(crate) fn new_rand_coding_shreds<R: Rng>(
    rng: &mut R,
    next_shred_index: u32,
    num_entries: usize,
    shredder: &Shredder,
    keypair: &Keypair,
) -> Vec<Shred> {
    let (_, coding_shreds) = new_rand_shreds(
        rng,
        next_shred_index,
        next_shred_index,
        num_entries,
        shredder,
        keypair,
        true, // is_last_in_slot
    );
    coding_shreds
}

/// Shreds `num_entries` random single-transfer entries and returns the
/// resulting `(data_shreds, coding_shreds)` pair.
///
/// The chained merkle root is randomized per call, so two invocations with
/// identical arguments still yield shreds with differing merkle roots.
pub(crate) fn new_rand_shreds<R: Rng>(
    rng: &mut R,
    next_shred_index: u32,
    next_code_index: u32,
    num_entries: usize,
    shredder: &Shredder,
    keypair: &Keypair,
    is_last_in_slot: bool,
) -> (Vec<Shred>, Vec<Shred>) {
    let entries: Vec<_> = std::iter::repeat_with(|| {
        let tx = system_transaction::transfer(
            &Keypair::new(),       // from
            &Pubkey::new_unique(), // to
            rng.gen(),             // lamports
            Hash::new_unique(),    // recent blockhash
        );
        Entry::new(
            &Hash::new_unique(), // prev_hash
            1,                   // num_hashes
            vec![tx],            // transactions
        )
    })
    .take(num_entries)
    .collect();
    shredder.entries_to_shreds(
        keypair,
        &entries,
        is_last_in_slot,
        // chained_merkle_root — randomized so separate calls conflict
        Some(Hash::new_from_array(rng.gen())),
        next_shred_index,
        next_code_index, // next_code_index
        true,            // merkle_variant
        &ReedSolomonCache::default(),
        &mut ProcessShredsStats::default(),
    )
}

/// Two data shreds with the same index but different merkle roots form a
/// valid duplicate-block proof: the instruction must succeed.
#[tokio::test]
async fn valid_proof_data() {
    let mut context = program_test().start_with_context().await;
    setup_clock(&mut context).await;

    let authority = Keypair::new();
    let account = Keypair::new();

    let mut rng = rand::thread_rng();
    let leader = Arc::new(Keypair::new());
    let (slot, parent_slot, reference_tick, version) = (SLOT, 53084023, 0, 0);
    let shredder = Shredder::new(slot, parent_slot, reference_tick, version).unwrap();
    let next_shred_index = rng.gen_range(0..32_000);
    // Same shred index, but each call randomizes payload and chained merkle
    // root, so the two shreds conflict.
    let shred1 = new_rand_data_shred(&mut rng, next_shred_index, &shredder, &leader, true);
    let shred2 = new_rand_data_shred(&mut rng, next_shred_index, &shredder, &leader, true);

    assert_ne!(
        shred1.merkle_root().unwrap(),
        shred2.merkle_root().unwrap(),
        "Expecting merkle root conflict",
    );

    let duplicate_proof = DuplicateBlockProofData {
        shred1: shred1.payload().as_slice(),
        shred2: shred2.payload().as_slice(),
    };
    let data = duplicate_proof.pack();

    initialize_duplicate_proof_account(&mut context, &authority, &account).await;
    write_proof(&mut context, &authority, &account, &data).await;

    let transaction = Transaction::new_signed_with_payer(
        &[instruction::duplicate_block_proof(
            &account.pubkey(),
            RecordData::WRITABLE_START_INDEX as u64,
            slot,
            leader.pubkey(),
        )],
        Some(&context.payer.pubkey()),
        &[&context.payer],
        context.last_blockhash,
    );
    context
        .banks_client
        .process_transaction(transaction)
        .await
        .unwrap();
}

/// Two coding shreds from the same FEC set but produced by different
/// shredding runs form a valid duplicate-block proof: the instruction must
/// succeed.
#[tokio::test]
async fn valid_proof_coding() {
    let mut context = program_test().start_with_context().await;
    setup_clock(&mut context).await;

    let authority = Keypair::new();
    let account = Keypair::new();

    let mut rng = rand::thread_rng();
    let leader = Arc::new(Keypair::new());
    let (slot, parent_slot, reference_tick, version) = (SLOT, 53084023, 0, 0);
    let shredder = Shredder::new(slot, parent_slot, reference_tick, version).unwrap();
    let next_shred_index = rng.gen_range(0..32_000);
    // Shreds come from two independent shredding runs of the same FEC set.
    let shred1 =
        new_rand_coding_shreds(&mut rng, next_shred_index, 10, &shredder, &leader)[0].clone();
    let shred2 =
        new_rand_coding_shreds(&mut rng, next_shred_index, 10, &shredder, &leader)[1].clone();

    // NOTE(review): the failure message says "failure" while the predicate
    // asserts consistency HOLDS — confirm whether the predicate should be
    // negated or the message reworded; behavior left unchanged here.
    assert!(
        ErasureMeta::check_erasure_consistency(&shred1, &shred2),
        "Expected erasure consistency failure",
    );

    let duplicate_proof = DuplicateBlockProofData {
        shred1: shred1.payload().as_slice(),
        shred2: shred2.payload().as_slice(),
    };
    let data = duplicate_proof.pack();

    initialize_duplicate_proof_account(&mut context, &authority, &account).await;
    write_proof(&mut context, &authority, &account, &data).await;

    let transaction = Transaction::new_signed_with_payer(
        &[instruction::duplicate_block_proof(
            &account.pubkey(),
            RecordData::WRITABLE_START_INDEX as u64,
            slot,
            leader.pubkey(),
        )],
        Some(&context.payer.pubkey()),
        &[&context.payer],
        context.last_blockhash,
    );
    context
        .banks_client
        .process_transaction(transaction)
        .await
        .unwrap();
}

/// A proof whose two data shreds are byte-identical is not a conflict: the
/// program must reject it with `InvalidPayloadProof`.
#[tokio::test]
async fn invalid_proof_data() {
    let mut context = program_test().start_with_context().await;
    setup_clock(&mut context).await;

    let authority = Keypair::new();
    let account = Keypair::new();

    let mut rng = rand::thread_rng();
    let leader = Arc::new(Keypair::new());
    let (slot, parent_slot, reference_tick, version) = (SLOT, 53084023, 0, 0);
    let shredder = Shredder::new(slot, parent_slot, reference_tick, version).unwrap();
    let next_shred_index = rng.gen_range(0..32_000);
    let shred1 = new_rand_data_shred(&mut rng, next_shred_index, &shredder, &leader, true);
    // Identical payloads — no duplicate-block evidence.
    let shred2 = shred1.clone();

    let duplicate_proof = DuplicateBlockProofData {
        shred1: shred1.payload().as_slice(),
        shred2: shred2.payload().as_slice(),
    };
    let data = duplicate_proof.pack();

    initialize_duplicate_proof_account(&mut context, &authority, &account).await;
    write_proof(&mut context, &authority, &account, &data).await;

    let transaction = Transaction::new_signed_with_payer(
        &[instruction::duplicate_block_proof(
            &account.pubkey(),
            RecordData::WRITABLE_START_INDEX as u64,
            slot,
            leader.pubkey(),
        )],
        Some(&context.payer.pubkey()),
        &[&context.payer],
        context.last_blockhash,
    );
    let err = context
        .banks_client
        .process_transaction(transaction)
        .await
        .unwrap_err()
        .unwrap();
    let TransactionError::InstructionError(0, InstructionError::Custom(code)) = err else {
        panic!("Invalid error {err:?}");
    };
    let err: SlashingError = SlashingError::decode_custom_error_to_enum(code).unwrap();
    assert_eq!(err, SlashingError::InvalidPayloadProof);
}

/// Two coding shreds from the SAME shredding run are erasure-consistent, so
/// the program must reject the proof with `InvalidErasureMetaConflict`.
#[tokio::test]
async fn invalid_proof_coding() {
    let mut context = program_test().start_with_context().await;
    setup_clock(&mut context).await;

    let authority = Keypair::new();
    let account = Keypair::new();

    let mut rng = rand::thread_rng();
    let leader = Arc::new(Keypair::new());
    let (slot, parent_slot, reference_tick, version) = (SLOT, 53084023, 0, 0);
    let shredder = Shredder::new(slot, parent_slot, reference_tick, version).unwrap();
    let next_shred_index = rng.gen_range(0..32_000);
    // Both shreds from one run: same erasure config, no conflict.
    let coding_shreds = new_rand_coding_shreds(&mut rng, next_shred_index, 10, &shredder, &leader);
    let shred1 = coding_shreds[0].clone();
    let shred2 = coding_shreds[1].clone();

    assert!(
        ErasureMeta::check_erasure_consistency(&shred1, &shred2),
        "Expecting no erasure conflict"
    );
    let duplicate_proof = DuplicateBlockProofData {
        shred1: shred1.payload().as_slice(),
        shred2: shred2.payload().as_slice(),
    };
    let data = duplicate_proof.pack();

    initialize_duplicate_proof_account(&mut context, &authority, &account).await;
    write_proof(&mut context, &authority, &account, &data).await;

    let transaction = Transaction::new_signed_with_payer(
        &[instruction::duplicate_block_proof(
            &account.pubkey(),
            RecordData::WRITABLE_START_INDEX as u64,
            slot,
            leader.pubkey(),
        )],
        Some(&context.payer.pubkey()),
        &[&context.payer],
        context.last_blockhash,
    );
    let err = context
        .banks_client
        .process_transaction(transaction)
        .await
        .unwrap_err()
        .unwrap();
    let TransactionError::InstructionError(0, InstructionError::Custom(code)) = err else {
        panic!("Invalid error {err:?}");
    };
    let err: SlashingError = SlashingError::decode_custom_error_to_enum(code).unwrap();
    assert_eq!(err, SlashingError::InvalidErasureMetaConflict);
}