diff --git a/crates/core/src/masp.rs b/crates/core/src/masp.rs index 2c72ab1c381..58278eced66 100644 --- a/crates/core/src/masp.rs +++ b/crates/core/src/masp.rs @@ -534,28 +534,17 @@ impl FromStr for MaspValue { } } -/// Reference to a masp transaction inside a [`BatchedTx`] -#[derive(Clone, Serialize, Deserialize)] -pub struct MaspTxRef { - // FIXME: actually, are we using the commitment? Probably if I give the - // masp section ref I don't need the commitment anymore right? - /// The inner tx commitment's hash - pub cmt: Hash, - /// The hash of the masp [`Section::MaspTx`] - pub masp_section_ref: Hash, -} - /// The masp transactions' references of a given batch #[derive(Default, Clone, Serialize, Deserialize)] -pub struct BatchMaspTxRefs(pub Vec<MaspTxRef>); +pub struct MaspTxRefs(pub Vec<Hash>); -impl Display for BatchMaspTxRefs { +impl Display for MaspTxRefs { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{}", serde_json::to_string(self).unwrap()) } } -impl FromStr for BatchMaspTxRefs { +impl FromStr for MaspTxRefs { type Err = serde_json::Error; fn from_str(s: &str) -> Result<Self, Self::Err> { diff --git a/crates/events/src/extend.rs b/crates/events/src/extend.rs index 78058e5436c..f51052a0f51 100644 --- a/crates/events/src/extend.rs +++ b/crates/events/src/extend.rs @@ -2,12 +2,12 @@ use std::fmt::Display; use std::marker::PhantomData; -use std::ops::{ControlFlow, DerefMut}; +use std::ops::ControlFlow; use std::str::FromStr; use namada_core::collections::HashMap; use namada_core::hash::Hash; -use namada_core::masp::BatchMaspTxRefs; +use namada_core::masp::MaspTxRefs; use namada_core::storage::{BlockHeight, TxIndex}; use super::*; @@ -502,10 +502,10 @@ impl EventAttributeEntry<'static> for MaspTxBlockIndex { /// Extend an [`Event`] with `masp_tx_batch_refs` data, indicating the specific /// inner transactions inside the batch that are valid masp txs and the /// references to the relative masp sections. -pub struct MaspTxBatchRefs(pub BatchMaspTxRefs); +pub struct MaspTxBatchRefs(pub MaspTxRefs); impl EventAttributeEntry<'static> for MaspTxBatchRefs { - type Value = BatchMaspTxRefs; + type Value = MaspTxRefs; type ValueOwned = Self::Value; const KEY: &'static str = "masp_tx_batch_refs"; @@ -607,8 +607,8 @@ where } /// Return a new implementation of [`EventAttributeChecker`]. 
-pub fn attribute_checker<'value, DATA, ATTR>( -) -> Box> +pub fn attribute_checker<'value, DATA, ATTR>() +-> Box> where DATA: EventAttributeEntry<'value> + 'static, ATTR: AttributesMap, diff --git a/crates/namada/src/ledger/native_vp/masp.rs b/crates/namada/src/ledger/native_vp/masp.rs index 9ae17dd5a5d..a3446299830 100644 --- a/crates/namada/src/ledger/native_vp/masp.rs +++ b/crates/namada/src/ledger/native_vp/masp.rs @@ -25,7 +25,6 @@ use namada_proof_of_stake::Epoch; use namada_sdk::masp::verify_shielded_tx; use namada_state::{ConversionState, OptionExt, ResultExt, StateRead}; use namada_token::read_denom; -use namada_tx::action::{Action, MaspAction, Read}; use namada_tx::BatchedTxRef; use namada_vp_env::VpEnv; use ripemd::Digest as RipemdDigest; @@ -364,8 +363,7 @@ where let shielded_tx = tx_data .tx .get_section(&masp_section_ref) - .map(|section| section.masp_tx()) - .flatten() + .and_then(|section| section.masp_tx()) .ok_or_else(|| { native_vp::Error::new_const( "Missing MASP section in transaction", diff --git a/crates/namada/src/ledger/protocol/mod.rs b/crates/namada/src/ledger/protocol/mod.rs index 8faf7a9e013..974dc35173a 100644 --- a/crates/namada/src/ledger/protocol/mod.rs +++ b/crates/namada/src/ledger/protocol/mod.rs @@ -7,7 +7,6 @@ use borsh_ext::BorshSerializeExt; use eyre::{eyre, WrapErr}; use namada_core::booleans::BoolResultUnitExt; use namada_core::hash::Hash; -use namada_core::masp::MaspTxRef; use namada_core::storage::Key; use namada_events::extend::{ ComposeEvent, Height as HeightAttr, TxHash as TxHashAttr, @@ -15,9 +14,8 @@ use namada_events::extend::{ use namada_events::EventLevel; use namada_gas::TxGasMeter; use namada_sdk::tx::TX_TRANSFER_WASM; -use namada_state::{StateRead, StorageWrite}; +use namada_state::StorageWrite; use namada_token::event::{TokenEvent, TokenOperation, UserAccount}; -use namada_tx::action::{Action, MaspAction, Read}; use namada_tx::data::protocol::ProtocolTxType; use namada_tx::data::{ BatchResults, BatchedTxResult, ExtendedTxResult, TxResult, TxType, @@ -338,12 +336,12 @@ where // transaction refs for the events if let Some(masp_section_ref) = namada_tx::action::get_masp_section_ref(state) - .map_err(|e| Error::StateError(e))? + .map_err(Error::StateError)? 
{ - extended_tx_result.masp_tx_refs.0.push(MaspTxRef { - cmt: cmt.get_hash(), - masp_section_ref, - }); + extended_tx_result + .masp_tx_refs + .0 + .push(masp_section_ref); } state.write_log_mut().commit_tx_to_batch(); } else { diff --git a/crates/node/src/bench_utils.rs b/crates/node/src/bench_utils.rs index cf5d8b50f29..78483126702 100644 --- a/crates/node/src/bench_utils.rs +++ b/crates/node/src/bench_utils.rs @@ -74,7 +74,7 @@ use namada::ledger::native_vp::ibc::get_dummy_header; use namada::ledger::queries::{ Client, EncodedResponseQuery, RequestCtx, RequestQuery, Router, RPC, }; -use namada::masp::{BatchMaspTxRefs, MaspTxRef}; +use namada::masp::MaspTxRefs; use namada::state::StorageRead; use namada::tx::data::pos::Bond; use namada::tx::data::{ @@ -923,25 +923,17 @@ impl Client for BenchShell { .with(MaspTxBlockIndex(TxIndex::must_from_usize( idx, ))) - .with(MaspTxBatchRefs(BatchMaspTxRefs(vec![ - MaspTxRef { - cmt: tx - .first_commitments() - .unwrap() - .get_hash(), - masp_section_ref: tx - .sections - .iter() - .find_map(|section| { - if let Section::MaspTx(_) = section - { - Some(section.get_hash()) - } else { - None - } - }) - .unwrap(), - }, + .with(MaspTxBatchRefs(MaspTxRefs(vec![ + tx.sections + .iter() + .find_map(|section| { + if let Section::MaspTx(_) = section { + Some(section.get_hash()) + } else { + None + } + }) + .unwrap(), ]))) .into(); namada::tendermint::abci::Event::from(event) diff --git a/crates/node/src/shell/finalize_block.rs b/crates/node/src/shell/finalize_block.rs index 09cdf59db41..c4100ab7e70 100644 --- a/crates/node/src/shell/finalize_block.rs +++ b/crates/node/src/shell/finalize_block.rs @@ -16,7 +16,7 @@ use namada::ledger::gas::GasMetering; use namada::ledger::ibc; use namada::ledger::pos::namada_proof_of_stake; use namada::ledger::protocol::DispatchError; -use namada::masp::BatchMaspTxRefs; +use namada::masp::MaspTxRefs; use namada::proof_of_stake; use namada::proof_of_stake::storage::{ find_validator_by_raw_hash, write_last_block_proposer_address, @@ -824,7 +824,7 @@ impl<'finalize> TempTxLogs { fn check_inner_results( &mut self, tx_result: &namada::tx::data::TxResult, - masp_tx_refs: BatchMaspTxRefs, + masp_tx_refs: MaspTxRefs, tx_header: &namada::tx::Header, tx_index: usize, height: BlockHeight, @@ -2801,21 +2801,25 @@ mod test_finalize_block { assert_eq!(root_pre.0, root_post.0); // Check transaction's hash in storage - assert!(shell - .shell - .state - .write_log() - .has_replay_protection_entry(&wrapper_tx.raw_header_hash())); + assert!( + shell + .shell + .state + .write_log() + .has_replay_protection_entry(&wrapper_tx.raw_header_hash()) + ); // Check that the hash is not present in the merkle tree shell.state.commit_block().unwrap(); - assert!(!shell - .shell - .state - .in_mem() - .block - .tree - .has_key(&wrapper_hash_key) - .unwrap()); + assert!( + !shell + .shell + .state + .in_mem() + .block + .tree + .has_key(&wrapper_hash_key) + .unwrap() + ); } /// Test that masp anchor keys are added to the merkle tree @@ -2853,22 +2857,26 @@ mod test_finalize_block { assert_eq!(root_pre.0, root_post.0); // Check that the hashes are present in the merkle tree shell.state.commit_block().unwrap(); - assert!(shell - .shell - .state - .in_mem() - .block - .tree - .has_key(&convert_key) - .unwrap()); - assert!(shell - .shell - .state - .in_mem() - .block - .tree - .has_key(&commitment_key) - .unwrap()); + assert!( + shell + .shell + .state + .in_mem() + .block + .tree + .has_key(&convert_key) + .unwrap() + ); + assert!( + shell + .shell + .state + 
.in_mem() + .block + .tree + .has_key(&commitment_key) + .unwrap() + ); } /// Test that a tx that has already been applied in the same block @@ -2946,26 +2954,34 @@ mod test_finalize_block { assert_eq!(code, ResultCode::WasmRuntimeError); for wrapper in [&wrapper, &new_wrapper] { - assert!(shell - .state - .write_log() - .has_replay_protection_entry(&wrapper.raw_header_hash())); - assert!(!shell - .state - .write_log() - .has_replay_protection_entry(&wrapper.header_hash())); + assert!( + shell + .state + .write_log() + .has_replay_protection_entry(&wrapper.raw_header_hash()) + ); + assert!( + !shell + .state + .write_log() + .has_replay_protection_entry(&wrapper.header_hash()) + ); } // Commit to check the hashes from storage shell.commit(); for wrapper in [&wrapper, &new_wrapper] { - assert!(shell - .state - .has_replay_protection_entry(&wrapper.raw_header_hash()) - .unwrap()); - assert!(!shell - .state - .has_replay_protection_entry(&wrapper.header_hash()) - .unwrap()); + assert!( + shell + .state + .has_replay_protection_entry(&wrapper.raw_header_hash()) + .unwrap() + ); + assert!( + !shell + .state + .has_replay_protection_entry(&wrapper.header_hash()) + .unwrap() + ); } } @@ -3248,23 +3264,29 @@ mod test_finalize_block { &unsigned_wrapper, &wrong_commitment_wrapper, ] { - assert!(!shell - .state - .write_log() - .has_replay_protection_entry(&valid_wrapper.raw_header_hash())); - assert!(shell + assert!( + !shell.state.write_log().has_replay_protection_entry( + &valid_wrapper.raw_header_hash() + ) + ); + assert!( + shell + .state + .write_log() + .has_replay_protection_entry(&valid_wrapper.header_hash()) + ); + } + assert!( + shell.state.write_log().has_replay_protection_entry( + &failing_wrapper.raw_header_hash() + ) + ); + assert!( + !shell .state .write_log() - .has_replay_protection_entry(&valid_wrapper.header_hash())); - } - assert!(shell - .state - .write_log() - .has_replay_protection_entry(&failing_wrapper.raw_header_hash())); - assert!(!shell - .state - .write_log() - .has_replay_protection_entry(&failing_wrapper.header_hash())); + .has_replay_protection_entry(&failing_wrapper.header_hash()) + ); // Commit to check the hashes from storage shell.commit(); @@ -3273,23 +3295,33 @@ mod test_finalize_block { unsigned_wrapper, wrong_commitment_wrapper, ] { - assert!(!shell + assert!( + !shell + .state + .has_replay_protection_entry( + &valid_wrapper.raw_header_hash() + ) + .unwrap() + ); + assert!( + shell + .state + .has_replay_protection_entry(&valid_wrapper.header_hash()) + .unwrap() + ); + } + assert!( + shell .state - .has_replay_protection_entry(&valid_wrapper.raw_header_hash()) - .unwrap()); - assert!(shell + .has_replay_protection_entry(&failing_wrapper.raw_header_hash()) + .unwrap() + ); + assert!( + !shell .state - .has_replay_protection_entry(&valid_wrapper.header_hash()) - .unwrap()); - } - assert!(shell - .state - .has_replay_protection_entry(&failing_wrapper.raw_header_hash()) - .unwrap()); - assert!(!shell - .state - .has_replay_protection_entry(&failing_wrapper.header_hash()) - .unwrap()); + .has_replay_protection_entry(&failing_wrapper.header_hash()) + .unwrap() + ); } #[test] @@ -3349,14 +3381,18 @@ mod test_finalize_block { let code = event[0].read_attribute::().expect("Test failed"); assert_eq!(code, ResultCode::InvalidTx); - assert!(shell - .state - .write_log() - .has_replay_protection_entry(&wrapper_hash)); - assert!(!shell - .state - .write_log() - .has_replay_protection_entry(&wrapper.raw_header_hash())); + assert!( + shell + .state + .write_log() + 
.has_replay_protection_entry(&wrapper_hash) + ); + assert!( + !shell + .state + .write_log() + .has_replay_protection_entry(&wrapper.raw_header_hash()) + ); } // Test that the fees are paid even if the inner transaction fails and its @@ -3754,9 +3790,11 @@ mod test_finalize_block { .unwrap(), Some(ValidatorState::Consensus) ); - assert!(enqueued_slashes_handle() - .at(&Epoch::default()) - .is_empty(&shell.state)?); + assert!( + enqueued_slashes_handle() + .at(&Epoch::default()) + .is_empty(&shell.state)? + ); assert_eq!( get_num_consensus_validators(&shell.state, Epoch::default()) .unwrap(), @@ -3775,17 +3813,21 @@ mod test_finalize_block { .unwrap(), Some(ValidatorState::Jailed) ); - assert!(enqueued_slashes_handle() - .at(&epoch) - .is_empty(&shell.state)?); + assert!( + enqueued_slashes_handle() + .at(&epoch) + .is_empty(&shell.state)? + ); assert_eq!( get_num_consensus_validators(&shell.state, epoch).unwrap(), 5_u64 ); } - assert!(!enqueued_slashes_handle() - .at(&processing_epoch) - .is_empty(&shell.state)?); + assert!( + !enqueued_slashes_handle() + .at(&processing_epoch) + .is_empty(&shell.state)? + ); // Advance to the processing epoch loop { @@ -3808,9 +3850,11 @@ mod test_finalize_block { // println!("Reached processing epoch"); break; } else { - assert!(enqueued_slashes_handle() - .at(&shell.state.in_mem().block.epoch) - .is_empty(&shell.state)?); + assert!( + enqueued_slashes_handle() + .at(&shell.state.in_mem().block.epoch) + .is_empty(&shell.state)? + ); let stake1 = read_validator_stake( &shell.state, ¶ms, @@ -4294,11 +4338,13 @@ mod test_finalize_block { ) .unwrap(); assert_eq!(last_slash, Some(misbehavior_epoch)); - assert!(namada_proof_of_stake::storage::validator_slashes_handle( - &val1.address - ) - .is_empty(&shell.state) - .unwrap()); + assert!( + namada_proof_of_stake::storage::validator_slashes_handle( + &val1.address + ) + .is_empty(&shell.state) + .unwrap() + ); tracing::debug!("Advancing to epoch 7"); @@ -4363,18 +4409,22 @@ mod test_finalize_block { ) .unwrap(); assert_eq!(last_slash, Some(Epoch(4))); - assert!(namada_proof_of_stake::is_validator_frozen( - &shell.state, - &val1.address, - current_epoch, - ¶ms - ) - .unwrap()); - assert!(namada_proof_of_stake::storage::validator_slashes_handle( - &val1.address - ) - .is_empty(&shell.state) - .unwrap()); + assert!( + namada_proof_of_stake::is_validator_frozen( + &shell.state, + &val1.address, + current_epoch, + ¶ms + ) + .unwrap() + ); + assert!( + namada_proof_of_stake::storage::validator_slashes_handle( + &val1.address + ) + .is_empty(&shell.state) + .unwrap() + ); let pre_stake_10 = namada_proof_of_stake::storage::read_validator_stake( @@ -5252,9 +5302,11 @@ mod test_finalize_block { shell.vp_wasm_cache.clone(), ); let parameters = ParametersVp { ctx }; - assert!(parameters - .validate_tx(&batched_tx, &keys_changed, &verifiers) - .is_ok()); + assert!( + parameters + .validate_tx(&batched_tx, &keys_changed, &verifiers) + .is_ok() + ); // we advance forward to the next epoch let mut req = FinalizeBlock::default(); @@ -5327,11 +5379,13 @@ mod test_finalize_block { let inner_results = inner_tx_result.batch_results.0; for cmt in batch.commitments() { - assert!(inner_results - .get(&cmt.get_hash()) - .unwrap() - .clone() - .is_ok_and(|res| res.is_accepted())); + assert!( + inner_results + .get(&cmt.get_hash()) + .unwrap() + .clone() + .is_ok_and(|res| res.is_accepted()) + ); } // Check storage modifications @@ -5369,18 +5423,24 @@ mod test_finalize_block { let inner_tx_result = event[0].read_attribute::>().unwrap(); 
let inner_results = inner_tx_result.batch_results.0; - assert!(inner_results - .get(&batch.commitments()[0].get_hash()) - .unwrap() - .clone() - .is_ok_and(|res| res.is_accepted())); - assert!(inner_results - .get(&batch.commitments()[1].get_hash()) - .unwrap() - .clone() - .is_err()); + assert!( + inner_results + .get(&batch.commitments()[0].get_hash()) + .unwrap() + .clone() + .is_ok_and(|res| res.is_accepted()) + ); + assert!( + inner_results + .get(&batch.commitments()[1].get_hash()) + .unwrap() + .clone() + .is_err() + ); // Assert that the last tx didn't run - assert!(!inner_results.contains_key(&batch.commitments()[2].get_hash())); + assert!( + !inner_results.contains_key(&batch.commitments()[2].get_hash()) + ); // Check storage modifications are missing for key in ["random_key_1", "random_key_2", "random_key_3"] { @@ -5411,21 +5471,27 @@ mod test_finalize_block { let inner_tx_result = event[0].read_attribute::>().unwrap(); let inner_results = inner_tx_result.batch_results.0; - assert!(inner_results - .get(&batch.commitments()[0].get_hash()) - .unwrap() - .clone() - .is_ok_and(|res| res.is_accepted())); - assert!(inner_results - .get(&batch.commitments()[1].get_hash()) - .unwrap() - .clone() - .is_err()); - assert!(inner_results - .get(&batch.commitments()[2].get_hash()) - .unwrap() - .clone() - .is_ok_and(|res| res.is_accepted())); + assert!( + inner_results + .get(&batch.commitments()[0].get_hash()) + .unwrap() + .clone() + .is_ok_and(|res| res.is_accepted()) + ); + assert!( + inner_results + .get(&batch.commitments()[1].get_hash()) + .unwrap() + .clone() + .is_err() + ); + assert!( + inner_results + .get(&batch.commitments()[2].get_hash()) + .unwrap() + .clone() + .is_ok_and(|res| res.is_accepted()) + ); // Check storage modifications assert_eq!( @@ -5436,10 +5502,12 @@ mod test_finalize_block { .unwrap(), STORAGE_VALUE ); - assert!(!shell - .state - .has_key(&"random_key_2".parse().unwrap()) - .unwrap()); + assert!( + !shell + .state + .has_key(&"random_key_2".parse().unwrap()) + .unwrap() + ); assert_eq!( shell .state @@ -5471,18 +5539,24 @@ mod test_finalize_block { let inner_tx_result = event[0].read_attribute::>().unwrap(); let inner_results = inner_tx_result.batch_results.0; - assert!(inner_results - .get(&batch.commitments()[0].get_hash()) - .unwrap() - .clone() - .is_ok_and(|res| res.is_accepted())); - assert!(inner_results - .get(&batch.commitments()[1].get_hash()) - .unwrap() - .clone() - .is_err()); + assert!( + inner_results + .get(&batch.commitments()[0].get_hash()) + .unwrap() + .clone() + .is_ok_and(|res| res.is_accepted()) + ); + assert!( + inner_results + .get(&batch.commitments()[1].get_hash()) + .unwrap() + .clone() + .is_err() + ); // Assert that the last tx didn't run - assert!(!inner_results.contains_key(&batch.commitments()[2].get_hash())); + assert!( + !inner_results.contains_key(&batch.commitments()[2].get_hash()) + ); // Check storage modifications are missing for key in ["random_key_1", "random_key_2", "random_key_3"] { @@ -5512,18 +5586,24 @@ mod test_finalize_block { let inner_tx_result = event[0].read_attribute::>().unwrap(); let inner_results = inner_tx_result.batch_results.0; - assert!(inner_results - .get(&batch.commitments()[0].get_hash()) - .unwrap() - .clone() - .is_ok_and(|res| res.is_accepted())); - assert!(inner_results - .get(&batch.commitments()[1].get_hash()) - .unwrap() - .clone() - .is_err()); + assert!( + inner_results + .get(&batch.commitments()[0].get_hash()) + .unwrap() + .clone() + .is_ok_and(|res| res.is_accepted()) + ); + 
assert!( + inner_results + .get(&batch.commitments()[1].get_hash()) + .unwrap() + .clone() + .is_err() + ); // Assert that the last tx didn't run - assert!(!inner_results.contains_key(&batch.commitments()[2].get_hash())); + assert!( + !inner_results.contains_key(&batch.commitments()[2].get_hash()) + ); // Check storage modifications assert_eq!( diff --git a/crates/sdk/src/masp.rs b/crates/sdk/src/masp.rs index e5fd6e27606..3fe9c0f2228 100644 --- a/crates/sdk/src/masp.rs +++ b/crates/sdk/src/masp.rs @@ -53,11 +53,11 @@ use masp_proofs::sapling::SaplingVerificationContext; use namada_core::address::Address; use namada_core::collections::{HashMap, HashSet}; use namada_core::dec::Dec; +use namada_core::masp::MaspTxRefs; pub use namada_core::masp::{ encode_asset_type, AssetData, BalanceOwner, ExtendedViewingKey, PaymentAddress, TransferSource, TransferTarget, }; -use namada_core::masp::{BatchMaspTxRefs, MaspTxRef}; use namada_core::storage::{BlockHeight, Epoch, TxIndex}; use namada_core::time::{DateTimeUtc, DurationSecs}; use namada_core::uint::Uint; @@ -65,13 +65,12 @@ use namada_events::extend::{ MaspTxBatchRefs as MaspTxBatchRefsAttr, MaspTxBlockIndex as MaspTxBlockIndexAttr, ReadFromEventAttributes, }; -use namada_ibc::IbcMessage; use namada_macros::BorshDeserializer; #[cfg(feature = "migrations")] use namada_migrations::*; use namada_state::StorageError; -use namada_token::{self as token, Denomination, MaspDigitPos, Transfer}; -use namada_tx::{IndexedTx, Tx, TxCommitments}; +use namada_token::{self as token, Denomination, MaspDigitPos}; +use namada_tx::{IndexedTx, Tx}; use rand_core::{CryptoRng, OsRng, RngCore}; use ripemd::Digest as RipemdDigest; use sha2::Digest; @@ -103,10 +102,10 @@ pub const OUTPUT_NAME: &str = "masp-output.params"; pub const CONVERT_NAME: &str = "masp-convert.params"; /// Type alias for convenience and profit -pub type IndexedNoteData = BTreeMap; +pub type IndexedNoteData = BTreeMap>; /// Type alias for the entries of [`IndexedNoteData`] iterators -pub type IndexedNoteEntry = (IndexedTx, Transaction); +pub type IndexedNoteEntry = (IndexedTx, Vec); /// Shielded transfer #[derive(Clone, Debug, BorshSerialize, BorshDeserialize, BorshDeserializer)] @@ -144,9 +143,6 @@ pub enum TransferErr { General(#[from] Error), } -#[derive(Debug, Clone)] -struct ExtractedMaspTxs(Vec<(TxCommitments, Transaction)>); - /// MASP verifying keys pub struct PVKs { /// spend verifying key @@ -668,35 +664,37 @@ impl ShieldedContext { } /// Update the merkle tree of witnesses the first time we - /// scan a new MASP transaction. + /// scan new MASP transactions. 
fn update_witness_map( &mut self, indexed_tx: IndexedTx, - shielded: &Transaction, + shielded: &[Transaction], ) -> Result<(), Error> { let mut note_pos = self.tree.size(); self.tx_note_map.insert(indexed_tx, note_pos); - for so in shielded - .sapling_bundle() - .map_or(&vec![], |x| &x.shielded_outputs) - { - // Create merkle tree leaf node from note commitment - let node = Node::new(so.cmu.to_repr()); - // Update each merkle tree in the witness map with the latest - // addition - for (_, witness) in self.witness_map.iter_mut() { - witness.append(node).map_err(|()| { + + for tx in shielded { + for so in + tx.sapling_bundle().map_or(&vec![], |x| &x.shielded_outputs) + { + // Create merkle tree leaf node from note commitment + let node = Node::new(so.cmu.to_repr()); + // Update each merkle tree in the witness map with the latest + // addition + for (_, witness) in self.witness_map.iter_mut() { + witness.append(node).map_err(|()| { + Error::Other("note commitment tree is full".to_string()) + })?; + } + self.tree.append(node).map_err(|()| { Error::Other("note commitment tree is full".to_string()) })?; + // Finally, make it easier to construct merkle paths to this new + // note + let witness = IncrementalWitness::<Node>::from_tree(&self.tree); + self.witness_map.insert(note_pos, witness); + note_pos += 1; } - self.tree.append(node).map_err(|()| { - Error::Other("note commitment tree is full".to_string()) - })?; - // Finally, make it easier to construct merkle paths to this new - // note - let witness = IncrementalWitness::<Node>::from_tree(&self.tree); - self.witness_map.insert(note_pos, witness); - note_pos += 1; } Ok(()) } @@ -841,16 +839,13 @@ impl ShieldedContext { let extracted_masp_txs = Self::extract_masp_tx(&tx, &masp_sections_refs).await?; // Collect the current transactions - for (inner_tx, transaction) in extracted_masp_txs.0 { - shielded_txs.insert( - IndexedTx { - height: height.into(), - index: idx, - inner_tx, - }, - transaction, - ); - } + shielded_txs.insert( + IndexedTx { + height: height.into(), + index: idx, + }, + extracted_masp_txs, + ); } } @@ -860,44 +855,33 @@ impl ShieldedContext { /// Extract the relevant shield portions of a [`Tx`], if any. async fn extract_masp_tx( tx: &Tx, - masp_section_refs: &BatchMaspTxRefs, - ) -> Result<ExtractedMaspTxs, Error> { + masp_section_refs: &MaspTxRefs, + ) -> Result<Vec<Transaction>, Error> { // NOTE: simply looking for masp sections attached to the tx // is not safe. We don't validate the sections attached to a // transaction, so we could end up with transactions carrying // an unnecessary masp section. 
We must instead look for the // required masp sections coming from the events - let mut txs = vec![]; - for MaspTxRef { - cmt, - masp_section_ref, - } in &masp_section_refs.0 - { - let transaction = tx - .get_section(masp_section_ref) - .map(|section| section.masp_tx()) - .flatten() - .ok_or_else(|| { - Error::Other( - "Missing expected masp transaction".to_string(), - ) - })?; - - let cmt = tx - .commitments() - .iter() - .find(|inner_tx| &inner_tx.get_hash() == cmt) - .ok_or_else(|| { - Error::Other( - "Missing expected inner transaction".to_string(), - ) - })?; - - txs.push((cmt.to_owned(), transaction)); - } - - Ok(ExtractedMaspTxs(txs)) + masp_section_refs + .0 + .iter() + .try_fold(vec![], |mut acc, hash| { + match tx + .get_section(hash) + .and_then(|section| section.masp_tx()) + .ok_or_else(|| { + Error::Other( + "Missing expected masp transaction".to_string(), + ) + }) { + Ok(transaction) => { + acc.push(transaction); + Ok(acc) + } + Err(e) => Err(e), + } + }) } /// Applies the given transaction to the supplied context. More precisely, @@ -911,7 +895,7 @@ impl ShieldedContext { pub fn scan_tx( &mut self, indexed_tx: IndexedTx, - shielded: &Transaction, + shielded: &[Transaction], vk: &ViewingKey, ) -> Result<(), Error> { // For tracking the account changes caused by this Transaction @@ -920,42 +904,78 @@ impl ShieldedContext { let mut note_pos = self.tx_note_map[&indexed_tx]; // Listen for notes sent to our viewing keys, only if we are syncing // (i.e. in a confirmed status) - for so in shielded - .sapling_bundle() - .map_or(&vec![], |x| &x.shielded_outputs) - { - // Let's try to see if this viewing key can decrypt latest - // note - let notes = self.pos_map.entry(*vk).or_default(); - let decres = try_sapling_note_decryption::<_, OutputDescription<<::SaplingAuth as masp_primitives::transaction::components::sapling::Authorization>::Proof>>( + for tx in shielded { + for so in + tx.sapling_bundle().map_or(&vec![], |x| &x.shielded_outputs) + { + // Let's try to see if this viewing key can decrypt latest + // note + let notes = self.pos_map.entry(*vk).or_default(); + let decres = try_sapling_note_decryption::<_, OutputDescription<<::SaplingAuth as masp_primitives::transaction::components::sapling::Authorization>::Proof>>( &NETWORK, 1.into(), &PreparedIncomingViewingKey::new(&vk.ivk()), so, ); - // So this current viewing key does decrypt this current note... - if let Some((note, pa, memo)) = decres { - // Add this note to list of notes decrypted by this viewing - // key - notes.insert(note_pos); - // Compute the nullifier now to quickly recognize when spent - let nf = note.nf( - &vk.nk, - note_pos.try_into().map_err(|_| { - Error::Other("Can not get nullifier".to_string()) - })?, - ); - self.note_map.insert(note_pos, note); - self.memo_map.insert(note_pos, memo); - // The payment address' diversifier is required to spend - // note - self.div_map.insert(note_pos, *pa.diversifier()); - self.nf_map.insert(nf, note_pos); + // So this current viewing key does decrypt this current + // note... 
+ if let Some((note, pa, memo)) = decres { + // Add this note to list of notes decrypted by this + // viewing key + notes.insert(note_pos); + // Compute the nullifier now to quickly recognize when + // spent + let nf = note.nf( + &vk.nk, + note_pos.try_into().map_err(|_| { + Error::Other( + "Can not get nullifier".to_string(), + ) + })?, + ); + self.note_map.insert(note_pos, note); + self.memo_map.insert(note_pos, memo); + // The payment address' diversifier is required to spend + // note + self.div_map.insert(note_pos, *pa.diversifier()); + self.nf_map.insert(nf, note_pos); + // Note the account changes + let balance = transaction_delta + .entry(*vk) + .or_insert_with(I128Sum::zero); + *balance += I128Sum::from_nonnegative( + note.asset_type, + note.value as i128, + ) + .map_err(|()| { + Error::Other( + "found note with invalid value or asset type" + .to_string(), + ) + })?; + self.vk_map.insert(note_pos, *vk); + } + note_pos += 1; + } + } + } + + // Cancel out those of our notes that have been spent + for tx in shielded { + for ss in + tx.sapling_bundle().map_or(&vec![], |x| &x.shielded_spends) + { + // If the shielded spend's nullifier is in our map, then target + // note is rendered unusable + if let Some(note_pos) = self.nf_map.get(&ss.nullifier) { + self.spents.insert(*note_pos); // Note the account changes let balance = transaction_delta - .entry(*vk) + .entry(self.vk_map[note_pos]) .or_insert_with(I128Sum::zero); - *balance += I128Sum::from_nonnegative( + let note = self.note_map[note_pos]; + + *balance -= I128Sum::from_nonnegative( note.asset_type, note.value as i128, ) @@ -965,37 +985,7 @@ impl ShieldedContext { .to_string(), ) })?; - self.vk_map.insert(note_pos, *vk); } - note_pos += 1; - } - } - - // Cancel out those of our notes that have been spent - for ss in shielded - .sapling_bundle() - .map_or(&vec![], |x| &x.shielded_spends) - { - // If the shielded spend's nullifier is in our map, then target note - // is rendered unusable - if let Some(note_pos) = self.nf_map.get(&ss.nullifier) { - self.spents.insert(*note_pos); - // Note the account changes - let balance = transaction_delta - .entry(self.vk_map[note_pos]) - .or_insert_with(I128Sum::zero); - let note = self.note_map[note_pos]; - - *balance -= I128Sum::from_nonnegative( - note.asset_type, - note.value as i128, - ) - .map_err(|()| { - Error::Other( - "found note with invalid value or asset type" - .to_string(), - ) - })?; } } @@ -1899,7 +1889,7 @@ impl ShieldedContext { // Cache the generated transfer let mut shielded_ctx = context.shielded_mut().await; shielded_ctx - .pre_cache_transaction(context, &masp_tx) + .pre_cache_transaction(context, &[masp_tx.clone()]) .await?; } @@ -1918,7 +1908,7 @@ impl ShieldedContext { async fn pre_cache_transaction( &mut self, context: &impl Namada, - masp_tx: &Transaction, + masp_tx: &[Transaction], ) -> Result<(), Error> { let vks: Vec<_> = context .wallet() @@ -1938,7 +1928,6 @@ impl ShieldedContext { .index .checked_add(1) .expect("Tx index shouldn't overflow"), - inner_tx: TxCommitments::default(), } }); self.sync_status = ContextSyncStatus::Speculative; @@ -2020,7 +2009,7 @@ async fn get_indexed_masp_events_at_height( client: &C, height: BlockHeight, first_idx_to_query: Option, -) -> Result>, Error> { +) -> Result>, Error> { let first_idx_to_query = first_idx_to_query.unwrap_or_default(); Ok(client @@ -2055,45 +2044,6 @@ async fn get_indexed_masp_events_at_height( })) } -// Extract the Transaction hash from a masp over ibc message -async fn extract_payload_from_shielded_action( - 
tx_data: &[u8], -) -> Result { - let message = namada_ibc::decode_message(tx_data) - .map_err(|e| Error::Other(e.to_string()))?; - - let result = match message { - IbcMessage::Transfer(msg) => msg.transfer.ok_or_else(|| { - Error::Other("Missing masp tx in the ibc message".to_string()) - })?, - IbcMessage::NftTransfer(msg) => msg.transfer.ok_or_else(|| { - Error::Other("Missing masp tx in the ibc message".to_string()) - })?, - IbcMessage::RecvPacket(msg) => msg.transfer.ok_or_else(|| { - Error::Other("Missing masp tx in the ibc message".to_string()) - })?, - IbcMessage::AckPacket(msg) => { - // Refund tokens by the ack message - msg.transfer.ok_or_else(|| { - Error::Other("Missing masp tx in the ibc message".to_string()) - })? - } - IbcMessage::Timeout(msg) => { - // Refund tokens by the timeout message - msg.transfer.ok_or_else(|| { - Error::Other("Missing masp tx in the ibc message".to_string()) - })? - } - IbcMessage::Envelope(_) => { - return Err(Error::Other( - "Unexpected ibc message for masp".to_string(), - )); - } - }; - - Ok(result) -} - mod tests { /// quick and dirty test. will fail on size check #[test] diff --git a/crates/shielded_token/src/utils.rs b/crates/shielded_token/src/utils.rs index 22d783f67f5..3d3240b18db 100644 --- a/crates/shielded_token/src/utils.rs +++ b/crates/shielded_token/src/utils.rs @@ -1,17 +1,11 @@ //! MASP utilities -use std::collections::BTreeSet; - use masp_primitives::merkle_tree::CommitmentTree; use masp_primitives::sapling::Node; use masp_primitives::transaction::Transaction; -use namada_core::storage; use namada_storage::{Error, Result, StorageRead, StorageWrite}; -use crate::storage_key::{ - is_masp_key, is_masp_transfer_key, masp_commitment_tree_key, - masp_nullifier_key, -}; +use crate::storage_key::{masp_commitment_tree_key, masp_nullifier_key}; // Writes the nullifiers of the provided masp transaction to storage fn reveal_nullifiers( diff --git a/crates/tx/src/action.rs b/crates/tx/src/action.rs index 29d0be536a2..c43820c019f 100644 --- a/crates/tx/src/action.rs +++ b/crates/tx/src/action.rs @@ -66,6 +66,7 @@ pub enum PgfAction { /// MASP tx action. #[derive(Clone, Debug, BorshDeserialize, BorshSerialize)] pub struct MaspAction { + /// The hash of the masp [`Section`] pub masp_section_ref: Hash, } @@ -117,7 +118,9 @@ fn storage_key() -> storage::Key { .expect("Cannot obtain a storage key") } -/// Helper function to get the optional masp section reference from the [`Actions`]. If more than one [`MaspAction`] has been found we return the first one +/// Helper function to get the optional masp section reference from the +/// [`Actions`]. 
If more than one [`MaspAction`] is found, we return the +/// first one pub fn get_masp_section_ref<T: Read>( reader: &T, ) -> Result<Option<Hash>, <T as Read>::Err> { diff --git a/crates/tx/src/data/mod.rs b/crates/tx/src/data/mod.rs index e2100ce5096..20c4019fc20 100644 --- a/crates/tx/src/data/mod.rs +++ b/crates/tx/src/data/mod.rs @@ -21,7 +21,7 @@ use namada_core::borsh::{ BorshDeserialize, BorshSchema, BorshSerialize, BorshSerializeExt, }; use namada_core::hash::Hash; -use namada_core::masp::BatchMaspTxRefs; +use namada_core::masp::MaspTxRefs; use namada_core::storage; use namada_events::Event; use namada_gas::{Gas, VpsGas}; @@ -242,8 +242,10 @@ impl<'de, T: Deserialize<'de>> serde::Deserialize<'de> for BatchResults { /// The extended transaction result, containing the references to masp /// sections (if any) pub struct ExtendedTxResult { + /// The transaction result pub tx_result: TxResult, - pub masp_tx_refs: BatchMaspTxRefs, + /// The optional references to masp sections + pub masp_tx_refs: MaspTxRefs, } impl Default for ExtendedTxResult { @@ -300,7 +302,7 @@ impl TxResult { /// Converts this result to [`ExtendedTxResult`] pub fn to_extended_result( self, - masp_tx_refs: Option<BatchMaspTxRefs>, + masp_tx_refs: Option<MaspTxRefs>, ) -> ExtendedTxResult { ExtendedTxResult { tx_result: self, diff --git a/crates/tx/src/types.rs b/crates/tx/src/types.rs index c783ce6076f..3156b584d5d 100644 --- a/crates/tx/src/types.rs +++ b/crates/tx/src/types.rs @@ -1730,8 +1730,8 @@ impl<'tx> Tx { } } -/// Represents the pointers of an indexed tx, which are the block height, the -/// index inside that block and the commitment inside the tx bundle +/// Represents the pointers to an indexed tx, which are the block height and the +/// index inside that block #[derive( Debug, Clone, @@ -1749,9 +1749,6 @@ pub struct IndexedTx { pub height: BlockHeight, /// The index in the block of the tx pub index: TxIndex, - /// This is a pointer to the inner tx inside the batch - //FIXME: do we really need this? Probably not for the masp - pub inner_tx: TxCommitments, } impl Default for IndexedTx { @@ -1759,7 +1756,6 @@ impl Default for IndexedTx { Self { height: BlockHeight::first(), index: TxIndex(0), - inner_tx: TxCommitments::default(), } } } diff --git a/crates/vp_env/src/lib.rs b/crates/vp_env/src/lib.rs index b72ca703c7d..534db3d0b08 100644 --- a/crates/vp_env/src/lib.rs +++ b/crates/vp_env/src/lib.rs @@ -25,7 +25,6 @@ use namada_core::address::Address; use namada_core::borsh::BorshDeserialize; use namada_core::hash::Hash; use namada_core::storage::{BlockHeight, Epoch, Epochs, Header, Key, TxIndex}; -use namada_core::token::Transfer; use namada_events::{Event, EventType}; use namada_storage::StorageRead; use namada_tx::BatchedTxRef; diff --git a/wasm/tx_ibc/src/lib.rs b/wasm/tx_ibc/src/lib.rs index e8d681ff94a..7d6a020bd55 100644 --- a/wasm/tx_ibc/src/lib.rs +++ b/wasm/tx_ibc/src/lib.rs @@ -16,28 +16,25 @@ fn apply_tx(ctx: &mut Ctx, tx_data: BatchedTx) -> TxResult { // Temp. 
workaround for let transfer = tx_ibc_execute()?; - if let Some(transfer) = transfer { - if let Some(masp_section_ref) = transfer.shielded { - let shielded = signed - .get_section(&masp_section_ref) - .and_then(|x| x.as_ref().masp_tx()) - .ok_or_err_msg( - "Unable to find required shielded section in tx data", - ) - .map_err(|err| { - ctx.set_commitment_sentinel(); - err - })?; - token::utils::handle_masp_tx( - ctx, - &shielded, - transfer.key.as_deref(), + if let Some(masp_section_ref) = + transfer.and_then(|transfer| transfer.shielded) + { + let shielded = tx_data + .tx + .get_section(&masp_section_ref) + .and_then(|x| x.as_ref().masp_tx()) + .ok_or_err_msg( + "Unable to find required shielded section in tx data", ) + .map_err(|err| { + ctx.set_commitment_sentinel(); + err + })?; + token::utils::handle_masp_tx(ctx, &shielded) .wrap_err("Encountered error while handling MASP transaction")?; - update_masp_note_commitment_tree(&shielded) - .wrap_err("Failed to update the MASP commitment tree")?; - ctx.push_action(Action::Masp(MaspAction { masp_section_ref }))?; - } + update_masp_note_commitment_tree(&shielded) + .wrap_err("Failed to update the MASP commitment tree")?; + ctx.push_action(Action::Masp(MaspAction { masp_section_ref }))?; } Ok(()) diff --git a/wasm/tx_transfer/src/lib.rs b/wasm/tx_transfer/src/lib.rs index 871cb836fc9..62e7adebacc 100644 --- a/wasm/tx_transfer/src/lib.rs +++ b/wasm/tx_transfer/src/lib.rs @@ -22,7 +22,8 @@ fn apply_tx(ctx: &mut Ctx, tx_data: BatchedTx) -> TxResult { .wrap_err("Token transfer failed")?; if let Some(masp_section_ref) = transfer.shielded { - let shielded = signed + let shielded = tx_data + .tx .get_section(&masp_section_ref) .and_then(|x| x.as_ref().masp_tx()) .ok_or_err_msg( @@ -32,7 +33,7 @@ fn apply_tx(ctx: &mut Ctx, tx_data: BatchedTx) -> TxResult { ctx.set_commitment_sentinel(); err })?; - token::utils::handle_masp_tx(ctx, &shielded, transfer.key.as_deref()) + token::utils::handle_masp_tx(ctx, &shielded) .wrap_err("Encountered error while handling MASP transaction")?; update_masp_note_commitment_tree(&shielded) .wrap_err("Failed to update the MASP commitment tree")?;