From 0e120034565c83b306e87b8b86b420f519adeef5 Mon Sep 17 00:00:00 2001 From: satan Date: Thu, 4 Apr 2024 17:39:36 +0200 Subject: [PATCH 01/29] Made fetching and decrypting MASP notes operate in parallel. Fetching can recover from network failures --- Cargo.lock | 34 +- crates/apps_lib/src/cli/client.rs | 2 +- crates/apps_lib/src/cli/wallet.rs | 2 +- crates/apps_lib/src/client/masp.rs | 260 ++- crates/node/src/lib.rs | 2 +- crates/sdk/Cargo.toml | 4 + crates/sdk/src/masp/mod.rs | 1452 ++++++++++++ .../sdk/src/{masp.rs => masp/shielded_ctx.rs} | 2024 ++++------------- crates/sdk/src/masp/types.rs | 262 +++ crates/sdk/src/masp/utils.rs | 653 ++++++ crates/sdk/src/queries/mod.rs | 4 +- crates/sdk/src/queries/shell.rs | 2 +- crates/sdk/src/tx.rs | 5 +- crates/tests/src/e2e/ledger_tests.rs | 2 - 14 files changed, 3067 insertions(+), 1641 deletions(-) create mode 100644 crates/sdk/src/masp/mod.rs rename crates/sdk/src/{masp.rs => masp/shielded_ctx.rs} (51%) create mode 100644 crates/sdk/src/masp/types.rs create mode 100644 crates/sdk/src/masp/utils.rs diff --git a/Cargo.lock b/Cargo.lock index 40787e4c1c..1b99f2fad7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -377,7 +377,7 @@ dependencies = [ "pin-project-lite", "rustversion", "serde", - "sync_wrapper", + "sync_wrapper 0.1.2", "tower", "tower-layer", "tower-service", @@ -2656,6 +2656,18 @@ dependencies = [ "paste", ] +[[package]] +name = "flume" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55ac459de2512911e4b674ce33cf20befaba382d05b62b008afc1c8b57cbf181" +dependencies = [ + "futures-core", + "futures-sink", + "nanorand", + "spin 0.9.8", +] + [[package]] name = "fnv" version = "1.0.7" @@ -2808,6 +2820,7 @@ checksum = "45ec6fe3675af967e67c5536c0b9d44e34e6c52f86bedc4ea49c5317b8e94d06" dependencies = [ "futures-channel", "futures-task", + "tokio", ] [[package]] @@ -5243,7 +5256,9 @@ dependencies = [ "ethers", "eyre", "fd-lock", + "flume", "futures", + "futures-locks", 
"itertools 0.12.1", "jubjub", "lazy_static", @@ -5277,6 +5292,7 @@ dependencies = [ "prost 0.12.3", "rand 0.8.5", "rand_core 0.6.4", + "rayon", "regex", "ripemd", "serde", @@ -5284,6 +5300,7 @@ dependencies = [ "sha2 0.9.9", "slip10_ed25519", "smooth-operator", + "sync_wrapper 1.0.1", "tempfile", "tendermint-rpc", "thiserror", @@ -5559,6 +5576,15 @@ dependencies = [ "sha2 0.9.9", ] +[[package]] +name = "nanorand" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a51313c5820b0b02bd422f4b44776fbf47961755c74ce64afc73bfad10226c3" +dependencies = [ + "getrandom 0.2.15", +] + [[package]] name = "native-tls" version = "0.2.11" @@ -7843,6 +7869,12 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" +[[package]] +name = "sync_wrapper" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7065abeca94b6a8a577f9bd45aa0867a2238b74e8eb67cf10d492bc39351394" + [[package]] name = "sysinfo" version = "0.27.8" diff --git a/crates/apps_lib/src/cli/client.rs b/crates/apps_lib/src/cli/client.rs index 03f69c3f3e..a31e71e0a9 100644 --- a/crates/apps_lib/src/cli/client.rs +++ b/crates/apps_lib/src/cli/client.rs @@ -12,7 +12,7 @@ use crate::cli::cmds::*; use crate::client::{rpc, tx, utils}; impl CliApi { - pub async fn handle_client_command( + pub async fn handle_client_command( client: Option, cmd: cli::NamadaClient, io: IO, diff --git a/crates/apps_lib/src/cli/wallet.rs b/crates/apps_lib/src/cli/wallet.rs index 4eeab319f6..cca0d98066 100644 --- a/crates/apps_lib/src/cli/wallet.rs +++ b/crates/apps_lib/src/cli/wallet.rs @@ -16,7 +16,7 @@ use namada::core::address::{Address, DecodeError}; use namada::core::key::*; use namada::core::masp::{ExtendedSpendingKey, MaspValue, PaymentAddress}; use namada::io::Io; -use namada_sdk::masp::find_valid_diversifier; +use 
namada_sdk::masp::utils::find_valid_diversifier; use namada_sdk::wallet::{ DecryptionError, DerivationPath, DerivationPathError, FindKeyError, Wallet, }; diff --git a/crates/apps_lib/src/client/masp.rs b/crates/apps_lib/src/client/masp.rs index 983be6d718..ae51df48ef 100644 --- a/crates/apps_lib/src/client/masp.rs +++ b/crates/apps_lib/src/client/masp.rs @@ -1,23 +1,23 @@ use std::fmt::Debug; +use std::sync::{Arc, Mutex}; use color_eyre::owo_colors::OwoColorize; use masp_primitives::sapling::ViewingKey; use masp_primitives::zip32::ExtendedSpendingKey; use namada_sdk::error::Error; use namada_sdk::io::Io; -use namada_sdk::masp::{ - IndexedNoteEntry, ProgressLogger, ProgressType, ShieldedContext, - ShieldedUtils, -}; +use namada_sdk::masp::types::IndexedNoteEntry; +use namada_sdk::masp::utils::{ProgressLogger, ProgressType}; +use namada_sdk::masp::{ShieldedContext, ShieldedUtils}; use namada_sdk::queries::Client; use namada_sdk::storage::BlockHeight; -use namada_sdk::{display, display_line, MaybeSend, MaybeSync}; +use namada_sdk::{display, display_line}; #[allow(clippy::too_many_arguments)] pub async fn syncing< - U: ShieldedUtils + MaybeSend + MaybeSync, + U: ShieldedUtils + Send + Sync, C: Client + Sync, - IO: Io, + IO: Io + Sync + Send, >( mut shielded: ShieldedContext, client: &C, @@ -65,100 +65,232 @@ pub async fn syncing< } } -pub struct CliLogging<'io, T, IO: Io> { - items: Vec, +#[derive(Default, Copy, Clone)] +struct IterProgress { index: usize, length: usize, +} +struct StdoutDrawer<'io, IO: Io> { io: &'io IO, + fetch: IterProgress, + scan: IterProgress, +} + +impl<'io, IO: Io> StdoutDrawer<'io, IO> { + fn draw(&self) { + let (fetch_percent, fetch_completed) = (self.fetch.length > 0) + .then(|| { + let fetch_percent = + (100 * self.fetch.index) / self.fetch.length; + let fetch_completed: String = + vec!['#'; fetch_percent].iter().collect(); + (fetch_percent, fetch_completed) + }) + .unzip(); + let fetch_incomplete = fetch_percent + .as_ref() + .map(|p| 
vec!['.'; 100 - *p].iter().collect::()); + + let (scan_percent, scan_completed) = (self.scan.length > 0) + .then(|| { + let scan_percent = (100 * self.scan.index) / self.scan.length; + let scan_completed: String = + vec!['#'; scan_percent].iter().collect(); + (scan_percent, scan_completed) + }) + .unzip(); + let scan_incomplete = scan_percent + .as_ref() + .map(|p| vec!['.'; 100 - *p].iter().collect::()); + + match (fetch_percent, scan_percent) { + (Some(fp), Some(sp)) => { + display_line!(self.io, "\x1b[4A\x1b[J"); + display_line!( + self.io, + "Fetched block {:?} of {:?}", + self.fetch.index, + self.fetch.length + ); + display_line!( + self.io, + "[{}{}] ~~ {} %", + fetch_completed.unwrap(), + fetch_incomplete.unwrap(), + fp + ); + display_line!( + self.io, + "Scanned {} of {}", + self.scan.index, + self.scan.length + ); + display!( + self.io, + "[{}{}] ~~ {} %", + scan_completed.unwrap(), + scan_incomplete.unwrap(), + sp + ); + self.io.flush() + } + (Some(fp), None) => { + display_line!(self.io, "\x1b[4A\x1b[J"); + display_line!( + self.io, + "Fetched block {:?} of {:?}", + self.fetch.index, + self.fetch.length + ); + display!( + self.io, + "[{}{}] ~~ {} \n\n%", + fetch_completed.unwrap(), + fetch_incomplete.unwrap(), + fp + ); + self.io.flush() + } + (None, Some(sp)) => { + display_line!(self.io, "\x1b[4A\x1b[J"); + display_line!( + self.io, + "Scanned {} of {}", + self.scan.index, + self.scan.length + ); + display!( + self.io, + "[{}{}] ~~ {} \n\n%", + scan_completed.unwrap(), + scan_incomplete.unwrap(), + sp + ); + self.io.flush() + } + _ => {} + } + } +} + +pub struct CliLogging<'io, T, I, IO> +where + T: Debug, + I: Iterator, + IO: Io, +{ + items: I, + drawer: Arc>>, r#type: ProgressType, } -impl<'io, T: Debug, IO: Io> CliLogging<'io, T, IO> { - fn new(items: I, io: &'io IO, r#type: ProgressType) -> Self - where - I: IntoIterator, - { - let items: Vec<_> = items.into_iter().collect(); +impl<'io, T, I, IO> CliLogging<'io, T, I, IO> +where + T: Debug, + I: 
Iterator, + IO: Io, +{ + fn new( + items: I, + r#type: ProgressType, + drawer: Arc>>, + ) -> Self { + let (size, _) = items.size_hint(); + { + let mut locked = drawer.lock().unwrap(); + match r#type { + ProgressType::Fetch => { + locked.fetch.length = size; + } + ProgressType::Scan => { + locked.scan.length = size; + } + } + } Self { - length: items.len(), items, - index: 0, - io, + drawer, r#type, } } + + fn advance_index(&self) { + let mut locked = self.drawer.lock().unwrap(); + match self.r#type { + ProgressType::Fetch => { + locked.fetch.index += 1; + } + ProgressType::Scan => { + locked.scan.index += 1; + } + } + } + + fn draw(&self) { + let locked = self.drawer.lock().unwrap(); + locked.draw(); + } } -impl<'io, T: Debug, IO: Io> Iterator for CliLogging<'io, T, IO> { +impl<'io, T, I, IO> Iterator for CliLogging<'io, T, I, IO> +where + T: Debug, + I: Iterator, + IO: Io, +{ type Item = T; fn next(&mut self) -> Option { - if self.index == 0 { - self.items = { - let mut new_items = vec![]; - std::mem::swap(&mut new_items, &mut self.items); - new_items.into_iter().rev().collect() - }; - } - if self.items.is_empty() { - return None; - } - self.index += 1; - let percent = (100 * self.index) / self.length; - let completed: String = vec!['#'; percent].iter().collect(); - let incomplete: String = vec!['.'; 100 - percent].iter().collect(); - display_line!(self.io, "\x1b[2A\x1b[J"); - match self.r#type { - ProgressType::Fetch => display_line!( - self.io, - "Fetched block {:?} of {:?}", - self.items.last().unwrap(), - self.items[0] - ), - ProgressType::Scan => display_line!( - self.io, - "Scanning {} of {}", - self.index, - self.length - ), - } - display!(self.io, "[{}{}] ~~ {} %", completed, incomplete, percent); - self.io.flush(); - self.items.pop() + let next_item = self.items.next()?; + self.advance_index(); + self.draw(); + Some(next_item) } } /// A progress logger for the CLI -#[derive(Debug, Clone)] +#[derive(Clone)] pub struct CliLogger<'io, IO: Io> { - io: &'io 
IO, + drawer: Arc>>, } impl<'io, IO: Io> CliLogger<'io, IO> { pub fn new(io: &'io IO) -> Self { - Self { io } + Self { + drawer: Arc::new(Mutex::new(StdoutDrawer { + io, + fetch: Default::default(), + scan: Default::default(), + })), + } } } impl<'io, IO: Io> ProgressLogger for CliLogger<'io, IO> { - type Fetch = CliLogging<'io, u64, IO>; - type Scan = CliLogging<'io, IndexedNoteEntry, IO>; - fn io(&self) -> &IO { - self.io + let io = { + let locked = self.drawer.lock().unwrap(); + locked.io + }; + io } - fn fetch(&self, items: I) -> Self::Fetch + fn fetch(&self, items: I) -> impl Iterator where - I: IntoIterator, + I: Iterator, { - CliLogging::new(items, self.io, ProgressType::Fetch) + CliLogging::new(items, ProgressType::Fetch, self.drawer.clone()) } - fn scan(&self, items: I) -> Self::Scan + fn scan(&self, items: I) -> impl Iterator where - I: IntoIterator, + I: Iterator, { - CliLogging::new(items, self.io, ProgressType::Scan) + CliLogging::new(items, ProgressType::Scan, self.drawer.clone()) + } + + fn left_to_fetch(&self) -> usize { + let locked = self.drawer.lock().unwrap().fetch; + locked.length - locked.index } } diff --git a/crates/node/src/lib.rs b/crates/node/src/lib.rs index f210bb8a71..0fe4b45ac7 100644 --- a/crates/node/src/lib.rs +++ b/crates/node/src/lib.rs @@ -371,7 +371,7 @@ async fn run_aux( }; tracing::info!("Loading MASP verifying keys."); - let _ = namada_sdk::masp::preload_verifying_keys(); + let _ = namada_sdk::masp::utils::preload_verifying_keys(); tracing::info!("Done loading MASP verifying keys."); // Start ABCI server and broadcaster (the latter only if we are a validator diff --git a/crates/sdk/Cargo.toml b/crates/sdk/Cargo.toml index c408e74085..66c0977b13 100644 --- a/crates/sdk/Cargo.toml +++ b/crates/sdk/Cargo.toml @@ -102,7 +102,9 @@ ethbridge-bridge-contract.workspace = true ethers.workspace = true eyre.workspace = true fd-lock = { workspace = true, optional = true } +flume = "0.11.0" futures.workspace = true +futures-locks = 
"0.7.1" itertools.workspace = true jubjub = { workspace = true, optional = true } lazy_static.workspace = true @@ -119,12 +121,14 @@ proptest = { workspace = true, optional = true } prost.workspace = true rand = { workspace = true, optional = true } rand_core = { workspace = true, optional = true } +rayon.workspace = true regex.workspace = true ripemd.workspace = true serde.workspace = true serde_json.workspace = true sha2.workspace = true slip10_ed25519.workspace = true +sync_wrapper = "1.0.0" smooth-operator.workspace = true tendermint-rpc = { workspace = true, optional = true } thiserror.workspace = true diff --git a/crates/sdk/src/masp/mod.rs b/crates/sdk/src/masp/mod.rs new file mode 100644 index 0000000000..bfc7b61c99 --- /dev/null +++ b/crates/sdk/src/masp/mod.rs @@ -0,0 +1,1452 @@ +//! MASP verification wrappers. + +pub mod shielded_ctx; +pub mod types; +pub mod utils; + +use std::collections::HashMap; +use std::env; +use std::fmt::Debug; +use std::ops::Deref; +use std::path::PathBuf; + +use borsh::{BorshDeserialize, BorshSerialize}; +use lazy_static::lazy_static; +use masp_primitives::asset_type::AssetType; +#[cfg(feature = "mainnet")] +use masp_primitives::consensus::MainNetwork; +#[cfg(not(feature = "mainnet"))] +use masp_primitives::consensus::TestNetwork; +use masp_primitives::convert::AllowedConversion; +use masp_primitives::ff::PrimeField; +use masp_primitives::group::GroupEncoding; +use masp_primitives::memo::MemoBytes; +use masp_primitives::merkle_tree::MerklePath; +use masp_primitives::sapling::note_encryption::*; +use masp_primitives::sapling::redjubjub::PublicKey; +use masp_primitives::sapling::{Diversifier, Node, Note}; +use masp_primitives::transaction::components::transparent::builder::TransparentBuilder; +use masp_primitives::transaction::components::{ + ConvertDescription, I128Sum, OutputDescription, SpendDescription, TxOut, + U64Sum, +}; +use masp_primitives::transaction::fees::fixed::FeeRule; +use 
masp_primitives::transaction::sighash::{signature_hash, SignableInput}; +use masp_primitives::transaction::txid::TxIdDigester; +use masp_primitives::transaction::{ + Authorization, Authorized, Transaction, TransactionData, TransparentAddress, +}; +use masp_primitives::zip32::{ExtendedFullViewingKey, ExtendedSpendingKey}; +use masp_proofs::bellman::groth16::PreparedVerifyingKey; +use masp_proofs::bls12_381::Bls12; +use masp_proofs::prover::LocalTxProver; +#[cfg(not(feature = "testing"))] +use masp_proofs::sapling::SaplingVerificationContext; +pub use namada_core::masp::{ + encode_asset_type, AssetData, BalanceOwner, ExtendedViewingKey, + PaymentAddress, TransferSource, TransferTarget, +}; +use namada_token::MaspDigitPos; +pub use shielded_ctx::ShieldedContext; +pub use utils::ShieldedUtils; + +use crate::masp::types::{PVKs, PartialAuthorized}; +use crate::masp::utils::{get_params_dir, load_pvks}; +use crate::{MaybeSend, MaybeSync}; + +/// Env var to point to a dir with MASP parameters. When not specified, +/// the default OS specific path is used. +pub const ENV_VAR_MASP_PARAMS_DIR: &str = "NAMADA_MASP_PARAMS_DIR"; + +/// Randomness seed for MASP integration tests to build proofs with +/// deterministic rng. +pub const ENV_VAR_MASP_TEST_SEED: &str = "NAMADA_MASP_TEST_SEED"; + +/// The network to use for MASP +#[cfg(feature = "mainnet")] +const NETWORK: MainNetwork = MainNetwork; +#[cfg(not(feature = "mainnet"))] +const NETWORK: TestNetwork = TestNetwork; + +// TODO these could be exported from masp_proof crate +/// Spend circuit name +pub const SPEND_NAME: &str = "masp-spend.params"; +/// Output circuit name +pub const OUTPUT_NAME: &str = "masp-output.params"; +/// Convert circuit name +pub const CONVERT_NAME: &str = "masp-convert.params"; + +lazy_static! 
{ + /// MASP verifying keys load from parameters + static ref VERIFIYING_KEYS: PVKs = + { + let params_dir = get_params_dir(); + let [spend_path, convert_path, output_path] = + [SPEND_NAME, CONVERT_NAME, OUTPUT_NAME].map(|p| params_dir.join(p)); + + #[cfg(feature = "download-params")] + if !spend_path.exists() || !convert_path.exists() || !output_path.exists() { + let paths = masp_proofs::download_masp_parameters(None).expect( + "MASP parameters were not present, expected the download to \ + succeed", + ); + if paths.spend != spend_path + || paths.convert != convert_path + || paths.output != output_path + { + panic!( + "unrecoverable: downloaded missing masp params, but to an \ + unfamiliar path" + ) + } + } + // size and blake2b checked here + let params = masp_proofs::load_parameters( + spend_path.as_path(), + output_path.as_path(), + convert_path.as_path(), + ); + PVKs { + spend_vk: params.spend_vk, + convert_vk: params.convert_vk, + output_vk: params.output_vk + } + }; +} + +/// check_spend wrapper +pub fn check_spend( + spend: &SpendDescription<::SaplingAuth>, + sighash: &[u8; 32], + #[cfg(not(feature = "testing"))] ctx: &mut SaplingVerificationContext, + #[cfg(feature = "testing")] + ctx: &mut testing::MockSaplingVerificationContext, + parameters: &PreparedVerifyingKey, +) -> bool { + let zkproof = + masp_proofs::bellman::groth16::Proof::read(spend.zkproof.as_slice()); + let zkproof = match zkproof { + Ok(zkproof) => zkproof, + _ => return false, + }; + + ctx.check_spend( + spend.cv, + spend.anchor, + &spend.nullifier.0, + PublicKey(spend.rk.0), + sighash, + spend.spend_auth_sig, + zkproof, + parameters, + ) +} + +/// check_output wrapper +pub fn check_output( + output: &OutputDescription<<::SaplingAuth as masp_primitives::transaction::components::sapling::Authorization>::Proof>, + #[cfg(not(feature = "testing"))] ctx: &mut SaplingVerificationContext, + #[cfg(feature = "testing")] + ctx: &mut testing::MockSaplingVerificationContext, + parameters: 
&PreparedVerifyingKey, +) -> bool { + let zkproof = + masp_proofs::bellman::groth16::Proof::read(output.zkproof.as_slice()); + let zkproof = match zkproof { + Ok(zkproof) => zkproof, + _ => return false, + }; + let epk = + masp_proofs::jubjub::ExtendedPoint::from_bytes(&output.ephemeral_key.0); + let epk = match epk.into() { + Some(p) => p, + None => return false, + }; + + ctx.check_output(output.cv, output.cmu, epk, zkproof, parameters) +} + +/// check convert wrapper +pub fn check_convert( + convert: &ConvertDescription<<::SaplingAuth as masp_primitives::transaction::components::sapling::Authorization>::Proof>, + #[cfg(not(feature = "testing"))] ctx: &mut SaplingVerificationContext, + #[cfg(feature = "testing")] + ctx: &mut testing::MockSaplingVerificationContext, + parameters: &PreparedVerifyingKey, +) -> bool { + let zkproof = + masp_proofs::bellman::groth16::Proof::read(convert.zkproof.as_slice()); + let zkproof = match zkproof { + Ok(zkproof) => zkproof, + _ => return false, + }; + + ctx.check_convert(convert.cv, convert.anchor, zkproof, parameters) +} + +/// Partially deauthorize the transparent bundle +pub fn partial_deauthorize( + tx_data: &TransactionData, +) -> Option> { + let transp = tx_data.transparent_bundle().and_then(|x| { + let mut tb = TransparentBuilder::empty(); + for vin in &x.vin { + tb.add_input(TxOut { + asset_type: vin.asset_type, + value: vin.value, + address: vin.address, + }) + .ok()?; + } + for vout in &x.vout { + tb.add_output(&vout.address, vout.asset_type, vout.value) + .ok()?; + } + tb.build() + }); + if tx_data.transparent_bundle().is_some() != transp.is_some() { + return None; + } + Some(TransactionData::from_parts( + tx_data.version(), + tx_data.consensus_branch_id(), + tx_data.lock_time(), + tx_data.expiry_height(), + transp, + tx_data.sapling_bundle().cloned(), + )) +} + +/// Verify a shielded transaction. 
+pub fn verify_shielded_tx( + transaction: &Transaction, + mut consume_verify_gas: F, +) -> Result<(), StorageError> + where + F: FnMut(u64) -> std::result::Result<(), StorageError>, +{ + tracing::info!("entered verify_shielded_tx()"); + + let sapling_bundle = if let Some(bundle) = transaction.sapling_bundle() { + bundle + } else { + return Err(StorageError::SimpleMessage("no sapling bundle")); + }; + let tx_data = transaction.deref(); + + // Partially deauthorize the transparent bundle + let unauth_tx_data = match partial_deauthorize(tx_data) { + Some(tx_data) => tx_data, + None => { + return Err(StorageError::SimpleMessage( + "Failed to partially de-authorize", + )); + } + }; + + let txid_parts = unauth_tx_data.digest(TxIdDigester); + // the commitment being signed is shared across all Sapling inputs; once + // V4 transactions are deprecated this should just be the txid, but + // for now we need to continue to compute it here. + let sighash = + signature_hash(&unauth_tx_data, &SignableInput::Shielded, &txid_parts); + + tracing::info!("sighash computed"); + + let PVKs { + spend_vk, + convert_vk, + output_vk, + } = load_pvks(); + + #[cfg(not(feature = "testing"))] + let mut ctx = SaplingVerificationContext::new(true); + #[cfg(feature = "testing")] + let mut ctx = testing::MockSaplingVerificationContext::new(true); + for spend in &sapling_bundle.shielded_spends { + consume_verify_gas(namada_gas::MASP_VERIFY_SPEND_GAS)?; + if !check_spend(spend, sighash.as_ref(), &mut ctx, spend_vk) { + return Err(StorageError::SimpleMessage("Invalid shielded spend")); + } + } + for convert in &sapling_bundle.shielded_converts { + consume_verify_gas(namada_gas::MASP_VERIFY_CONVERT_GAS)?; + if !check_convert(convert, &mut ctx, convert_vk) { + return Err(StorageError::SimpleMessage( + "Invalid shielded conversion", + )); + } + } + for output in &sapling_bundle.shielded_outputs { + consume_verify_gas(namada_gas::MASP_VERIFY_OUTPUT_GAS)?; + if !check_output(output, &mut ctx, output_vk) { 
+ return Err(StorageError::SimpleMessage("Invalid shielded output")); + } + } + + tracing::info!("passed spend/output verification"); + + let assets_and_values: I128Sum = sapling_bundle.value_balance.clone(); + + tracing::info!( + "accumulated {} assets/values", + assets_and_values.components().len() + ); + + consume_verify_gas(namada_gas::MASP_VERIFY_FINAL_GAS)?; + let result = ctx.final_check( + assets_and_values, + sighash.as_ref(), + sapling_bundle.authorization.binding_sig, + ); + tracing::info!("final check result {result}"); + if !result { + return Err(StorageError::SimpleMessage("MASP final check failed")); + } + Ok(()) +} + +mod tests { + /// quick and dirty test. will fail on size check + #[test] + #[should_panic(expected = "parameter file size is not correct")] + fn test_wrong_masp_params() { + use std::io::Write; + + use super::{CONVERT_NAME, OUTPUT_NAME, SPEND_NAME}; + + let tempdir = tempfile::tempdir() + .expect("expected a temp dir") + .into_path(); + let fake_params_paths = + [SPEND_NAME, OUTPUT_NAME, CONVERT_NAME].map(|p| tempdir.join(p)); + for path in &fake_params_paths { + let mut f = + std::fs::File::create(path).expect("expected a temp file"); + f.write_all(b"fake params") + .expect("expected a writable temp file"); + f.sync_all() + .expect("expected a writable temp file (on sync)"); + } + + std::env::set_var(super::ENV_VAR_MASP_PARAMS_DIR, tempdir.as_os_str()); + // should panic here + masp_proofs::load_parameters( + &fake_params_paths[0], + &fake_params_paths[1], + &fake_params_paths[2], + ); + } + + /// a more involved test, using dummy parameters with the right + /// size but the wrong hash. 
+ #[test] + #[should_panic(expected = "parameter file is not correct")] + fn test_wrong_masp_params_hash() { + use masp_primitives::ff::PrimeField; + use masp_proofs::bellman::groth16::{ + generate_random_parameters, Parameters, + }; + use masp_proofs::bellman::{Circuit, ConstraintSystem, SynthesisError}; + use masp_proofs::bls12_381::{Bls12, Scalar}; + + use super::{CONVERT_NAME, OUTPUT_NAME, SPEND_NAME}; + + struct FakeCircuit { + x: E, + } + + impl Circuit for FakeCircuit { + fn synthesize>( + self, + cs: &mut CS, + ) -> Result<(), SynthesisError> { + let x = cs.alloc(|| "x", || Ok(self.x)).unwrap(); + cs.enforce( + || { + "this is an extra long constraint name so that rustfmt \ + is ok with wrapping the params of enforce()" + }, + |lc| lc + x, + |lc| lc + x, + |lc| lc + x, + ); + Ok(()) + } + } + + let dummy_circuit = FakeCircuit { x: Scalar::zero() }; + let mut rng = rand::thread_rng(); + let fake_params: Parameters = + generate_random_parameters(dummy_circuit, &mut rng) + .expect("expected to generate fake params"); + + let tempdir = tempfile::tempdir() + .expect("expected a temp dir") + .into_path(); + // TODO: get masp to export these consts + let fake_params_paths = [ + (SPEND_NAME, 49848572u64), + (OUTPUT_NAME, 16398620u64), + (CONVERT_NAME, 22570940u64), + ] + .map(|(p, s)| (tempdir.join(p), s)); + for (path, size) in &fake_params_paths { + let mut f = + std::fs::File::create(path).expect("expected a temp file"); + fake_params + .write(&mut f) + .expect("expected a writable temp file"); + // the dummy circuit has one constraint, and therefore its + // params should always be smaller than the large masp + // circuit params. so this truncate extends the file, and + // extra bytes at the end do not make it invalid. 
+ f.set_len(*size) + .expect("expected to truncate the temp file"); + f.sync_all() + .expect("expected a writable temp file (on sync)"); + } + + std::env::set_var(super::ENV_VAR_MASP_PARAMS_DIR, tempdir.as_os_str()); + // should panic here + masp_proofs::load_parameters( + &fake_params_paths[0].0, + &fake_params_paths[1].0, + &fake_params_paths[2].0, + ); + } +} + +#[cfg(any(test, feature = "testing"))] +/// Tests and strategies for transactions +pub mod testing { + use std::ops::AddAssign; + use std::sync::Mutex; + + use bls12_381::{G1Affine, G2Affine}; + use masp_primitives::consensus::testing::arb_height; + use masp_primitives::constants::SPENDING_KEY_GENERATOR; + use masp_primitives::ff::Field; + use masp_primitives::sapling::prover::TxProver; + use masp_primitives::sapling::redjubjub::Signature; + use masp_primitives::sapling::{ProofGenerationKey, Rseed}; + use masp_primitives::transaction::builder::Builder; + use masp_primitives::transaction::components::GROTH_PROOF_SIZE; + use masp_proofs::bellman::groth16::Proof; + use proptest::prelude::*; + use proptest::sample::SizeRange; + use proptest::test_runner::TestRng; + use proptest::{collection, option, prop_compose}; + use rand_core::CryptoRng; + + use super::*; + use crate::address::testing::arb_address; + use crate::masp::types::{ShieldedTransfer, WalletMap}; + use crate::masp::utils::find_valid_diversifier; + use crate::masp_primitives::consensus::BranchId; + use crate::masp_primitives::constants::VALUE_COMMITMENT_RANDOMNESS_GENERATOR; + use crate::masp_primitives::merkle_tree::FrozenCommitmentTree; + use crate::masp_primitives::sapling::keys::OutgoingViewingKey; + use crate::masp_primitives::sapling::redjubjub::PrivateKey; + use crate::masp_primitives::transaction::components::transparent::testing::arb_transparent_address; + use crate::masp_proofs::sapling::SaplingVerificationContextInner; + use crate::storage::testing::arb_epoch; + use crate::token::testing::arb_denomination; + + /// A context object for 
verifying the Sapling components of a single Zcash + /// transaction. Same as SaplingVerificationContext, but always assumes the + /// proofs to be valid. + pub struct MockSaplingVerificationContext { + inner: SaplingVerificationContextInner, + zip216_enabled: bool, + } + + impl MockSaplingVerificationContext { + /// Construct a new context to be used with a single transaction. + pub fn new(zip216_enabled: bool) -> Self { + MockSaplingVerificationContext { + inner: SaplingVerificationContextInner::new(), + zip216_enabled, + } + } + + /// Perform consensus checks on a Sapling SpendDescription, while + /// accumulating its value commitment inside the context for later use. + #[allow(clippy::too_many_arguments)] + pub fn check_spend( + &mut self, + cv: jubjub::ExtendedPoint, + anchor: bls12_381::Scalar, + nullifier: &[u8; 32], + rk: PublicKey, + sighash_value: &[u8; 32], + spend_auth_sig: Signature, + zkproof: Proof, + _verifying_key: &PreparedVerifyingKey, + ) -> bool { + let zip216_enabled = true; + self.inner.check_spend( + cv, + anchor, + nullifier, + rk, + sighash_value, + spend_auth_sig, + zkproof, + &mut (), + |_, rk, msg, spend_auth_sig| { + rk.verify_with_zip216( + &msg, + &spend_auth_sig, + SPENDING_KEY_GENERATOR, + zip216_enabled, + ) + }, + |_, _proof, _public_inputs| true, + ) + } + + /// Perform consensus checks on a Sapling SpendDescription, while + /// accumulating its value commitment inside the context for later use. + #[allow(clippy::too_many_arguments)] + pub fn check_convert( + &mut self, + cv: jubjub::ExtendedPoint, + anchor: bls12_381::Scalar, + zkproof: Proof, + _verifying_key: &PreparedVerifyingKey, + ) -> bool { + self.inner.check_convert( + cv, + anchor, + zkproof, + &mut (), + |_, _proof, _public_inputs| true, + ) + } + + /// Perform consensus checks on a Sapling OutputDescription, while + /// accumulating its value commitment inside the context for later use. 
+ pub fn check_output( + &mut self, + cv: jubjub::ExtendedPoint, + cmu: bls12_381::Scalar, + epk: jubjub::ExtendedPoint, + zkproof: Proof, + _verifying_key: &PreparedVerifyingKey, + ) -> bool { + self.inner.check_output( + cv, + cmu, + epk, + zkproof, + |_proof, _public_inputs| true, + ) + } + + /// Perform consensus checks on the valueBalance and bindingSig parts of + /// a Sapling transaction. All SpendDescriptions and + /// OutputDescriptions must have been checked before calling + /// this function. + pub fn final_check( + &self, + value_balance: I128Sum, + sighash_value: &[u8; 32], + binding_sig: Signature, + ) -> bool { + self.inner.final_check( + value_balance, + sighash_value, + binding_sig, + |bvk, msg, binding_sig| { + bvk.verify_with_zip216( + &msg, + &binding_sig, + VALUE_COMMITMENT_RANDOMNESS_GENERATOR, + self.zip216_enabled, + ) + }, + ) + } + } + + // This function computes `value` in the exponent of the value commitment + // base + fn masp_compute_value_balance( + asset_type: AssetType, + value: i128, + ) -> Option { + // Compute the absolute value (failing if -i128::MAX is + // the value) + let abs = match value.checked_abs() { + Some(a) => a as u128, + None => return None, + }; + + // Is it negative? We'll have to negate later if so. + let is_negative = value.is_negative(); + + // Compute it in the exponent + let mut abs_bytes = [0u8; 32]; + abs_bytes[0..16].copy_from_slice(&abs.to_le_bytes()); + let mut value_balance = asset_type.value_commitment_generator() + * jubjub::Fr::from_bytes(&abs_bytes).unwrap(); + + // Negate if necessary + if is_negative { + value_balance = -value_balance; + } + + // Convert to unknown order point + Some(value_balance.into()) + } + + // A context object for creating the Sapling components of a Zcash + // transaction. 
+ pub struct SaplingProvingContext { + bsk: jubjub::Fr, + // (sum of the Spend value commitments) - (sum of the Output value + // commitments) + cv_sum: jubjub::ExtendedPoint, + } + + // An implementation of TxProver that does everything except generating + // valid zero-knowledge proofs. Uses the supplied source of randomness to + // carry out its operations. + pub struct MockTxProver(pub Mutex); + + impl TxProver for MockTxProver { + type SaplingProvingContext = SaplingProvingContext; + + fn new_sapling_proving_context(&self) -> Self::SaplingProvingContext { + SaplingProvingContext { + bsk: jubjub::Fr::zero(), + cv_sum: jubjub::ExtendedPoint::identity(), + } + } + + fn spend_proof( + &self, + ctx: &mut Self::SaplingProvingContext, + proof_generation_key: ProofGenerationKey, + _diversifier: Diversifier, + _rseed: Rseed, + ar: jubjub::Fr, + asset_type: AssetType, + value: u64, + _anchor: bls12_381::Scalar, + _merkle_path: MerklePath, + ) -> Result< + ([u8; GROTH_PROOF_SIZE], jubjub::ExtendedPoint, PublicKey), + (), + > { + // Initialize secure RNG + let mut rng = self.0.lock().unwrap(); + + // We create the randomness of the value commitment + let rcv = jubjub::Fr::random(&mut *rng); + + // Accumulate the value commitment randomness in the context + { + let mut tmp = rcv; + tmp.add_assign(&ctx.bsk); + + // Update the context + ctx.bsk = tmp; + } + + // Construct the value commitment + let value_commitment = asset_type.value_commitment(value, rcv); + + // This is the result of the re-randomization, we compute it for the + // caller + let rk = PublicKey(proof_generation_key.ak.into()) + .randomize(ar, SPENDING_KEY_GENERATOR); + + // Compute value commitment + let value_commitment: jubjub::ExtendedPoint = + value_commitment.commitment().into(); + + // Accumulate the value commitment in the context + ctx.cv_sum += value_commitment; + + let mut zkproof = [0u8; GROTH_PROOF_SIZE]; + let proof = Proof:: { + a: G1Affine::generator(), + b: G2Affine::generator(), + c: 
G1Affine::generator(), + }; + proof + .write(&mut zkproof[..]) + .expect("should be able to serialize a proof"); + Ok((zkproof, value_commitment, rk)) + } + + fn output_proof( + &self, + ctx: &mut Self::SaplingProvingContext, + _esk: jubjub::Fr, + _payment_address: masp_primitives::sapling::PaymentAddress, + _rcm: jubjub::Fr, + asset_type: AssetType, + value: u64, + ) -> ([u8; GROTH_PROOF_SIZE], jubjub::ExtendedPoint) { + // Initialize secure RNG + let mut rng = self.0.lock().unwrap(); + + // We construct ephemeral randomness for the value commitment. This + // randomness is not given back to the caller, but the synthetic + // blinding factor `bsk` is accumulated in the context. + let rcv = jubjub::Fr::random(&mut *rng); + + // Accumulate the value commitment randomness in the context + { + let mut tmp = rcv.neg(); // Outputs subtract from the total. + tmp.add_assign(&ctx.bsk); + + // Update the context + ctx.bsk = tmp; + } + + // Construct the value commitment for the proof instance + let value_commitment = asset_type.value_commitment(value, rcv); + + // Compute the actual value commitment + let value_commitment_point: jubjub::ExtendedPoint = + value_commitment.commitment().into(); + + // Accumulate the value commitment in the context. We do this to + // check internal consistency. + ctx.cv_sum -= value_commitment_point; // Outputs subtract from the total. 
+ + let mut zkproof = [0u8; GROTH_PROOF_SIZE]; + let proof = Proof:: { + a: G1Affine::generator(), + b: G2Affine::generator(), + c: G1Affine::generator(), + }; + proof + .write(&mut zkproof[..]) + .expect("should be able to serialize a proof"); + + (zkproof, value_commitment_point) + } + + fn convert_proof( + &self, + ctx: &mut Self::SaplingProvingContext, + allowed_conversion: AllowedConversion, + value: u64, + _anchor: bls12_381::Scalar, + _merkle_path: MerklePath, + ) -> Result<([u8; GROTH_PROOF_SIZE], jubjub::ExtendedPoint), ()> + { + // Initialize secure RNG + let mut rng = self.0.lock().unwrap(); + + // We create the randomness of the value commitment + let rcv = jubjub::Fr::random(&mut *rng); + + // Accumulate the value commitment randomness in the context + { + let mut tmp = rcv; + tmp.add_assign(&ctx.bsk); + + // Update the context + ctx.bsk = tmp; + } + + // Construct the value commitment + let value_commitment = + allowed_conversion.value_commitment(value, rcv); + + // Compute value commitment + let value_commitment: jubjub::ExtendedPoint = + value_commitment.commitment().into(); + + // Accumulate the value commitment in the context + ctx.cv_sum += value_commitment; + + let mut zkproof = [0u8; GROTH_PROOF_SIZE]; + let proof = Proof:: { + a: G1Affine::generator(), + b: G2Affine::generator(), + c: G1Affine::generator(), + }; + proof + .write(&mut zkproof[..]) + .expect("should be able to serialize a proof"); + + Ok((zkproof, value_commitment)) + } + + fn binding_sig( + &self, + ctx: &mut Self::SaplingProvingContext, + assets_and_values: &I128Sum, + sighash: &[u8; 32], + ) -> Result { + // Initialize secure RNG + let mut rng = self.0.lock().unwrap(); + + // Grab the current `bsk` from the context + let bsk = PrivateKey(ctx.bsk); + + // Grab the `bvk` using DerivePublic. 
+ let bvk = PublicKey::from_private( + &bsk, + VALUE_COMMITMENT_RANDOMNESS_GENERATOR, + ); + + // In order to check internal consistency, let's use the accumulated + // value commitments (as the verifier would) and apply + // value_balance to compare against our derived bvk. + { + let final_bvk = assets_and_values + .components() + .map(|(asset_type, value_balance)| { + // Compute value balance for each asset + // Error for bad value balances (-INT128_MAX value) + masp_compute_value_balance(*asset_type, *value_balance) + }) + .try_fold(ctx.cv_sum, |tmp, value_balance| { + // Compute cv_sum minus sum of all value balances + Result::<_, ()>::Ok(tmp - value_balance.ok_or(())?) + })?; + + // The result should be the same, unless the provided + // valueBalance is wrong. + if bvk.0 != final_bvk { + return Err(()); + } + } + + // Construct signature message + let mut data_to_be_signed = [0u8; 64]; + data_to_be_signed[0..32].copy_from_slice(&bvk.0.to_bytes()); + data_to_be_signed[32..64].copy_from_slice(&sighash[..]); + + // Sign + Ok(bsk.sign( + &data_to_be_signed, + &mut *rng, + VALUE_COMMITMENT_RANDOMNESS_GENERATOR, + )) + } + } + + #[derive(Debug, Clone)] + // Adapts a CSPRNG from a PRNG for proptesting + pub struct TestCsprng(R); + + impl CryptoRng for TestCsprng {} + + impl RngCore for TestCsprng { + fn next_u32(&mut self) -> u32 { + self.0.next_u32() + } + + fn next_u64(&mut self) -> u64 { + self.0.next_u64() + } + + fn fill_bytes(&mut self, dest: &mut [u8]) { + self.0.fill_bytes(dest) + } + + fn try_fill_bytes( + &mut self, + dest: &mut [u8], + ) -> Result<(), rand::Error> { + self.0.try_fill_bytes(dest) + } + } + + prop_compose! { + // Expose a random number generator + pub fn arb_rng()(rng in Just(()).prop_perturb(|(), rng| rng)) -> TestRng { + rng + } + } + + prop_compose! 
{ + // Generate an arbitrary output description with the given value + pub fn arb_output_description( + asset_type: AssetType, + value: u64, + )( + mut rng in arb_rng().prop_map(TestCsprng), + ) -> (Option, masp_primitives::sapling::PaymentAddress, AssetType, u64, MemoBytes) { + let mut spending_key_seed = [0; 32]; + rng.fill_bytes(&mut spending_key_seed); + let spending_key = masp_primitives::zip32::ExtendedSpendingKey::master(spending_key_seed.as_ref()); + + let viewing_key = ExtendedFullViewingKey::from(&spending_key).fvk.vk; + let (div, _g_d) = find_valid_diversifier(&mut rng); + let payment_addr = viewing_key + .to_payment_address(div) + .expect("a PaymentAddress"); + + (None, payment_addr, asset_type, value, MemoBytes::empty()) + } + } + + prop_compose! { + // Generate an arbitrary spend description with the given value + pub fn arb_spend_description( + asset_type: AssetType, + value: u64, + )( + address in arb_transparent_address(), + expiration_height in arb_height(BranchId::MASP, &TestNetwork), + mut rng in arb_rng().prop_map(TestCsprng), + prover_rng in arb_rng().prop_map(TestCsprng), + ) -> (ExtendedSpendingKey, Diversifier, Note, Node) { + let mut spending_key_seed = [0; 32]; + rng.fill_bytes(&mut spending_key_seed); + let spending_key = masp_primitives::zip32::ExtendedSpendingKey::master(spending_key_seed.as_ref()); + + let viewing_key = ExtendedFullViewingKey::from(&spending_key).fvk.vk; + let (div, _g_d) = find_valid_diversifier(&mut rng); + let payment_addr = viewing_key + .to_payment_address(div) + .expect("a PaymentAddress"); + + let mut builder = Builder::::new_with_rng( + NETWORK, + // NOTE: this is going to add 20 more blocks to the actual + // expiration but there's no other exposed function that we could + // use from the masp crate to specify the expiration better + expiration_height.unwrap(), + rng, + ); + // Add a transparent input to support our desired shielded output + builder.add_transparent_input(TxOut { asset_type, value, address 
}).unwrap(); + // Finally add the shielded output that we need + builder.add_sapling_output(None, payment_addr, asset_type, value, MemoBytes::empty()).unwrap(); + // Build a transaction in order to get its shielded outputs + let (transaction, metadata) = builder.build( + &MockTxProver(Mutex::new(prover_rng)), + &FeeRule::non_standard(U64Sum::zero()), + ).unwrap(); + // Extract the shielded output from the transaction + let shielded_output = &transaction + .sapling_bundle() + .unwrap() + .shielded_outputs[metadata.output_index(0).unwrap()]; + + // Let's now decrypt the constructed notes + let (note, pa, _memo) = try_sapling_note_decryption::<_, OutputDescription<<::SaplingAuth as masp_primitives::transaction::components::sapling::Authorization>::Proof>>( + &NETWORK, + 1.into(), + &PreparedIncomingViewingKey::new(&viewing_key.ivk()), + shielded_output, + ).unwrap(); + assert_eq!(payment_addr, pa); + // Make a path to our new note + let node = Node::new(shielded_output.cmu.to_repr()); + (spending_key, div, note, node) + } + } + + prop_compose! { + // Generate an arbitrary MASP denomination + pub fn arb_masp_digit_pos()(denom in 0..4u8) -> MaspDigitPos { + MaspDigitPos::from(denom) + } + } + + // Maximum value for a note partition + const MAX_MONEY: u64 = 100; + // Maximum number of partitions for a note + const MAX_SPLITS: usize = 3; + + prop_compose! { + // Arbitrarily partition the given vector of integers into sets and sum + // them + pub fn arb_partition(values: Vec)(buckets in ((!values.is_empty()) as usize)..=values.len())( + values in Just(values.clone()), + assigns in collection::vec(0..buckets, values.len()), + buckets in Just(buckets), + ) -> Vec { + let mut buckets = vec![0; buckets]; + for (bucket, value) in assigns.iter().zip(values) { + buckets[*bucket] += value; + } + buckets + } + } + + prop_compose!
{ + // Generate arbitrary spend descriptions with the given asset type + // partitioning the given values + pub fn arb_spend_descriptions( + asset: AssetData, + values: Vec, + )(partition in arb_partition(values))( + spend_description in partition + .iter() + .map(|value| arb_spend_description( + encode_asset_type( + asset.token.clone(), + asset.denom, + asset.position, + asset.epoch, + ).unwrap(), + *value, + )).collect::>() + ) -> Vec<(ExtendedSpendingKey, Diversifier, Note, Node)> { + spend_description + } + } + + prop_compose! { + // Generate arbitrary output descriptions with the given asset type + // partitioning the given values + pub fn arb_output_descriptions( + asset: AssetData, + values: Vec, + )(partition in arb_partition(values))( + output_description in partition + .iter() + .map(|value| arb_output_description( + encode_asset_type( + asset.token.clone(), + asset.denom, + asset.position, + asset.epoch, + ).unwrap(), + *value, + )).collect::>() + ) -> Vec<(Option, masp_primitives::sapling::PaymentAddress, AssetType, u64, MemoBytes)> { + output_description + } + } + + prop_compose! { + // Generate arbitrary spend descriptions with the given asset type + // partitioning the given values + pub fn arb_txouts( + asset: AssetData, + values: Vec, + address: TransparentAddress, + )( + partition in arb_partition(values), + ) -> Vec { + partition + .iter() + .map(|value| TxOut { + asset_type: encode_asset_type( + asset.token.clone(), + asset.denom, + asset.position, + asset.epoch, + ).unwrap(), + value: *value, + address, + }).collect::>() + } + } + + prop_compose! 
{ + // Generate an arbitrary shielded MASP transaction builder + pub fn arb_shielded_builder(asset_range: impl Into)( + assets in collection::hash_map( + arb_pre_asset_type(), + collection::vec(..MAX_MONEY, ..MAX_SPLITS), + asset_range, + ), + )( + expiration_height in arb_height(BranchId::MASP, &TestNetwork), + rng in arb_rng().prop_map(TestCsprng), + spend_descriptions in assets + .iter() + .map(|(asset, values)| arb_spend_descriptions(asset.clone(), values.clone())) + .collect::>(), + output_descriptions in assets + .iter() + .map(|(asset, values)| arb_output_descriptions(asset.clone(), values.clone())) + .collect::>(), + assets in Just(assets), + ) -> ( + Builder::>, + HashMap, + ) { + let mut builder = Builder::::new_with_rng( + NETWORK, + // NOTE: this is going to add 20 more blocks to the actual + // expiration but there's no other exposed function that we could + // use from the masp crate to specify the expiration better + expiration_height.unwrap(), + rng, + ); + let mut leaves = Vec::new(); + // First construct a Merkle tree containing all notes to be used + for (_esk, _div, _note, node) in spend_descriptions.iter().flatten() { + leaves.push(*node); + } + let tree = FrozenCommitmentTree::new(&leaves); + // Then use the notes knowing that they all have the same anchor + for (idx, (esk, div, note, _node)) in spend_descriptions.iter().flatten().enumerate() { + builder.add_sapling_spend(*esk, *div, *note, tree.path(idx)).unwrap(); + } + for (ovk, payment_addr, asset_type, value, memo) in output_descriptions.into_iter().flatten() { + builder.add_sapling_output(ovk, payment_addr, asset_type, value, memo).unwrap(); + } + (builder, assets.into_iter().map(|(k, v)| (k, v.iter().sum())).collect()) + } + } + + prop_compose! 
{ + // Generate an arbitrary pre-asset type + pub fn arb_pre_asset_type()( + token in arb_address(), + denom in arb_denomination(), + position in arb_masp_digit_pos(), + epoch in option::of(arb_epoch()), + ) -> AssetData { + AssetData { + token, + denom, + position, + epoch, + } + } + } + + prop_compose! { + // Generate an arbitrary shielding MASP transaction builder + pub fn arb_shielding_builder( + source: TransparentAddress, + asset_range: impl Into, + )( + assets in collection::hash_map( + arb_pre_asset_type(), + collection::vec(..MAX_MONEY, ..MAX_SPLITS), + asset_range, + ), + )( + expiration_height in arb_height(BranchId::MASP, &TestNetwork), + rng in arb_rng().prop_map(TestCsprng), + txins in assets + .iter() + .map(|(asset, values)| arb_txouts(asset.clone(), values.clone(), source)) + .collect::>(), + output_descriptions in assets + .iter() + .map(|(asset, values)| arb_output_descriptions(asset.clone(), values.clone())) + .collect::>(), + assets in Just(assets), + ) -> ( + Builder::>, + HashMap, + ) { + let mut builder = Builder::::new_with_rng( + NETWORK, + // NOTE: this is going to add 20 more blocks to the actual + // expiration but there's no other exposed function that we could + // use from the masp crate to specify the expiration better + expiration_height.unwrap(), + rng, + ); + for txin in txins.into_iter().flatten() { + builder.add_transparent_input(txin).unwrap(); + } + for (ovk, payment_addr, asset_type, value, memo) in output_descriptions.into_iter().flatten() { + builder.add_sapling_output(ovk, payment_addr, asset_type, value, memo).unwrap(); + } + (builder, assets.into_iter().map(|(k, v)| (k, v.iter().sum())).collect()) + } + } + + prop_compose! 
{ + // Generate an arbitrary deshielding MASP transaction builder + pub fn arb_deshielding_builder( + target: TransparentAddress, + asset_range: impl Into, + )( + assets in collection::hash_map( + arb_pre_asset_type(), + collection::vec(..MAX_MONEY, ..MAX_SPLITS), + asset_range, + ), + )( + expiration_height in arb_height(BranchId::MASP, &TestNetwork), + rng in arb_rng().prop_map(TestCsprng), + spend_descriptions in assets + .iter() + .map(|(asset, values)| arb_spend_descriptions(asset.clone(), values.clone())) + .collect::>(), + txouts in assets + .iter() + .map(|(asset, values)| arb_txouts(asset.clone(), values.clone(), target)) + .collect::>(), + assets in Just(assets), + ) -> ( + Builder::>, + HashMap, + ) { + let mut builder = Builder::::new_with_rng( + NETWORK, + // NOTE: this is going to add 20 more blocks to the actual + // expiration but there's no other exposed function that we could + // use from the masp crate to specify the expiration better + expiration_height.unwrap(), + rng, + ); + let mut leaves = Vec::new(); + // First construct a Merkle tree containing all notes to be used + for (_esk, _div, _note, node) in spend_descriptions.iter().flatten() { + leaves.push(*node); + } + let tree = FrozenCommitmentTree::new(&leaves); + // Then use the notes knowing that they all have the same anchor + for (idx, (esk, div, note, _node)) in spend_descriptions.into_iter().flatten().enumerate() { + builder.add_sapling_spend(esk, div, note, tree.path(idx)).unwrap(); + } + for txout in txouts.into_iter().flatten() { + builder.add_transparent_output(&txout.address, txout.asset_type, txout.value).unwrap(); + } + (builder, assets.into_iter().map(|(k, v)| (k, v.iter().sum())).collect()) + } + } + + prop_compose! 
{ + // Generate an arbitrary MASP shielded transfer + pub fn arb_shielded_transfer( + asset_range: impl Into, + )(asset_range in Just(asset_range.into()))( + (builder, asset_types) in arb_shielded_builder(asset_range), + epoch in arb_epoch(), + rng in arb_rng().prop_map(TestCsprng), + ) -> (ShieldedTransfer, HashMap) { + let (masp_tx, metadata) = builder.clone().build( + &MockTxProver(Mutex::new(rng)), + &FeeRule::non_standard(U64Sum::zero()), + ).unwrap(); + (ShieldedTransfer { + builder: builder.map_builder(WalletMap), + metadata, + masp_tx, + epoch, + }, asset_types) + } + } + + prop_compose! { + // Generate an arbitrary MASP shielded transfer + pub fn arb_shielding_transfer( + source: TransparentAddress, + asset_range: impl Into, + )(asset_range in Just(asset_range.into()))( + (builder, asset_types) in arb_shielding_builder( + source, + asset_range, + ), + epoch in arb_epoch(), + rng in arb_rng().prop_map(TestCsprng), + ) -> (ShieldedTransfer, HashMap) { + let (masp_tx, metadata) = builder.clone().build( + &MockTxProver(Mutex::new(rng)), + &FeeRule::non_standard(U64Sum::zero()), + ).unwrap(); + (ShieldedTransfer { + builder: builder.map_builder(WalletMap), + metadata, + masp_tx, + epoch, + }, asset_types) + } + } + + prop_compose! 
{ + // Generate an arbitrary MASP shielded transfer + pub fn arb_deshielding_transfer( + target: TransparentAddress, + asset_range: impl Into, + )(asset_range in Just(asset_range.into()))( + (builder, asset_types) in arb_deshielding_builder( + target, + asset_range, + ), + epoch in arb_epoch(), + rng in arb_rng().prop_map(TestCsprng), + ) -> (ShieldedTransfer, HashMap) { + let (masp_tx, metadata) = builder.clone().build( + &MockTxProver(Mutex::new(rng)), + &FeeRule::non_standard(U64Sum::zero()), + ).unwrap(); + (ShieldedTransfer { + builder: builder.map_builder(WalletMap), + metadata, + masp_tx, + epoch, + }, asset_types) + } + } +} + +#[cfg(feature = "std")] +/// Implementation of MASP functionality depending on a standard filesystem +pub mod fs { + use std::fs::{File, OpenOptions}; + use std::io::{Read, Write}; + + use super::*; + use crate::masp::shielded_ctx::ShieldedContext; + use crate::masp::types::ContextSyncStatus; + use crate::masp::utils::ShieldedUtils; + + /// Shielded context file name + const FILE_NAME: &str = "shielded.dat"; + const TMP_FILE_NAME: &str = "shielded.tmp"; + const SPECULATIVE_FILE_NAME: &str = "speculative_shielded.dat"; + const SPECULATIVE_TMP_FILE_NAME: &str = "speculative_shielded.tmp"; + + #[derive(Debug, BorshSerialize, BorshDeserialize, Clone)] + /// An implementation of ShieldedUtils for standard filesystems + pub struct FsShieldedUtils { + #[borsh(skip)] + context_dir: PathBuf, + } + + impl FsShieldedUtils { + /// Initialize a shielded transaction context that identifies notes + /// decryptable by any viewing key in the given set + pub fn new(context_dir: PathBuf) -> ShieldedContext { + // Make sure that MASP parameters are downloaded to enable MASP + // transaction building and verification later on + let params_dir = get_params_dir(); + let spend_path = params_dir.join(SPEND_NAME); + let convert_path = params_dir.join(CONVERT_NAME); + let output_path = params_dir.join(OUTPUT_NAME); + if !(spend_path.exists() + && 
convert_path.exists() + && output_path.exists()) + { + println!("MASP parameters not present, downloading..."); + masp_proofs::download_masp_parameters(None) + .expect("MASP parameters not present or downloadable"); + println!( + "MASP parameter download complete, resuming execution..." + ); + } + // Finally initialize a shielded context with the supplied directory + + let sync_status = + if std::fs::read(context_dir.join(SPECULATIVE_FILE_NAME)) + .is_ok() + { + // Load speculative state + ContextSyncStatus::Speculative + } else { + ContextSyncStatus::Confirmed + }; + + let utils = Self { context_dir }; + ShieldedContext { + utils, + sync_status, + ..Default::default() + } + } + } + + impl Default for FsShieldedUtils { + fn default() -> Self { + Self { + context_dir: PathBuf::from(FILE_NAME), + } + } + } + + #[cfg_attr(feature = "async-send", async_trait::async_trait)] + #[cfg_attr(not(feature = "async-send"), async_trait::async_trait(?Send))] + impl ShieldedUtils for FsShieldedUtils { + fn local_tx_prover(&self) -> LocalTxProver { + if let Ok(params_dir) = env::var(ENV_VAR_MASP_PARAMS_DIR) { + let params_dir = PathBuf::from(params_dir); + let spend_path = params_dir.join(SPEND_NAME); + let convert_path = params_dir.join(CONVERT_NAME); + let output_path = params_dir.join(OUTPUT_NAME); + LocalTxProver::new(&spend_path, &output_path, &convert_path) + } else { + LocalTxProver::with_default_location() + .expect("unable to load MASP Parameters") + } + } + + /// Try to load the last saved shielded context from the given context + /// directory. If this fails, then leave the current context unchanged. 
+ async fn load( + &self, + ctx: &mut ShieldedContext, + force_confirmed: bool, + ) -> std::io::Result<()> { + // Try to load shielded context from file + let file_name = if force_confirmed { + FILE_NAME + } else { + match ctx.sync_status { + ContextSyncStatus::Confirmed => FILE_NAME, + ContextSyncStatus::Speculative => SPECULATIVE_FILE_NAME, + } + }; + let mut ctx_file = File::open(self.context_dir.join(file_name))?; + let mut bytes = Vec::new(); + ctx_file.read_to_end(&mut bytes)?; + // Fill the supplied context with the deserialized object + *ctx = ShieldedContext { + utils: ctx.utils.clone(), + ..ShieldedContext::::deserialize(&mut &bytes[..])? + }; + Ok(()) + } + + /// Save this confirmed shielded context into its associated context + /// directory. At the same time, delete the speculative file if present + async fn save( + &self, + ctx: &ShieldedContext, + ) -> std::io::Result<()> { + // TODO: use mktemp crate? + let (tmp_file_name, file_name) = match ctx.sync_status { + ContextSyncStatus::Confirmed => (TMP_FILE_NAME, FILE_NAME), + ContextSyncStatus::Speculative => { + (SPECULATIVE_TMP_FILE_NAME, SPECULATIVE_FILE_NAME) + } + }; + let tmp_path = self.context_dir.join(tmp_file_name); + { + // First serialize the shielded context into a temporary file. + // Inability to create this file implies a simultaneous write + // is in progress. In this case, immediately + // fail. This is unproblematic because the data + // intended to be stored can always be re-fetched + // from the blockchain. + let mut ctx_file = OpenOptions::new() + .write(true) + .create_new(true) + .open(tmp_path.clone())?; + let mut bytes = Vec::new(); + ctx.serialize(&mut bytes) + .expect("cannot serialize shielded context"); + ctx_file.write_all(&bytes[..])?; + } + // Atomically update the old shielded context file with new data. + // Atomicity is required to prevent other client instances from + // reading corrupt data.
+ std::fs::rename(tmp_path, self.context_dir.join(file_name))?; + + // Remove the speculative file if present since its state is + // overruled by the confirmed one we just saved + if let ContextSyncStatus::Confirmed = ctx.sync_status { + let _ = std::fs::remove_file( + self.context_dir.join(SPECULATIVE_FILE_NAME), + ); + } + + Ok(()) + } + } +} diff --git a/crates/sdk/src/masp.rs b/crates/sdk/src/masp/shielded_ctx.rs similarity index 51% rename from crates/sdk/src/masp.rs rename to crates/sdk/src/masp/shielded_ctx.rs index f37a828785..77582cb228 100644 --- a/crates/sdk/src/masp.rs +++ b/crates/sdk/src/masp/shielded_ctx.rs @@ -1,7 +1,6 @@ -//! MASP verification wrappers. - use std::cmp::Ordering; -use std::collections::{btree_map, BTreeMap, BTreeSet}; +use std::collections::{btree_map, BTreeMap, BTreeSet, HashMap, HashSet}; +use std::convert::TryInto; use std::env; use std::fmt::Debug; use std::ops::Deref; @@ -11,18 +10,16 @@ use borsh::{BorshDeserialize, BorshSerialize}; use borsh_ext::BorshSerializeExt; use lazy_static::lazy_static; use masp_primitives::asset_type::AssetType; -#[cfg(feature = "mainnet")] -use masp_primitives::consensus::MainNetwork as Network; -#[cfg(not(feature = "mainnet"))] -use masp_primitives::consensus::TestNetwork as Network; +use masp_primitives::consensus::TestNetwork; use masp_primitives::convert::AllowedConversion; use masp_primitives::ff::PrimeField; use masp_primitives::memo::MemoBytes; use masp_primitives::merkle_tree::{ CommitmentTree, IncrementalWitness, MerklePath, }; -use masp_primitives::sapling::keys::FullViewingKey; -use masp_primitives::sapling::note_encryption::*; +use masp_primitives::sapling::note_encryption::{ + try_sapling_note_decryption, PreparedIncomingViewingKey, +}; use masp_primitives::sapling::{ Diversifier, Node, Note, Nullifier, ViewingKey, }; @@ -30,55 +27,58 @@ use masp_primitives::transaction::builder::{self, *}; use masp_primitives::transaction::components::sapling::builder::{ RngBuildParams,
SaplingMetadata, }; -use masp_primitives::transaction::components::sapling::{ - Authorized as SaplingAuthorized, Bundle as SaplingBundle, -}; use masp_primitives::transaction::components::transparent::builder::TransparentBuilder; use masp_primitives::transaction::components::{ I128Sum, OutputDescription, TxOut, U64Sum, ValueSum, }; use masp_primitives::transaction::fees::fixed::FeeRule; -use masp_primitives::transaction::sighash::{signature_hash, SignableInput}; -use masp_primitives::transaction::txid::TxIdDigester; use masp_primitives::transaction::{ - Authorization, Authorized, Transaction, TransactionData, - TransparentAddress, Unauthorized, + builder, Authorization, Authorized, Transaction, TransparentAddress, }; use masp_primitives::zip32::{ExtendedFullViewingKey, ExtendedSpendingKey}; -use masp_proofs::bellman::groth16::VerifyingKey; +use masp_proofs::bellman::groth16::PreparedVerifyingKey; use masp_proofs::bls12_381::Bls12; use masp_proofs::prover::LocalTxProver; -use masp_proofs::sapling::BatchValidator; +#[cfg(not(feature = "testing"))] +use masp_proofs::sapling::SaplingVerificationContext; use namada_core::address::Address; use namada_core::collections::{HashMap, HashSet}; use namada_core::dec::Dec; -use namada_core::masp::MaspTxRefs; pub use namada_core::masp::{ encode_asset_type, AssetData, BalanceOwner, ExtendedViewingKey, PaymentAddress, TransferSource, TransferTarget, }; -use namada_core::storage::{BlockHeight, Epoch, TxIndex}; +use namada_core::storage::{BlockHeight, Epoch, IndexedTx, TxIndex}; use namada_core::time::{DateTimeUtc, DurationSecs}; use namada_core::uint::Uint; use namada_events::extend::{ - MaspTxBatchRefs as MaspTxBatchRefsAttr, - MaspTxBlockIndex as MaspTxBlockIndexAttr, ReadFromEventAttributes, + ReadFromEventAttributes, ValidMaspTx as ValidMaspTxAttr, }; +use namada_ibc::IbcMessage; use namada_macros::BorshDeserializer; #[cfg(feature = "migrations")] use namada_migrations::*; use namada_state::StorageError; -use namada_token::{self 
as token, Denomination, MaspDigitPos}; -use namada_tx::{IndexedTx, Tx}; -use rand::rngs::StdRng; -use rand_core::{CryptoRng, OsRng, RngCore, SeedableRng}; +use namada_token::{self as token, Denomination, MaspDigitPos, Transfer}; +use namada_tx::Tx; +use rand_core::OsRng; use ripemd::Digest as RipemdDigest; use sha2::Digest; -use smooth_operator::checked; use thiserror::Error; use crate::error::{Error, QueryError}; use crate::io::Io; +use crate::masp::types::{ + ContextSyncStatus, Conversions, MaspAmount, MaspChange, ShieldedTransfer, + TransactionDelta, TransferDelta, TransferErr, Unscanned, WalletMap, +}; +use crate::masp::utils::{ + cloned_pair, extract_masp_tx, extract_payload, fetch_channel, + get_indexed_masp_events_at_height, is_amount_required, to_viewing_key, + DefaultLogger, ExtractShieldedActionArg, FetchQueueSender, ProgressLogger, + ShieldedUtils, TaskManager, +}; +use crate::masp::{testing, ENV_VAR_MASP_TEST_SEED, NETWORK}; use crate::queries::Client; use crate::rpc::{query_block, query_conversion, query_denom}; use crate::{display_line, edisplay_line, rpc, MaybeSend, MaybeSync, Namada}; @@ -92,7 +92,10 @@ pub const ENV_VAR_MASP_PARAMS_DIR: &str = "NAMADA_MASP_PARAMS_DIR"; pub const ENV_VAR_MASP_TEST_SEED: &str = "NAMADA_MASP_TEST_SEED"; /// The network to use for MASP -const NETWORK: Network = Network; +#[cfg(feature = "mainnet")] +const NETWORK: MainNetwork = MainNetwork; +#[cfg(not(feature = "mainnet"))] +const NETWORK: TestNetwork = TestNetwork; // TODO these could be exported from masp_proof crate /// Spend circuit name @@ -103,10 +106,10 @@ pub const OUTPUT_NAME: &str = "masp-output.params"; pub const CONVERT_NAME: &str = "masp-convert.params"; /// Type alias for convenience and profit -pub type IndexedNoteData = BTreeMap>; +pub type IndexedNoteData = BTreeMap; /// Type alias for the entries of [`IndexedNoteData`] iterators -pub type IndexedNoteEntry = (IndexedTx, Vec); +pub type IndexedNoteEntry = (IndexedTx, Transaction); /// Shielded transfer 
#[derive(Clone, Debug, BorshSerialize, BorshDeserialize, BorshDeserializer)] @@ -122,7 +125,6 @@ pub struct ShieldedTransfer { } /// Shielded pool data for a token -#[allow(missing_docs)] #[derive(Debug, BorshSerialize, BorshDeserialize, BorshDeserializer)] pub struct MaspTokenRewardData { pub name: String, @@ -144,14 +146,20 @@ pub enum TransferErr { General(#[from] Error), } +#[derive(Debug, Clone)] +struct ExtractedMaspTx { + fee_unshielding: Option, + inner_tx: Option, +} + /// MASP verifying keys pub struct PVKs { /// spend verifying key - pub spend_vk: VerifyingKey, + pub spend_vk: PreparedVerifyingKey, /// convert verifying key - pub convert_vk: VerifyingKey, + pub convert_vk: PreparedVerifyingKey, /// output verifying key - pub output_vk: VerifyingKey, + pub output_vk: PreparedVerifyingKey, } lazy_static! { @@ -185,9 +193,9 @@ lazy_static! { convert_path.as_path(), ); PVKs { - spend_vk: params.spend_params.vk, - convert_vk: params.convert_params.vk, - output_vk: params.output_params.vk + spend_vk: params.spend_vk, + convert_vk: params.convert_vk, + output_vk: params.output_vk } }; } @@ -201,6 +209,76 @@ fn load_pvks() -> &'static PVKs { &VERIFIYING_KEYS } +/// check_spend wrapper +pub fn check_spend( + spend: &SpendDescription<::SaplingAuth>, + sighash: &[u8; 32], + #[cfg(not(feature = "testing"))] ctx: &mut SaplingVerificationContext, + #[cfg(feature = "testing")] + ctx: &mut testing::MockSaplingVerificationContext, + parameters: &PreparedVerifyingKey, +) -> bool { + let zkproof = + masp_proofs::bellman::groth16::Proof::read(spend.zkproof.as_slice()); + let zkproof = match zkproof { + Ok(zkproof) => zkproof, + _ => return false, + }; + + ctx.check_spend( + spend.cv, + spend.anchor, + &spend.nullifier.0, + PublicKey(spend.rk.0), + sighash, + spend.spend_auth_sig, + zkproof, + parameters, + ) +} + +/// check_output wrapper +pub fn check_output( + output: &OutputDescription<<::SaplingAuth as 
masp_primitives::transaction::components::sapling::Authorization>::Proof>, + #[cfg(not(feature = "testing"))] ctx: &mut SaplingVerificationContext, + #[cfg(feature = "testing")] + ctx: &mut testing::MockSaplingVerificationContext, + parameters: &PreparedVerifyingKey, +) -> bool { + let zkproof = + masp_proofs::bellman::groth16::Proof::read(output.zkproof.as_slice()); + let zkproof = match zkproof { + Ok(zkproof) => zkproof, + _ => return false, + }; + let epk = + masp_proofs::jubjub::ExtendedPoint::from_bytes(&output.ephemeral_key.0); + let epk = match epk.into() { + Some(p) => p, + None => return false, + }; + + ctx.check_output(output.cv, output.cmu, epk, zkproof, parameters) +} + +/// check convert wrapper +pub fn check_convert( + convert: &ConvertDescription<<::SaplingAuth as masp_primitives::transaction::components::sapling::Authorization>::Proof>, + #[cfg(not(feature = "testing"))] ctx: &mut SaplingVerificationContext, + #[cfg(feature = "testing")] + ctx: &mut testing::MockSaplingVerificationContext, + parameters: &PreparedVerifyingKey, +) -> bool { + let zkproof = + masp_proofs::bellman::groth16::Proof::read(convert.zkproof.as_slice()); + let zkproof = match zkproof { + Ok(zkproof) => zkproof, + _ => return false, + }; + + ctx.check_convert(convert.cv, convert.anchor, zkproof, parameters) +} + /// Represents an authorization where the Sapling bundle is authorized and the /// transparent bundle is unauthorized. pub struct PartialAuthorized; @@ -246,12 +324,12 @@ pub fn partial_deauthorize( /// Verify a shielded transaction. 
pub fn verify_shielded_tx( transaction: &Transaction, - consume_verify_gas: F, + mut consume_verify_gas: F, ) -> Result<(), StorageError> where - F: Fn(u64) -> std::result::Result<(), StorageError>, + F: FnMut(u64) -> std::result::Result<(), StorageError>, { - tracing::debug!("entered verify_shielded_tx()"); + tracing::info!("entered verify_shielded_tx()"); let sapling_bundle = if let Some(bundle) = transaction.sapling_bundle() { bundle @@ -276,7 +354,8 @@ where // for now we need to continue to compute it here. let sighash = signature_hash(&unauth_tx_data, &SignableInput::Shielded, &txid_parts); - tracing::debug!("sighash computed"); + + tracing::info!("sighash computed"); let PVKs { spend_vk, @@ -285,103 +364,49 @@ where } = load_pvks(); #[cfg(not(feature = "testing"))] - let mut ctx = BatchValidator::new(); + let mut ctx = SaplingVerificationContext::new(true); #[cfg(feature = "testing")] - let mut ctx = testing::MockBatchValidator::default(); - - // Charge gas before check bundle - charge_masp_check_bundle_gas(sapling_bundle, &consume_verify_gas)?; - - if !ctx.check_bundle(sapling_bundle.to_owned(), sighash.as_ref().to_owned()) - { - tracing::debug!("failed check bundle"); - return Err(StorageError::SimpleMessage("Invalid sapling bundle")); + let mut ctx = testing::MockSaplingVerificationContext::new(true); + for spend in &sapling_bundle.shielded_spends { + consume_verify_gas(namada_gas::MASP_VERIFY_SPEND_GAS)?; + if !check_spend(spend, sighash.as_ref(), &mut ctx, spend_vk) { + return Err(StorageError::SimpleMessage("Invalid shielded spend")); + } } - tracing::debug!("passed check bundle"); - - // Charge gas before final validation - charge_masp_validate_gas(sapling_bundle, consume_verify_gas)?; - if !ctx.validate(spend_vk, convert_vk, output_vk, OsRng) { - return Err(StorageError::SimpleMessage( - "Invalid proofs or signatures", - )); + for convert in &sapling_bundle.shielded_converts { + consume_verify_gas(namada_gas::MASP_VERIFY_CONVERT_GAS)?; + if 
!check_convert(convert, &mut ctx, convert_vk) { + return Err(StorageError::SimpleMessage( + "Invalid shielded conversion", + )); + } + } + for output in &sapling_bundle.shielded_outputs { + consume_verify_gas(namada_gas::MASP_VERIFY_OUTPUT_GAS)?; + if !check_output(output, &mut ctx, output_vk) { + return Err(StorageError::SimpleMessage("Invalid shielded output")); + } } - Ok(()) -} -// Charge gas for the check_bundle operation which does not leverage concurrency -fn charge_masp_check_bundle_gas( - sapling_bundle: &SaplingBundle, - consume_verify_gas: F, -) -> Result<(), namada_state::StorageError> -where - F: Fn(u64) -> std::result::Result<(), namada_state::StorageError>, -{ - consume_verify_gas(checked!( - (sapling_bundle.shielded_spends.len() as u64) - * namada_gas::MASP_SPEND_CHECK_GAS - )?)?; - - consume_verify_gas(checked!( - (sapling_bundle.shielded_converts.len() as u64) - * namada_gas::MASP_CONVERT_CHECK_GAS - )?)?; - - consume_verify_gas(checked!( - (sapling_bundle.shielded_outputs.len() as u64) - * namada_gas::MASP_OUTPUT_CHECK_GAS - )?) -} + tracing::info!("passed spend/output verification"); -// Charge gas for the final validation, taking advtange of concurrency for -// proofs verification but not for signatures -fn charge_masp_validate_gas( - sapling_bundle: &SaplingBundle, - consume_verify_gas: F, -) -> Result<(), namada_state::StorageError> -where - F: Fn(u64) -> std::result::Result<(), namada_state::StorageError>, -{ - // Signatures gas - consume_verify_gas(checked!( - // Add one for the binding signature - ((sapling_bundle.shielded_spends.len() as u64) + 1) - * namada_gas::MASP_VERIFY_SIG_GAS - )?)?; - - // If at least one note is present charge the fixed costs. 
Then charge the - // variable cost for every other note, amortized on the fixed expected - // number of cores - if let Some(remaining_notes) = - sapling_bundle.shielded_spends.len().checked_sub(1) - { - consume_verify_gas(namada_gas::MASP_FIXED_SPEND_GAS)?; - consume_verify_gas(checked!( - namada_gas::MASP_VARIABLE_SPEND_GAS * remaining_notes as u64 - / namada_gas::MASP_PARALLEL_GAS_DIVIDER - )?)?; - } + let assets_and_values: I128Sum = sapling_bundle.value_balance.clone(); - if let Some(remaining_notes) = - sapling_bundle.shielded_converts.len().checked_sub(1) - { - consume_verify_gas(namada_gas::MASP_FIXED_CONVERT_GAS)?; - consume_verify_gas(checked!( - namada_gas::MASP_VARIABLE_CONVERT_GAS * remaining_notes as u64 - / namada_gas::MASP_PARALLEL_GAS_DIVIDER - )?)?; - } + tracing::info!( + "accumulated {} assets/values", + assets_and_values.components().len() + ); - if let Some(remaining_notes) = - sapling_bundle.shielded_outputs.len().checked_sub(1) - { - consume_verify_gas(namada_gas::MASP_FIXED_OUTPUT_GAS)?; - consume_verify_gas(checked!( - namada_gas::MASP_VARIABLE_OUTPUT_GAS * remaining_notes as u64 - / namada_gas::MASP_PARALLEL_GAS_DIVIDER - )?)?; + consume_verify_gas(namada_gas::MASP_VERIFY_FINAL_GAS)?; + let result = ctx.final_check( + assets_and_values, + sighash.as_ref(), + sapling_bundle.authorization.binding_sig, + ); + tracing::info!("final check result {result}"); + if !result { + return Err(StorageError::SimpleMessage("MASP final check failed")); } - Ok(()) } @@ -389,10 +414,7 @@ where /// use the default. 
pub fn get_params_dir() -> PathBuf { if let Ok(params_dir) = env::var(ENV_VAR_MASP_PARAMS_DIR) { - #[allow(clippy::print_stdout)] - { - println!("Using {} as masp parameter folder.", params_dir); - } + println!("Using {} as masp parameter folder.", params_dir); PathBuf::from(params_dir) } else { masp_proofs::default_params_folder().unwrap() @@ -564,7 +586,7 @@ pub enum ContextSyncStatus { /// Represents the current state of the shielded pool from the perspective of /// the chosen viewing keys. -#[derive(BorshSerialize, BorshDeserialize, Debug)] +#[derive(BorshSerialize, BorshDeserialize, Debug, Clone)] pub struct ShieldedContext { /// Location where this shielded context is saved #[borsh(skip)] @@ -648,151 +670,66 @@ impl ShieldedContext { } /// Update the merkle tree of witnesses the first time we - /// scan new MASP transactions. - fn update_witness_map( + /// scan a new MASP transaction. + pub(crate) fn update_witness_map( &mut self, indexed_tx: IndexedTx, - shielded: &[Transaction], + shielded: &Transaction, ) -> Result<(), Error> { let mut note_pos = self.tree.size(); self.tx_note_map.insert(indexed_tx, note_pos); - - for tx in shielded { - for so in - tx.sapling_bundle().map_or(&vec![], |x| &x.shielded_outputs) - { - // Create merkle tree leaf node from note commitment - let node = Node::new(so.cmu.to_repr()); - // Update each merkle tree in the witness map with the latest - // addition - for (_, witness) in self.witness_map.iter_mut() { - witness.append(node).map_err(|()| { - Error::Other("note commitment tree is full".to_string()) - })?; - } - self.tree.append(node).map_err(|()| { + for so in shielded + .sapling_bundle() + .map_or(&vec![], |x| &x.shielded_outputs) + { + // Create merkle tree leaf node from note commitment + let node = Node::new(so.cmu.to_repr()); + // Update each merkle tree in the witness map with the latest + // addition + for (_, witness) in self.witness_map.iter_mut() { + witness.append(node).map_err(|()| { Error::Other("note commitment 
tree is full".to_string()) })?; - // Finally, make it easier to construct merkle paths to this new - // note - let witness = IncrementalWitness::::from_tree(&self.tree); - self.witness_map.insert(note_pos, witness); - note_pos += 1; - } - } - Ok(()) - } - - /// Fetch the current state of the multi-asset shielded pool into a - /// ShieldedContext - #[allow(clippy::too_many_arguments)] - pub async fn fetch( - &mut self, - client: &C, - logger: &impl ProgressLogger, - start_query_height: Option, - last_query_height: Option, - // NOTE: do not remove this argument, it will be used once the indexer - // is ready - _batch_size: u64, - sks: &[ExtendedSpendingKey], - fvks: &[ViewingKey], - ) -> Result<(), Error> { - // add new viewing keys - // Reload the state from file to get the last confirmed state and - // discard any speculative data, we cannot fetch on top of a - // speculative state - // Always reload the confirmed context or initialize a new one if not - // found - if self.load_confirmed().await.is_err() { - // Initialize a default context if we couldn't load a valid one - // from storage - *self = Self { - utils: std::mem::take(&mut self.utils), - ..Default::default() - }; - } - - for esk in sks { - let vk = to_viewing_key(esk).vk; - self.vk_heights.entry(vk).or_default(); - } - for vk in fvks { - self.vk_heights.entry(*vk).or_default(); - } - let _ = self.save().await; - // the latest block height which has been added to the witness Merkle - // tree - let Some(least_idx) = self.vk_heights.values().min().cloned() else { - return Ok(()); - }; - let last_witnessed_tx = self.tx_note_map.keys().max().cloned(); - // get the bounds on the block heights to fetch - let start_idx = - std::cmp::min(last_witnessed_tx.as_ref(), least_idx.as_ref()) - .map(|ix| ix.height); - let start_idx = start_query_height.or(start_idx); - // Load all transactions accepted until this point - // N.B. 
the cache is a hash map - self.unscanned.extend( - self.fetch_shielded_transfers( - client, - logger, - start_idx, - last_query_height, - ) - .await?, - ); - // persist the cache in case of interruptions. - let _ = self.save().await; - - let txs = logger.scan(self.unscanned.clone()); - for (ref indexed_tx, ref stx) in txs { - if Some(indexed_tx) > last_witnessed_tx.as_ref() { - self.update_witness_map(indexed_tx.to_owned(), stx)?; - } - let mut vk_heights = BTreeMap::new(); - std::mem::swap(&mut vk_heights, &mut self.vk_heights); - for (vk, h) in vk_heights - .iter_mut() - .filter(|(_vk, h)| h.as_ref() < Some(indexed_tx)) - { - self.scan_tx(indexed_tx.to_owned(), stx, vk)?; - *h = Some(indexed_tx.to_owned()); } - // possibly remove unneeded elements from the cache. - self.unscanned.scanned(indexed_tx); - std::mem::swap(&mut vk_heights, &mut self.vk_heights); - let _ = self.save().await; + self.tree.append(node).map_err(|()| { + Error::Other("note commitment tree is full".to_string()) + })?; + // Finally, make it easier to construct merkle paths to this new + // note + let witness = IncrementalWitness::::from_tree(&self.tree); + self.witness_map.insert(note_pos, witness); + note_pos += 1; } - Ok(()) } /// Obtain a chronologically-ordered list of all accepted shielded /// transactions from a node. - pub async fn fetch_shielded_transfers( - &self, + async fn fetch_shielded_transfers( + mut block_sender: FetchQueueSender, client: &C, logger: &impl ProgressLogger, last_indexed_tx: Option, - last_query_height: Option, - ) -> Result { - // Query for the last produced block height - let last_block_height = query_block(client) - .await? 
- .map_or_else(BlockHeight::first, |block| block.height); - let last_query_height = last_query_height.unwrap_or(last_block_height); - - let mut shielded_txs = BTreeMap::new(); + last_query_height: BlockHeight, + ) -> Result<(), Error> { // Fetch all the transactions we do not have yet let first_height_to_query = last_indexed_tx.map_or_else(|| 1, |last| last.0); - let heights = logger.fetch(first_height_to_query..=last_query_height.0); - for height in heights { - if self.unscanned.contains_height(height) { + for height in logger.fetch(first_height_to_query..=last_query_height.0) + { + if block_sender.contains_height(height) { continue; } + // Get the valid masp transactions at the specified height + let epoch = query_epoch_at_height(client, height.into()) + .await? + .ok_or_else(|| { + Error::from(QueryError::General( + "Queried height is greater than the last committed \ + block height" + .to_string(), + )) + })?; let txs_results = match get_indexed_masp_events_at_height( client, @@ -817,55 +754,37 @@ impl ShieldedContext { .block .data; - for (idx, masp_sections_refs) in txs_results { + for idx in txs_results { let tx = Tx::try_from(block[idx.0 as usize].as_ref()) .map_err(|e| Error::Other(e.to_string()))?; - let extracted_masp_txs = - Self::extract_masp_tx(&tx, &masp_sections_refs).await?; - // Collect the current transactions - shielded_txs.insert( - IndexedTx { - height: height.into(), - index: idx, - }, - extracted_masp_txs, - ); + let ExtractedMaspTx { + fee_unshielding, + inner_tx, + } = Self::extract_masp_tx(&tx, true).await?; + // Collect the current transaction(s) + fee_unshielding.and_then(|masp_transaction| { + block_sender.send(( + IndexedTx { + height: height.into(), + index: idx, + is_wrapper: true, + }, + masp_transaction, + )); + }); + inner_tx.and_then(|masp_transaction| { + block_sender.send(( + IndexedTx { + height: height.into(), + index: idx, + is_wrapper: false, + }, + masp_transaction, + )); + }) } } - - Ok(shielded_txs) - } - - /// Extract 
the relevant shield portions of a [`Tx`], if any. - async fn extract_masp_tx( - tx: &Tx, - masp_section_refs: &MaspTxRefs, - ) -> Result, Error> { - // NOTE: simply looking for masp sections attached to the tx - // is not safe. We don't validate the sections attached to a - // transaction se we could end up with transactions carrying - // an unnecessary masp section. We must instead look for the - // required masp sections coming from the events - - masp_section_refs - .0 - .iter() - .try_fold(vec![], |mut acc, hash| { - match tx - .get_section(hash) - .and_then(|section| section.masp_tx()) - .ok_or_else(|| { - Error::Other( - "Missing expected masp transaction".to_string(), - ) - }) { - Ok(transaction) => { - acc.push(transaction); - Ok(acc) - } - Err(e) => Err(e), - } - }) + Ok(()) } /// Applies the given transaction to the supplied context. More precisely, @@ -879,7 +798,7 @@ impl ShieldedContext { pub fn scan_tx( &mut self, indexed_tx: IndexedTx, - shielded: &[Transaction], + shielded: &Transaction, vk: &ViewingKey, ) -> Result<(), Error> { // For tracking the account changes caused by this Transaction @@ -888,78 +807,42 @@ impl ShieldedContext { let mut note_pos = self.tx_note_map[&indexed_tx]; // Listen for notes sent to our viewing keys, only if we are syncing // (i.e. in a confirmed status) - for tx in shielded { - for so in - tx.sapling_bundle().map_or(&vec![], |x| &x.shielded_outputs) - { - // Let's try to see if this viewing key can decrypt latest - // note - let notes = self.pos_map.entry(*vk).or_default(); - let decres = try_sapling_note_decryption::<_, OutputDescription<<::SaplingAuth as masp_primitives::transaction::components::sapling::Authorization>::Proof>>( - &NETWORK, - 1.into(), - &PreparedIncomingViewingKey::new(&vk.ivk()), - so, - ); - // So this current viewing key does decrypt this current - // note... 
- if let Some((note, pa, memo)) = decres { - // Add this note to list of notes decrypted by this - // viewing key - notes.insert(note_pos); - // Compute the nullifier now to quickly recognize when - // spent - let nf = note.nf( - &vk.nk, - note_pos.try_into().map_err(|_| { - Error::Other( - "Can not get nullifier".to_string(), - ) - })?, - ); - self.note_map.insert(note_pos, note); - self.memo_map.insert(note_pos, memo); - // The payment address' diversifier is required to spend - // note - self.div_map.insert(note_pos, *pa.diversifier()); - self.nf_map.insert(nf, note_pos); - // Note the account changes - let balance = transaction_delta - .entry(*vk) - .or_insert_with(I128Sum::zero); - *balance += I128Sum::from_nonnegative( - note.asset_type, - note.value as i128, - ) - .map_err(|()| { - Error::Other( - "found note with invalid value or asset type" - .to_string(), - ) - })?; - self.vk_map.insert(note_pos, *vk); - } - note_pos += 1; - } - } - } - - // Cancel out those of our notes that have been spent - for tx in shielded { - for ss in - tx.sapling_bundle().map_or(&vec![], |x| &x.shielded_spends) + for so in shielded + .sapling_bundle() + .map_or(&vec![], |x| &x.shielded_outputs) { - // If the shielded spend's nullifier is in our map, then target - // note is rendered unusable - if let Some(note_pos) = self.nf_map.get(&ss.nullifier) { - self.spents.insert(*note_pos); + // Let's try to see if this viewing key can decrypt latest + // note + let notes = self.pos_map.entry(*vk).or_default(); + let decres = try_sapling_note_decryption::<_, OutputDescription<<::SaplingAuth as masp_primitives::transaction::components::sapling::Authorization>::Proof>>( + &NETWORK, + 1.into(), + &PreparedIncomingViewingKey::new(&vk.ivk()), + so, + ); + // So this current viewing key does decrypt this current note... 
+ if let Some((note, pa, memo)) = decres { + // Add this note to list of notes decrypted by this viewing + // key + notes.insert(note_pos); + // Compute the nullifier now to quickly recognize when spent + let nf = note.nf( + &vk.nk, + note_pos.try_into().map_err(|_| { + Error::Other("Can not get nullifier".to_string()) + })?, + ); + self.note_map.insert(note_pos, note); + self.memo_map.insert(note_pos, memo); + // The payment address' diversifier is required to spend + // note + self.div_map.insert(note_pos, *pa.diversifier()); + self.nf_map.insert(nf, note_pos); // Note the account changes let balance = transaction_delta - .entry(self.vk_map[note_pos]) + .entry(*vk) .or_insert_with(I128Sum::zero); - let note = self.note_map[note_pos]; - - *balance -= I128Sum::from_nonnegative( + *balance += I128Sum::from_nonnegative( note.asset_type, note.value as i128, ) @@ -969,7 +852,37 @@ impl ShieldedContext { .to_string(), ) })?; + self.vk_map.insert(note_pos, *vk); } + note_pos += 1; + } + } + + // Cancel out those of our notes that have been spent + for ss in shielded + .sapling_bundle() + .map_or(&vec![], |x| &x.shielded_spends) + { + // If the shielded spend's nullifier is in our map, then target note + // is rendered unusable + if let Some(note_pos) = self.nf_map.get(&ss.nullifier) { + self.spents.insert(*note_pos); + // Note the account changes + let balance = transaction_delta + .entry(self.vk_map[note_pos]) + .or_insert_with(I128Sum::zero); + let note = self.note_map[note_pos]; + + *balance -= I128Sum::from_nonnegative( + note.asset_type, + note.value as i128, + ) + .map_err(|()| { + Error::Other( + "found note with invalid value or asset type" + .to_string(), + ) + })?; } } @@ -1183,9 +1096,11 @@ impl ShieldedContext { let required = value / threshold; // Forget about the trace amount left over because we cannot // realize its value - let trace = I128Sum::from_pair(asset_type, value % threshold); + let trace = I128Sum::from_pair(asset_type, value % threshold) + 
.expect("the trace should be a valid i128"); let normed_trace = - I128Sum::from_pair(normed_asset_type, value % threshold); + I128Sum::from_pair(normed_asset_type, value % threshold) + .expect("the trace should be a valid i128"); // Record how much more of the given conversion has been used *usage += required; // Apply the conversions to input and move the trace amount to output @@ -1365,7 +1280,13 @@ impl ShieldedContext { // The amount contributed by this note before conversion let pre_contr = - I128Sum::from_pair(note.asset_type, note.value as i128); + I128Sum::from_pair(note.asset_type, note.value as i128) + .map_err(|()| { + Error::Other( + "received note has invalid value or asset type" + .to_string(), + ) + })?; let (contr, normed_contr, proposed_convs) = self .compute_exchanged_amount( context.client(), @@ -1446,10 +1367,12 @@ impl ShieldedContext { res += ValueSum::from_pair( pre_asset_type.token, decoded_change, - ); + ) + .expect("expected this to fit"); } None => { - undecoded += ValueSum::from_pair(*asset_type, *val); + undecoded += ValueSum::from_pair(*asset_type, *val) + .expect("expected this to fit"); } _ => {} } @@ -1479,9 +1402,11 @@ impl ShieldedContext { res += MaspAmount::from_pair( (decoded.epoch, decoded.token), decoded_change, - ); + ) + .expect("unable to construct decoded amount"); } else { - undecoded += ValueSum::from_pair(*asset_type, *val); + undecoded += ValueSum::from_pair(*asset_type, *val) + .expect("expected this to fit"); } } (res, undecoded) @@ -1500,7 +1425,8 @@ impl ShieldedContext { if let Some(decoded) = self.decode_asset_type(client, *asset_type).await { - res += ValueSum::from_pair((*asset_type, decoded), *val); + res += ValueSum::from_pair((*asset_type, decoded), *val) + .expect("unable to construct decoded amount"); } } res @@ -1524,6 +1450,9 @@ impl ShieldedContext { // No shielded components are needed when neither source nor destination // are shielded + use rand::rngs::StdRng; + use rand_core::SeedableRng; + let 
spending_key = source.spending_key(); let payment_address = target.payment_address(); // No shielded components are needed when neither source nor @@ -1614,7 +1543,7 @@ impl ShieldedContext { u32::MAX - 20 } }; - let mut builder = Builder::::new( + let mut builder = Builder::::new( NETWORK, // NOTE: this is going to add 20 more blocks to the actual // expiration but there's no other exposed function that we could @@ -1713,7 +1642,9 @@ impl ShieldedContext { // Anotate the asset type in the value balance with its decoding in // order to facilitate cross-epoch computations - let value_balance = builder.value_balance(); + let value_balance = builder.value_balance().map_err(|e| { + Error::Other(format!("unable to complete value balance: {}", e)) + })?; let value_balance = context .shielded_mut() .await @@ -1803,7 +1734,8 @@ impl ShieldedContext { // Convert the shortfall into a I128Sum let mut shortfall = I128Sum::zero(); for (asset_type, val) in asset_types.iter().zip(rem_amount) { - shortfall += I128Sum::from_pair(*asset_type, val.into()); + shortfall += I128Sum::from_pair(*asset_type, val.into()) + .expect("unable to construct value sum"); } // Return an insufficient ffunds error return Result::Err(TransferErr::from( @@ -1815,7 +1747,16 @@ impl ShieldedContext { if let Some(sk) = spending_key { // Represents the amount of inputs we are short by let mut additional = I128Sum::zero(); - for (asset_type, amt) in builder.value_balance().components() { + for (asset_type, amt) in builder + .value_balance() + .map_err(|e| { + Error::Other(format!( + "unable to complete value balance: {}", + e + )) + })? 
+ .components() + { match amt.cmp(&0) { Ordering::Greater => { // Send the change in this asset type back to the sender @@ -1870,7 +1811,7 @@ impl ShieldedContext { // Cache the generated transfer let mut shielded_ctx = context.shielded_mut().await; shielded_ctx - .pre_cache_transaction(context, &[masp_tx.clone()]) + .pre_cache_transaction(context, &masp_tx) .await?; } @@ -1889,7 +1830,7 @@ impl ShieldedContext { async fn pre_cache_transaction( &mut self, context: &impl Namada, - masp_tx: &[Transaction], + masp_tx: &Transaction, ) -> Result<(), Error> { let vks: Vec<_> = context .wallet() @@ -1901,21 +1842,26 @@ impl ShieldedContext { let last_witnessed_tx = self.tx_note_map.keys().max(); // This data will be discarded at the next fetch so we don't need to // populate it accurately - let indexed_tx = - last_witnessed_tx.map_or_else(IndexedTx::default, |indexed| { - IndexedTx { - height: indexed.height, - index: indexed - .index - .checked_add(1) - .expect("Tx index shouldn't overflow"), - } - }); + let indexed_tx = last_witnessed_tx.map_or_else( + || IndexedTx { + height: BlockHeight::first(), + index: TxIndex(0), + is_wrapper: false, + }, + |indexed| IndexedTx { + height: indexed.height, + index: indexed + .index + .checked_add(1) + .expect("Tx index shouldn't overflow"), + is_wrapper: false, + }, + ); self.sync_status = ContextSyncStatus::Speculative; for vk in vks { self.vk_heights.entry(vk).or_default(); - self.scan_tx(indexed_tx.clone(), masp_tx, &vk)?; + self.scan_tx(indexed_tx, masp_tx, &vk)?; } // Save the speculative state for future usage self.save().await.map_err(|e| Error::Other(e.to_string()))?; @@ -1983,1184 +1929,128 @@ impl ShieldedContext { } } -// Retrieves all the indexes at the specified height which refer -// to a valid masp transaction. If an index is given, it filters only the -// transactions with an index equal or greater to the provided one. 
-async fn get_indexed_masp_events_at_height( - client: &C, - height: BlockHeight, - first_idx_to_query: Option, -) -> Result>, Error> { - let first_idx_to_query = first_idx_to_query.unwrap_or_default(); - - Ok(client - .block_results(height.0 as u32) - .await - .map_err(|e| Error::from(QueryError::General(e.to_string())))? - .end_block_events - .map(|events| { - events - .into_iter() - .filter_map(|event| { - let tx_index = - MaspTxBlockIndexAttr::read_from_event_attributes( - &event.attributes, - ) - .ok()?; - - if tx_index >= first_idx_to_query { - // Extract the references to the correct masp sections - let masp_section_refs = - MaspTxBatchRefsAttr::read_from_event_attributes( - &event.attributes, - ) - .ok()?; - - Some((tx_index, masp_section_refs)) - } else { - None - } - }) - .collect::>() - })) -} - -mod tests { - /// quick and dirty test. will fail on size check - #[test] - #[should_panic(expected = "parameter file size is not correct")] - fn test_wrong_masp_params() { - use std::io::Write; - - use super::{CONVERT_NAME, OUTPUT_NAME, SPEND_NAME}; - - let tempdir = tempfile::tempdir() - .expect("expected a temp dir") - .into_path(); - let fake_params_paths = - [SPEND_NAME, OUTPUT_NAME, CONVERT_NAME].map(|p| tempdir.join(p)); - for path in &fake_params_paths { - let mut f = - std::fs::File::create(path).expect("expected a temp file"); - f.write_all(b"fake params") - .expect("expected a writable temp file"); - f.sync_all() - .expect("expected a writable temp file (on sync)"); - } - - std::env::set_var(super::ENV_VAR_MASP_PARAMS_DIR, tempdir.as_os_str()); - // should panic here - masp_proofs::load_parameters( - &fake_params_paths[0], - &fake_params_paths[1], - &fake_params_paths[2], - ); - } - - /// a more involved test, using dummy parameters with the right - /// size but the wrong hash. 
- #[test] - #[should_panic(expected = "parameter file is not correct")] - fn test_wrong_masp_params_hash() { - use masp_primitives::ff::PrimeField; - use masp_proofs::bellman::groth16::{ - generate_random_parameters, Parameters, - }; - use masp_proofs::bellman::{Circuit, ConstraintSystem, SynthesisError}; - use masp_proofs::bls12_381::{Bls12, Scalar}; - - use super::{CONVERT_NAME, OUTPUT_NAME, SPEND_NAME}; - - struct FakeCircuit { - x: E, - } - - impl Circuit for FakeCircuit { - fn synthesize>( - self, - cs: &mut CS, - ) -> Result<(), SynthesisError> { - let x = cs.alloc(|| "x", || Ok(self.x)).unwrap(); - cs.enforce( - || { - "this is an extra long constraint name so that rustfmt \ - is ok with wrapping the params of enforce()" - }, - |lc| lc + x, - |lc| lc + x, - |lc| lc + x, - ); - Ok(()) - } - } - - let dummy_circuit = FakeCircuit { x: Scalar::zero() }; - let mut rng = rand::thread_rng(); - let fake_params: Parameters = - generate_random_parameters(dummy_circuit, &mut rng) - .expect("expected to generate fake params"); - - let tempdir = tempfile::tempdir() - .expect("expected a temp dir") - .into_path(); - // TODO: get masp to export these consts - let fake_params_paths = [ - (SPEND_NAME, 49848572u64), - (OUTPUT_NAME, 16398620u64), - (CONVERT_NAME, 22570940u64), - ] - .map(|(p, s)| (tempdir.join(p), s)); - for (path, size) in &fake_params_paths { - let mut f = - std::fs::File::create(path).expect("expected a temp file"); - fake_params - .write(&mut f) - .expect("expected a writable temp file"); - // the dummy circuit has one constraint, and therefore its - // params should always be smaller than the large masp - // circuit params. so this truncate extends the file, and - // extra bytes at the end do not make it invalid. 
- f.set_len(*size) - .expect("expected to truncate the temp file"); - f.sync_all() - .expect("expected a writable temp file (on sync)"); - } - - std::env::set_var(super::ENV_VAR_MASP_PARAMS_DIR, tempdir.as_os_str()); - // should panic here - masp_proofs::load_parameters( - &fake_params_paths[0].0, - &fake_params_paths[1].0, - &fake_params_paths[2].0, - ); - } -} - -#[cfg(any(test, feature = "testing"))] -/// Tests and strategies for transactions -pub mod testing { - use std::ops::AddAssign; - use std::sync::Mutex; - - use bls12_381::{G1Affine, G2Affine}; - use masp_primitives::consensus::testing::arb_height; - use masp_primitives::constants::SPENDING_KEY_GENERATOR; - use masp_primitives::group::GroupEncoding; - use masp_primitives::sapling::prover::TxProver; - use masp_primitives::sapling::redjubjub::{PublicKey, Signature}; - use masp_primitives::sapling::{ProofGenerationKey, Rseed}; - use masp_primitives::transaction::components::sapling::builder::StoredBuildParams; - use masp_primitives::transaction::components::sapling::Bundle; - use masp_primitives::transaction::components::GROTH_PROOF_SIZE; - use masp_proofs::bellman::groth16::{self, Proof}; - use proptest::prelude::*; - use proptest::sample::SizeRange; - use proptest::test_runner::TestRng; - use proptest::{collection, option, prop_compose}; - - use super::*; - use crate::address::testing::arb_address; - use crate::masp_primitives::consensus::BranchId; - use crate::masp_primitives::constants::VALUE_COMMITMENT_RANDOMNESS_GENERATOR; - use crate::masp_primitives::merkle_tree::FrozenCommitmentTree; - use crate::masp_primitives::sapling::keys::OutgoingViewingKey; - use crate::masp_primitives::sapling::redjubjub::PrivateKey; - use crate::masp_primitives::transaction::components::transparent::testing::arb_transparent_address; - use crate::storage::testing::arb_epoch; - use crate::token::testing::arb_denomination; - - /// A context object for verifying the Sapling components of MASP - /// transactions. 
Same as BatchValidator, but always assumes the - /// proofs and signatures to be valid. - pub struct MockBatchValidator { - inner: BatchValidator, - } - - impl Default for MockBatchValidator { - fn default() -> Self { - MockBatchValidator { - inner: BatchValidator::new(), - } - } - } - - impl MockBatchValidator { - /// Checks the bundle against Sapling-specific consensus rules, and adds - /// its proof and signatures to the validator. - /// - /// Returns `false` if the bundle doesn't satisfy all of the consensus - /// rules. This `BatchValidator` can continue to be used - /// regardless, but some or all of the proofs and signatures - /// from this bundle may have already been added to the batch even if - /// it fails other consensus rules. - pub fn check_bundle( - &mut self, - bundle: Bundle< - masp_primitives::transaction::components::sapling::Authorized, - >, - sighash: [u8; 32], - ) -> bool { - self.inner.check_bundle(bundle, sighash) - } - - /// Batch-validates the accumulated bundles. - /// - /// Returns `true` if every proof and signature in every bundle added to - /// the batch validator is valid, or `false` if one or more are - /// invalid. No attempt is made to figure out which of the - /// accumulated bundles might be invalid; if that information is - /// desired, construct separate [`BatchValidator`]s for sub-batches of - /// the bundles. - pub fn validate( - self, - _spend_vk: &groth16::VerifyingKey, - _convert_vk: &groth16::VerifyingKey, - _output_vk: &groth16::VerifyingKey, - mut _rng: R, - ) -> bool { - true - } - } - - /// This function computes `value` in the exponent of the value commitment - /// base - fn masp_compute_value_balance( - asset_type: AssetType, - value: i128, - ) -> Option { - // Compute the absolute value (failing if -i128::MAX is - // the value) - let abs = match value.checked_abs() { - Some(a) => a as u128, - None => return None, - }; - - // Is it negative? We'll have to negate later if so. 
- let is_negative = value.is_negative(); - - // Compute it in the exponent - let mut abs_bytes = [0u8; 32]; - abs_bytes[0..16].copy_from_slice(&abs.to_le_bytes()); - let mut value_balance = asset_type.value_commitment_generator() - * jubjub::Fr::from_bytes(&abs_bytes).unwrap(); - - // Negate if necessary - if is_negative { - value_balance = -value_balance; - } - - // Convert to unknown order point - Some(value_balance.into()) - } - - /// A context object for creating the Sapling components of a Zcash - /// transaction. - pub struct SaplingProvingContext { - bsk: jubjub::Fr, - // (sum of the Spend value commitments) - (sum of the Output value - // commitments) - cv_sum: jubjub::ExtendedPoint, - } - - /// An implementation of TxProver that does everything except generating - /// valid zero-knowledge proofs. Uses the supplied source of randomness to - /// carry out its operations. - pub struct MockTxProver(pub Mutex); - - impl TxProver for MockTxProver { - type SaplingProvingContext = SaplingProvingContext; - - fn new_sapling_proving_context(&self) -> Self::SaplingProvingContext { - SaplingProvingContext { - bsk: jubjub::Fr::zero(), - cv_sum: jubjub::ExtendedPoint::identity(), - } - } - - fn spend_proof( - &self, - ctx: &mut Self::SaplingProvingContext, - proof_generation_key: ProofGenerationKey, - _diversifier: Diversifier, - _rseed: Rseed, - ar: jubjub::Fr, - asset_type: AssetType, - value: u64, - _anchor: bls12_381::Scalar, - _merkle_path: MerklePath, - rcv: jubjub::Fr, - ) -> Result< - ([u8; GROTH_PROOF_SIZE], jubjub::ExtendedPoint, PublicKey), - (), - > { - // Accumulate the value commitment randomness in the context - { - let mut tmp = rcv; - tmp.add_assign(&ctx.bsk); - - // Update the context - ctx.bsk = tmp; - } - - // Construct the value commitment - let value_commitment = asset_type.value_commitment(value, rcv); - - // This is the result of the re-randomization, we compute it for the - // caller - let rk = PublicKey(proof_generation_key.ak.into()) - 
.randomize(ar, SPENDING_KEY_GENERATOR); - - // Compute value commitment - let value_commitment: jubjub::ExtendedPoint = - value_commitment.commitment().into(); - - // Accumulate the value commitment in the context - ctx.cv_sum += value_commitment; - - let mut zkproof = [0u8; GROTH_PROOF_SIZE]; - let proof = Proof:: { - a: G1Affine::generator(), - b: G2Affine::generator(), - c: G1Affine::generator(), +impl ShieldedContext { + /// Fetch the current state of the multi-asset shielded pool into a + /// ShieldedContext + #[allow(clippy::too_many_arguments)] + pub async fn fetch< + C: Client + Sync, + IO: Io + Send + Sync, + L: ProgressLogger + Sync, + >( + &mut self, + client: &C, + logger: &L, + start_query_height: Option, + last_query_height: Option, + _batch_size: u64, + sks: &[ExtendedSpendingKey], + fvks: &[ViewingKey], + ) -> Result<(), Error> { + // add new viewing keys + // Reload the state from file to get the last confirmed state and + // discard any speculative data, we cannot fetch on top of a + // speculative state + // Always reload the confirmed context or initialize a new one if not + // found + if self.load_confirmed().await.is_err() { + // Initialize a default context if we couldn't load a valid one + // from storage + *self = Self { + utils: std::mem::take(&mut self.utils), + ..Default::default() }; - proof - .write(&mut zkproof[..]) - .expect("should be able to serialize a proof"); - Ok((zkproof, value_commitment, rk)) } - fn output_proof( - &self, - ctx: &mut Self::SaplingProvingContext, - _esk: jubjub::Fr, - _payment_address: masp_primitives::sapling::PaymentAddress, - _rcm: jubjub::Fr, - asset_type: AssetType, - value: u64, - rcv: jubjub::Fr, - ) -> ([u8; GROTH_PROOF_SIZE], jubjub::ExtendedPoint) { - // Accumulate the value commitment randomness in the context - { - let mut tmp = rcv.neg(); // Outputs subtract from the total. 
- tmp.add_assign(&ctx.bsk); - - // Update the context - ctx.bsk = tmp; - } - - // Construct the value commitment for the proof instance - let value_commitment = asset_type.value_commitment(value, rcv); - - // Compute the actual value commitment - let value_commitment_point: jubjub::ExtendedPoint = - value_commitment.commitment().into(); - - // Accumulate the value commitment in the context. We do this to - // check internal consistency. - ctx.cv_sum -= value_commitment_point; // Outputs subtract from the total. - - let mut zkproof = [0u8; GROTH_PROOF_SIZE]; - let proof = Proof:: { - a: G1Affine::generator(), - b: G2Affine::generator(), - c: G1Affine::generator(), - }; - proof - .write(&mut zkproof[..]) - .expect("should be able to serialize a proof"); - - (zkproof, value_commitment_point) + for esk in sks { + let vk = to_viewing_key(esk).vk; + self.vk_heights.entry(vk).or_default(); } - - fn convert_proof( - &self, - ctx: &mut Self::SaplingProvingContext, - allowed_conversion: AllowedConversion, - value: u64, - _anchor: bls12_381::Scalar, - _merkle_path: MerklePath, - rcv: jubjub::Fr, - ) -> Result<([u8; GROTH_PROOF_SIZE], jubjub::ExtendedPoint), ()> - { - // Accumulate the value commitment randomness in the context - { - let mut tmp = rcv; - tmp.add_assign(&ctx.bsk); - - // Update the context - ctx.bsk = tmp; - } - - // Construct the value commitment - let value_commitment = - allowed_conversion.value_commitment(value, rcv); - - // Compute value commitment - let value_commitment: jubjub::ExtendedPoint = - value_commitment.commitment().into(); - - // Accumulate the value commitment in the context - ctx.cv_sum += value_commitment; - - let mut zkproof = [0u8; GROTH_PROOF_SIZE]; - let proof = Proof:: { - a: G1Affine::generator(), - b: G2Affine::generator(), - c: G1Affine::generator(), - }; - proof - .write(&mut zkproof[..]) - .expect("should be able to serialize a proof"); - - Ok((zkproof, value_commitment)) + for vk in fvks { + 
self.vk_heights.entry(*vk).or_default(); } - - fn binding_sig( - &self, - ctx: &mut Self::SaplingProvingContext, - assets_and_values: &I128Sum, - sighash: &[u8; 32], - ) -> Result { - // Initialize secure RNG - let mut rng = self.0.lock().unwrap(); - - // Grab the current `bsk` from the context - let bsk = PrivateKey(ctx.bsk); - - // Grab the `bvk` using DerivePublic. - let bvk = PublicKey::from_private( - &bsk, - VALUE_COMMITMENT_RANDOMNESS_GENERATOR, - ); - - // In order to check internal consistency, let's use the accumulated - // value commitments (as the verifier would) and apply - // value_balance to compare against our derived bvk. - { - let final_bvk = assets_and_values - .components() - .map(|(asset_type, value_balance)| { - // Compute value balance for each asset - // Error for bad value balances (-INT128_MAX value) - masp_compute_value_balance(*asset_type, *value_balance) + let _ = self.save().await; + let native_token = query_native_token(client).await?; + // the latest block height which has been added to the witness Merkle + // tree + let Some(least_idx) = self.vk_heights.values().min().cloned() else { + return Ok(()); + }; + let last_witnessed_tx = self.tx_note_map.keys().max().cloned(); + // get the bounds on the block heights to fetch + let start_idx = + std::cmp::min(last_witnessed_tx, least_idx).map(|ix| ix.height); + let start_idx = start_query_height.or(start_idx); + // Query for the last produced block height + let last_block_height = query_block(client) + .await? 
+ .map(|b| b.height) + .unwrap_or_else(BlockHeight::first); + let last_query_height = last_query_height.unwrap_or(last_block_height); + let last_query_height = + std::cmp::min(last_query_height, last_block_height); + + let (task_scheduler, mut task_manager) = + TaskManager::::new(self.clone()); + + + std::thread::scope(|s| { + loop { + let (fetch_send, fetch_recv) = + fetch_channel::new(self.unscanned.clone(), last_query_height); + let decryption_handle = s.spawn(|| { + let txs = logger.scan(fetch_recv); + for (indexed_tx, (epoch, tx, stx)) in txs { + if Some(indexed_tx) > last_witnessed_tx { + task_scheduler + .update_witness_map(indexed_tx, &stx)?; + } + let mut vk_heights = task_scheduler.get_vk_heights(); + for (vk, h) in vk_heights + .iter_mut() + .filter(|(_vk, h)| **h < Some(indexed_tx)) + { + task_scheduler.scan_tx( + indexed_tx, + epoch, + &tx, + &stx, + vk, + native_token.clone(), + )?; + *h = Some(indexed_tx); + } + // possibly remove unneeded elements from the cache. + self.unscanned.scanned(&indexed_tx); + task_scheduler.set_vk_heights(vk_heights); + task_scheduler.save(indexed_tx.height); + } + task_scheduler.complete(); + Ok::<(), Error>(()) + }); + + _ = tokio::task::block_in_place(|| { + tokio::runtime::Handle::current().block_on(async { + tokio::join!( + task_manager.run(), + Self::fetch_shielded_transfers( + fetch_send, + client, + logger, + start_idx, + last_query_height, + ) + ) }) - .try_fold(ctx.cv_sum, |tmp, value_balance| { - // Compute cv_sum minus sum of all value balances - Result::<_, ()>::Ok(tmp - value_balance.ok_or(())?) - })?; - - // The result should be the same, unless the provided - // valueBalance is wrong. 
- if bvk.0 != final_bvk { - return Err(()); - } - } - - // Construct signature message - let mut data_to_be_signed = [0u8; 64]; - data_to_be_signed[0..32].copy_from_slice(&bvk.0.to_bytes()); - data_to_be_signed[32..64].copy_from_slice(&sighash[..]); - - // Sign - Ok(bsk.sign( - &data_to_be_signed, - &mut *rng, - VALUE_COMMITMENT_RANDOMNESS_GENERATOR, - )) - } - } - - #[derive(Debug, Clone)] - /// Adapts a CSPRNG from a PRNG for proptesting - pub struct TestCsprng(R); - - impl CryptoRng for TestCsprng {} - - impl RngCore for TestCsprng { - fn next_u32(&mut self) -> u32 { - self.0.next_u32() - } - - fn next_u64(&mut self) -> u64 { - self.0.next_u64() - } - - fn fill_bytes(&mut self, dest: &mut [u8]) { - self.0.fill_bytes(dest) - } - - fn try_fill_bytes( - &mut self, - dest: &mut [u8], - ) -> Result<(), rand::Error> { - self.0.try_fill_bytes(dest) - } - } - - prop_compose! { - /// Expose a random number generator - pub fn arb_rng()(rng in Just(()).prop_perturb(|(), rng| rng)) -> TestRng { - rng - } - } - - prop_compose! { - /// Generate an arbitrary output description with the given value - pub fn arb_output_description( - asset_type: AssetType, - value: u64, - )( - mut rng in arb_rng().prop_map(TestCsprng), - ) -> (Option, masp_primitives::sapling::PaymentAddress, AssetType, u64, MemoBytes) { - let mut spending_key_seed = [0; 32]; - rng.fill_bytes(&mut spending_key_seed); - let spending_key = masp_primitives::zip32::ExtendedSpendingKey::master(spending_key_seed.as_ref()); - - let viewing_key = ExtendedFullViewingKey::from(&spending_key).fvk.vk; - let (div, _g_d) = find_valid_diversifier(&mut rng); - let payment_addr = viewing_key - .to_payment_address(div) - .expect("a PaymentAddress"); - - (None, payment_addr, asset_type, value, MemoBytes::empty()) - } - } - - prop_compose! 
{ - /// Generate an arbitrary spend description with the given value - pub fn arb_spend_description( - asset_type: AssetType, - value: u64, - )( - address in arb_transparent_address(), - expiration_height in arb_height(BranchId::MASP, &Network), - mut rng in arb_rng().prop_map(TestCsprng), - bparams_rng in arb_rng().prop_map(TestCsprng), - prover_rng in arb_rng().prop_map(TestCsprng), - ) -> (ExtendedSpendingKey, Diversifier, Note, Node) { - let mut spending_key_seed = [0; 32]; - rng.fill_bytes(&mut spending_key_seed); - let spending_key = masp_primitives::zip32::ExtendedSpendingKey::master(spending_key_seed.as_ref()); - - let viewing_key = ExtendedFullViewingKey::from(&spending_key).fvk.vk; - let (div, _g_d) = find_valid_diversifier(&mut rng); - let payment_addr = viewing_key - .to_payment_address(div) - .expect("a PaymentAddress"); - - let mut builder = Builder::::new( - NETWORK, - // NOTE: this is going to add 20 more blocks to the actual - // expiration but there's no other exposed function that we could - // use from the masp crate to specify the expiration better - expiration_height.unwrap(), - ); - // Add a transparent input to support our desired shielded output - builder.add_transparent_input(TxOut { asset_type, value, address }).unwrap(); - // Finally add the shielded output that we need - builder.add_sapling_output(None, payment_addr, asset_type, value, MemoBytes::empty()).unwrap(); - // Build a transaction in order to get its shielded outputs - let (transaction, metadata) = builder.build( - &MockTxProver(Mutex::new(prover_rng)), - &FeeRule::non_standard(U64Sum::zero()), - &mut rng, - &mut RngBuildParams::new(bparams_rng), - ).unwrap(); - // Extract the shielded output from the transaction - let shielded_output = &transaction - .sapling_bundle() - .unwrap() - .shielded_outputs[metadata.output_index(0).unwrap()]; - - // Let's now decrypt the constructed notes - let (note, pa, _memo) = try_sapling_note_decryption::<_, OutputDescription<<::SaplingAuth as 
masp_primitives::transaction::components::sapling::Authorization>::Proof>>( - &NETWORK, - 1.into(), - &PreparedIncomingViewingKey::new(&viewing_key.ivk()), - shielded_output, - ).unwrap(); - assert_eq!(payment_addr, pa); - // Make a path to out new note - let node = Node::new(shielded_output.cmu.to_repr()); - (spending_key, div, note, node) - } - } - - prop_compose! { - /// Generate an arbitrary MASP denomination - pub fn arb_masp_digit_pos()(denom in 0..4u8) -> MaspDigitPos { - MaspDigitPos::from(denom) - } - } - - // Maximum value for a note partition - const MAX_MONEY: u64 = 100; - // Maximum number of partitions for a note - const MAX_SPLITS: usize = 3; - - prop_compose! { - /// Arbitrarily partition the given vector of integers into sets and sum - /// them - pub fn arb_partition(values: Vec)(buckets in ((!values.is_empty()) as usize)..=values.len())( - values in Just(values.clone()), - assigns in collection::vec(0..buckets, values.len()), - buckets in Just(buckets), - ) -> Vec { - let mut buckets = vec![0; buckets]; - for (bucket, value) in assigns.iter().zip(values) { - buckets[*bucket] += value; - } - buckets - } - } - - prop_compose! { - /// Generate arbitrary spend descriptions with the given asset type - /// partitioning the given values - pub fn arb_spend_descriptions( - asset: AssetData, - values: Vec, - )(partition in arb_partition(values))( - spend_description in partition - .iter() - .map(|value| arb_spend_description( - encode_asset_type( - asset.token.clone(), - asset.denom, - asset.position, - asset.epoch, - ).unwrap(), - *value, - )).collect::>() - ) -> Vec<(ExtendedSpendingKey, Diversifier, Note, Node)> { - spend_description - } - } - - prop_compose! 
{ - /// Generate arbitrary output descriptions with the given asset type - /// partitioning the given values - pub fn arb_output_descriptions( - asset: AssetData, - values: Vec, - )(partition in arb_partition(values))( - output_description in partition - .iter() - .map(|value| arb_output_description( - encode_asset_type( - asset.token.clone(), - asset.denom, - asset.position, - asset.epoch, - ).unwrap(), - *value, - )).collect::>() - ) -> Vec<(Option, masp_primitives::sapling::PaymentAddress, AssetType, u64, MemoBytes)> { - output_description - } - } - - prop_compose! { - /// Generate arbitrary spend descriptions with the given asset type - /// partitioning the given values - pub fn arb_txouts( - asset: AssetData, - values: Vec, - address: TransparentAddress, - )( - partition in arb_partition(values), - ) -> Vec { - partition - .iter() - .map(|value| TxOut { - asset_type: encode_asset_type( - asset.token.clone(), - asset.denom, - asset.position, - asset.epoch, - ).unwrap(), - value: *value, - address, - }).collect::>() - } - } - - prop_compose! 
{ - /// Generate an arbitrary shielded MASP transaction builder - pub fn arb_shielded_builder(asset_range: impl Into)( - assets in collection::hash_map( - arb_pre_asset_type(), - collection::vec(..MAX_MONEY, ..MAX_SPLITS), - asset_range, - ), - )( - expiration_height in arb_height(BranchId::MASP, &Network), - spend_descriptions in assets - .iter() - .map(|(asset, values)| arb_spend_descriptions(asset.clone(), values.clone())) - .collect::>(), - output_descriptions in assets - .iter() - .map(|(asset, values)| arb_output_descriptions(asset.clone(), values.clone())) - .collect::>(), - assets in Just(assets), - ) -> ( - Builder::, - HashMap, - ) { - let mut builder = Builder::::new( - NETWORK, - // NOTE: this is going to add 20 more blocks to the actual - // expiration but there's no other exposed function that we could - // use from the masp crate to specify the expiration better - expiration_height.unwrap(), - ); - let mut leaves = Vec::new(); - // First construct a Merkle tree containing all notes to be used - for (_esk, _div, _note, node) in spend_descriptions.iter().flatten() { - leaves.push(*node); - } - let tree = FrozenCommitmentTree::new(&leaves); - // Then use the notes knowing that they all have the same anchor - for (idx, (esk, div, note, _node)) in spend_descriptions.iter().flatten().enumerate() { - builder.add_sapling_spend(*esk, *div, *note, tree.path(idx)).unwrap(); - } - for (ovk, payment_addr, asset_type, value, memo) in output_descriptions.into_iter().flatten() { - builder.add_sapling_output(ovk, payment_addr, asset_type, value, memo).unwrap(); - } - (builder, assets.into_iter().map(|(k, v)| (k, v.iter().sum())).collect()) - } - } - - prop_compose! { - /// Generate an arbitrary pre-asset type - pub fn arb_pre_asset_type()( - token in arb_address(), - denom in arb_denomination(), - position in arb_masp_digit_pos(), - epoch in option::of(arb_epoch()), - ) -> AssetData { - AssetData { - token, - denom, - position, - epoch, - } - } - } - - prop_compose! 
{ - /// Generate an arbitrary shielding MASP transaction builder - pub fn arb_shielding_builder( - source: TransparentAddress, - asset_range: impl Into, - )( - assets in collection::hash_map( - arb_pre_asset_type(), - collection::vec(..MAX_MONEY, ..MAX_SPLITS), - asset_range, - ), - )( - expiration_height in arb_height(BranchId::MASP, &Network), - txins in assets - .iter() - .map(|(asset, values)| arb_txouts(asset.clone(), values.clone(), source)) - .collect::>(), - output_descriptions in assets - .iter() - .map(|(asset, values)| arb_output_descriptions(asset.clone(), values.clone())) - .collect::>(), - assets in Just(assets), - ) -> ( - Builder::, - HashMap, - ) { - let mut builder = Builder::::new( - NETWORK, - // NOTE: this is going to add 20 more blocks to the actual - // expiration but there's no other exposed function that we could - // use from the masp crate to specify the expiration better - expiration_height.unwrap(), - ); - for txin in txins.into_iter().flatten() { - builder.add_transparent_input(txin).unwrap(); - } - for (ovk, payment_addr, asset_type, value, memo) in output_descriptions.into_iter().flatten() { - builder.add_sapling_output(ovk, payment_addr, asset_type, value, memo).unwrap(); - } - (builder, assets.into_iter().map(|(k, v)| (k, v.iter().sum())).collect()) - } - } - - prop_compose! 
{ - /// Generate an arbitrary deshielding MASP transaction builder - pub fn arb_deshielding_builder( - target: TransparentAddress, - asset_range: impl Into, - )( - assets in collection::hash_map( - arb_pre_asset_type(), - collection::vec(..MAX_MONEY, ..MAX_SPLITS), - asset_range, - ), - )( - expiration_height in arb_height(BranchId::MASP, &Network), - spend_descriptions in assets - .iter() - .map(|(asset, values)| arb_spend_descriptions(asset.clone(), values.clone())) - .collect::>(), - txouts in assets - .iter() - .map(|(asset, values)| arb_txouts(asset.clone(), values.clone(), target)) - .collect::>(), - assets in Just(assets), - ) -> ( - Builder::, - HashMap, - ) { - let mut builder = Builder::::new( - NETWORK, - // NOTE: this is going to add 20 more blocks to the actual - // expiration but there's no other exposed function that we could - // use from the masp crate to specify the expiration better - expiration_height.unwrap(), - ); - let mut leaves = Vec::new(); - // First construct a Merkle tree containing all notes to be used - for (_esk, _div, _note, node) in spend_descriptions.iter().flatten() { - leaves.push(*node); - } - let tree = FrozenCommitmentTree::new(&leaves); - // Then use the notes knowing that they all have the same anchor - for (idx, (esk, div, note, _node)) in spend_descriptions.into_iter().flatten().enumerate() { - builder.add_sapling_spend(esk, div, note, tree.path(idx)).unwrap(); - } - for txout in txouts.into_iter().flatten() { - builder.add_transparent_output(&txout.address, txout.asset_type, txout.value).unwrap(); - } - (builder, assets.into_iter().map(|(k, v)| (k, v.iter().sum())).collect()) - } - } - - prop_compose! 
{ - /// Generate an arbitrary MASP shielded transfer - pub fn arb_shielded_transfer( - asset_range: impl Into, - )(asset_range in Just(asset_range.into()))( - (builder, asset_types) in arb_shielded_builder(asset_range), - epoch in arb_epoch(), - prover_rng in arb_rng().prop_map(TestCsprng), - mut rng in arb_rng().prop_map(TestCsprng), - bparams_rng in arb_rng().prop_map(TestCsprng), - ) -> (ShieldedTransfer, HashMap, StoredBuildParams) { - let mut rng_build_params = RngBuildParams::new(bparams_rng); - let (masp_tx, metadata) = builder.clone().build( - &MockTxProver(Mutex::new(prover_rng)), - &FeeRule::non_standard(U64Sum::zero()), - &mut rng, - &mut rng_build_params, - ).unwrap(); - (ShieldedTransfer { - builder: builder.map_builder(WalletMap), - metadata, - masp_tx, - epoch, - }, asset_types, rng_build_params.to_stored().unwrap()) - } - } - - prop_compose! { - /// Generate an arbitrary MASP shielded transfer - pub fn arb_shielding_transfer( - source: TransparentAddress, - asset_range: impl Into, - )(asset_range in Just(asset_range.into()))( - (builder, asset_types) in arb_shielding_builder( - source, - asset_range, - ), - epoch in arb_epoch(), - prover_rng in arb_rng().prop_map(TestCsprng), - mut rng in arb_rng().prop_map(TestCsprng), - bparams_rng in arb_rng().prop_map(TestCsprng), - ) -> (ShieldedTransfer, HashMap, StoredBuildParams) { - let mut rng_build_params = RngBuildParams::new(bparams_rng); - let (masp_tx, metadata) = builder.clone().build( - &MockTxProver(Mutex::new(prover_rng)), - &FeeRule::non_standard(U64Sum::zero()), - &mut rng, - &mut rng_build_params, - ).unwrap(); - (ShieldedTransfer { - builder: builder.map_builder(WalletMap), - metadata, - masp_tx, - epoch, - }, asset_types, rng_build_params.to_stored().unwrap()) - } - } - - prop_compose! 
{ - /// Generate an arbitrary MASP shielded transfer - pub fn arb_deshielding_transfer( - target: TransparentAddress, - asset_range: impl Into, - )(asset_range in Just(asset_range.into()))( - (builder, asset_types) in arb_deshielding_builder( - target, - asset_range, - ), - epoch in arb_epoch(), - prover_rng in arb_rng().prop_map(TestCsprng), - mut rng in arb_rng().prop_map(TestCsprng), - bparams_rng in arb_rng().prop_map(TestCsprng), - ) -> (ShieldedTransfer, HashMap, StoredBuildParams) { - let mut rng_build_params = RngBuildParams::new(bparams_rng); - let (masp_tx, metadata) = builder.clone().build( - &MockTxProver(Mutex::new(prover_rng)), - &FeeRule::non_standard(U64Sum::zero()), - &mut rng, - &mut rng_build_params, - ).unwrap(); - (ShieldedTransfer { - builder: builder.map_builder(WalletMap), - metadata, - masp_tx, - epoch, - }, asset_types, rng_build_params.to_stored().unwrap()) - } - } -} - -#[cfg(feature = "std")] -/// Implementation of MASP functionality depending on a standard filesystem -pub mod fs { - use std::fs::{File, OpenOptions}; - use std::io::{Read, Write}; - - use super::*; - - /// Shielded context file name - const FILE_NAME: &str = "shielded.dat"; - const TMP_FILE_NAME: &str = "shielded.tmp"; - const SPECULATIVE_FILE_NAME: &str = "speculative_shielded.dat"; - const SPECULATIVE_TMP_FILE_NAME: &str = "speculative_shielded.tmp"; - - #[derive(Debug, BorshSerialize, BorshDeserialize, Clone)] - /// An implementation of ShieldedUtils for standard filesystems - pub struct FsShieldedUtils { - #[borsh(skip)] - context_dir: PathBuf, - } - - impl FsShieldedUtils { - /// Initialize a shielded transaction context that identifies notes - /// decryptable by any viewing key in the given set - pub fn new(context_dir: PathBuf) -> ShieldedContext { - // Make sure that MASP parameters are downloaded to enable MASP - // transaction building and verification later on - let params_dir = get_params_dir(); - let spend_path = params_dir.join(SPEND_NAME); - let 
convert_path = params_dir.join(CONVERT_NAME); - let output_path = params_dir.join(OUTPUT_NAME); - if !(spend_path.exists() - && convert_path.exists() - && output_path.exists()) - { - #[allow(clippy::print_stdout)] - { - println!("MASP parameters not present, downloading..."); - } - masp_proofs::download_masp_parameters(None) - .expect("MASP parameters not present or downloadable"); - #[allow(clippy::print_stdout)] - { - println!( - "MASP parameter download complete, resuming \ - execution..." - ); - } - } - // Finally initialize a shielded context with the supplied directory - - let sync_status = - if std::fs::read(context_dir.join(SPECULATIVE_FILE_NAME)) - .is_ok() - { - // Load speculative state - ContextSyncStatus::Speculative - } else { - ContextSyncStatus::Confirmed - }; - - let utils = Self { context_dir }; - ShieldedContext { - utils, - sync_status, - ..Default::default() - } - } - } - - impl Default for FsShieldedUtils { - fn default() -> Self { - Self { - context_dir: PathBuf::from(FILE_NAME), - } - } - } - - #[cfg_attr(feature = "async-send", async_trait::async_trait)] - #[cfg_attr(not(feature = "async-send"), async_trait::async_trait(?Send))] - impl ShieldedUtils for FsShieldedUtils { - fn local_tx_prover(&self) -> LocalTxProver { - if let Ok(params_dir) = env::var(ENV_VAR_MASP_PARAMS_DIR) { - let params_dir = PathBuf::from(params_dir); - let spend_path = params_dir.join(SPEND_NAME); - let convert_path = params_dir.join(CONVERT_NAME); - let output_path = params_dir.join(OUTPUT_NAME); - LocalTxProver::new(&spend_path, &output_path, &convert_path) - } else { - LocalTxProver::with_default_location() - .expect("unable to load MASP Parameters") - } - } + }); + decryption_handle.join().unwrap()?; - /// Try to load the last saved shielded context from the given context - /// directory. If this fails, then leave the current context unchanged. 
- async fn load(
- &self,
- ctx: &mut ShieldedContext,
- force_confirmed: bool,
- ) -> std::io::Result<()> {
- // Try to load shielded context from file
- let file_name = if force_confirmed {
- FILE_NAME
- } else {
- match ctx.sync_status {
- ContextSyncStatus::Confirmed => FILE_NAME,
- ContextSyncStatus::Speculative => SPECULATIVE_FILE_NAME,
+ // if fetching failed before completing, we restart
+ // the fetch process. Otherwise, we can break the loop.
+ if logger.left_to_fetch() == 0 {
+ break Ok(());
}
- };
- let mut ctx_file = File::open(self.context_dir.join(file_name))?;
- let mut bytes = Vec::new();
- ctx_file.read_to_end(&mut bytes)?;
- // Fill the supplied context with the deserialized object
- *ctx = ShieldedContext {
- utils: ctx.utils.clone(),
- ..ShieldedContext::::deserialize(&mut &bytes[..])?
- };
- Ok(())
- }
-
- /// Save this confirmed shielded context into its associated context
- /// directory. At the same time, delete the speculative file if present
- async fn save(
- &self,
- ctx: &ShieldedContext,
- ) -> std::io::Result<()> {
- // TODO: use mktemp crate?
- let (tmp_file_name, file_name) = match ctx.sync_status {
- ContextSyncStatus::Confirmed => (TMP_FILE_NAME, FILE_NAME),
- ContextSyncStatus::Speculative => {
- (SPECULATIVE_TMP_FILE_NAME, SPECULATIVE_FILE_NAME)
- }
- };
- let tmp_path = self.context_dir.join(tmp_file_name);
- {
- // First serialize the shielded context into a temporary file.
- // Inability to create this file implies a simultaneuous write
- // is in progress. In this case, immediately
- // fail. This is unproblematic because the data
- // intended to be stored can always be re-fetched
- // from the blockchain.
- let mut ctx_file = OpenOptions::new()
- .write(true)
- .create_new(true)
- .open(tmp_path.clone())?;
- let mut bytes = Vec::new();
- ctx.serialize(&mut bytes)
- .expect("cannot serialize shielded context");
- ctx_file.write_all(&bytes[..])?;
- }
- // Atomically update the old shielded context file with new data.
- // Atomicity is required to prevent other client instances from - // reading corrupt data. - std::fs::rename(tmp_path, self.context_dir.join(file_name))?; - - // Remove the speculative file if present since it's state is - // overruled by the confirmed one we just saved - if let ContextSyncStatus::Confirmed = ctx.sync_status { - let _ = std::fs::remove_file( - self.context_dir.join(SPECULATIVE_FILE_NAME), - ); } - - Ok(()) - } - } -} - -/// A enum to indicate how to log sync progress depending on -/// whether sync is currently fetch or scanning blocks. -#[derive(Debug, Copy, Clone)] -pub enum ProgressType { - /// Fetch - Fetch, - /// Scan - Scan, -} - -#[allow(missing_docs)] -pub trait ProgressLogger { - type Fetch: Iterator; - type Scan: Iterator; - - fn io(&self) -> &IO; - - fn fetch(&self, items: I) -> Self::Fetch - where - I: IntoIterator; - - fn scan(&self, items: I) -> Self::Scan - where - I: IntoIterator; -} - -/// The default type for logging sync progress. -#[derive(Debug, Clone)] -pub struct DefaultLogger<'io, IO: Io> { - io: &'io IO, -} - -impl<'io, IO: Io> DefaultLogger<'io, IO> { - /// Initialize default logger - pub fn new(io: &'io IO) -> Self { - Self { io } - } -} - -impl<'io, IO: Io> ProgressLogger for DefaultLogger<'io, IO> { - type Fetch = as IntoIterator>::IntoIter; - type Scan = as IntoIterator>::IntoIter; - - fn io(&self) -> &IO { - self.io - } - - fn fetch(&self, items: I) -> Self::Fetch - where - I: IntoIterator, - { - let items: Vec<_> = items.into_iter().collect(); - items.into_iter() - } - - fn scan(&self, items: I) -> Self::Scan - where - I: IntoIterator, - { - let items: Vec<_> = items.into_iter().collect(); - items.into_iter() + }) } } diff --git a/crates/sdk/src/masp/types.rs b/crates/sdk/src/masp/types.rs new file mode 100644 index 0000000000..50fdfe31f3 --- /dev/null +++ b/crates/sdk/src/masp/types.rs @@ -0,0 +1,262 @@ +use std::collections::{BTreeMap, BTreeSet, HashMap}; +use std::io::{Read, Write}; +use std::sync::{Arc, Mutex}; 
+ +use bls12_381::Bls12; +use borsh_ext::BorshSerializeExt; +use masp_primitives::asset_type::AssetType; +use masp_primitives::convert::AllowedConversion; +use masp_primitives::merkle_tree::MerklePath; +use masp_primitives::sapling::{Node, ViewingKey}; +use masp_primitives::transaction::builder::{Builder, MapBuilder}; +use masp_primitives::transaction::components::sapling::builder::SaplingMetadata; +use masp_primitives::transaction::components::{I128Sum, ValueSum}; +use masp_primitives::transaction::{ + builder, Authorization, Authorized, Transaction, Unauthorized, +}; +use masp_primitives::zip32::{ExtendedFullViewingKey, ExtendedSpendingKey}; +use masp_proofs::bellman::groth16::PreparedVerifyingKey; +use namada_core::address::Address; +use namada_core::borsh::{BorshDeserialize, BorshSerialize}; +use namada_core::dec::Dec; +use namada_core::storage::{BlockHeight, Epoch, IndexedTx}; +use namada_core::uint::Uint; +use namada_macros::BorshDeserializer; +#[cfg(feature = "migrations")] +use namada_migrations::*; +use namada_token as token; +use thiserror::Error; + +use crate::error::Error; + +/// Type alias for convenience and profit +pub type IndexedNoteData = BTreeMap< + IndexedTx, + (Epoch, BTreeSet, Transaction), +>; + +/// Type alias for the entries of [`IndexedNoteData`] iterators +pub type IndexedNoteEntry = ( + IndexedTx, + (Epoch, BTreeSet, Transaction), +); + +/// Represents the amount used of different conversions +pub type Conversions = + BTreeMap, i128)>; + +/// Represents the changes that were made to a list of transparent accounts +pub type TransferDelta = HashMap; + +/// a masp amount +pub type MaspAmount = ValueSum<(Option, Address), token::Change>; + +/// Represents the changes that were made to a list of shielded accounts +pub type TransactionDelta = HashMap; + +/// A return type for gen_shielded_transfer +#[derive(Error, Debug)] +pub enum TransferErr { + /// Build error for masp errors + #[error("{0}")] + Build(#[from] builder::Error), + /// errors + 
#[error("{0}")]
+ General(#[from] Error),
+}
+
+/// Represents an authorization where the Sapling bundle is authorized and the
+/// transparent bundle is unauthorized.
+pub struct PartialAuthorized;
+
+impl Authorization for PartialAuthorized {
+ type SaplingAuth = ::SaplingAuth;
+ type TransparentAuth = ::TransparentAuth;
+}
+
+/// Shielded transfer
+#[derive(Clone, Debug, BorshSerialize, BorshDeserialize, BorshDeserializer)]
+pub struct ShieldedTransfer {
+ /// Shielded transfer builder
+ pub builder: Builder<(), (), ExtendedFullViewingKey, ()>,
+ /// MASP transaction
+ pub masp_tx: Transaction,
+ /// Metadata
+ pub metadata: SaplingMetadata,
+ /// Epoch in which the transaction was created
+ pub epoch: Epoch,
+}
+
+/// Shielded pool data for a token
+#[derive(Debug, BorshSerialize, BorshDeserialize, BorshDeserializer)]
+pub struct MaspTokenRewardData {
+ pub name: String,
+ pub address: Address,
+ pub max_reward_rate: Dec,
+ pub kp_gain: Dec,
+ pub kd_gain: Dec,
+ pub locked_amount_target: Uint,
+}
+
+#[derive(Debug, Clone)]
+struct ExtractedMaspTx {
+ fee_unshielding: Option<(BTreeSet, Transaction)>,
+ inner_tx: Option<(BTreeSet, Transaction)>,
+}
+
+/// MASP verifying keys
+pub struct PVKs {
+ /// spend verifying key
+ pub spend_vk: PreparedVerifyingKey,
+ /// convert verifying key
+ pub convert_vk: PreparedVerifyingKey,
+ /// output verifying key
+ pub output_vk: PreparedVerifyingKey,
+}
+
+#[derive(BorshSerialize, BorshDeserialize, Debug, Copy, Clone)]
+/// The possible sync states of the shielded context
+pub enum ContextSyncStatus {
+ /// The context contains only data that has been confirmed by the protocol
+ Confirmed,
+ /// The context contains data that has not yet been confirmed by the
+ /// protocol and could end up being invalid
+ Speculative,
+}
+
+/// a masp change
+#[derive(BorshSerialize, BorshDeserialize, BorshDeserializer, Debug, Clone)]
+pub struct MaspChange {
+ /// the token address
+ pub asset: Address,
+ /// the change in the token
+ pub 
change: token::Change, +} + +/// A cache of fetched indexed transactions. +/// +/// The cache is designed so that it either contains +/// all transactions from a given height, or none. +#[derive(Debug, Default, Clone)] +pub struct Unscanned { + txs: Arc>, +} + +impl BorshSerialize for Unscanned { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + let locked = self.txs.lock().unwrap(); + let bytes = locked.serialize_to_vec(); + writer.write(&bytes).map(|_| ()) + } +} + +impl BorshDeserialize for Unscanned { + fn deserialize_reader(reader: &mut R) -> std::io::Result { + let unscanned = IndexedNoteData::deserialize_reader(reader)?; + Ok(Self { + txs: Arc::new(Mutex::new(unscanned)), + }) + } +} + +impl Unscanned { + pub fn extend(&self, items: I) + where + I: IntoIterator, + { + let mut locked = self.txs.lock().unwrap(); + locked.extend(items); + } + + pub fn insert(&self, (k, v): IndexedNoteEntry) { + let mut locked = self.txs.lock().unwrap(); + locked.insert(k, v); + } + + pub fn contains_height(&self, height: u64) -> bool { + let locked = self.txs.lock().unwrap(); + locked.keys().any(|k| k.height.0 == height) + } + + /// We remove all indices from blocks that have been entirely scanned. + /// If a block is only partially scanned, we leave all the events in the + /// cache. 
+ pub fn scanned(&self, ix: &IndexedTx) { + let mut locked = self.txs.lock().unwrap(); + locked.retain(|i, _| i.height >= ix.height); + } + + /// Gets the latest block height present in the cache + pub fn latest_height(&self) -> BlockHeight { + let txs = self.txs.lock().unwrap(); + txs.keys() + .max_by_key(|ix| ix.height) + .map(|ix| ix.height) + .unwrap_or_default() + } + + /// Gets the first block height present in the cache + pub fn first_height(&self) -> BlockHeight { + let txs = self.txs.lock().unwrap(); + txs.keys() + .min_by_key(|ix| ix.height) + .map(|ix| ix.height) + .unwrap_or_default() + } + + pub fn pop_first(&self) -> Option { + let mut locked = self.txs.lock().unwrap(); + locked.pop_first() + } +} + +impl IntoIterator for Unscanned { + type IntoIter = ::IntoIter; + type Item = IndexedNoteEntry; + + fn into_iter(self) -> Self::IntoIter { + let txs = { + let mut txs: IndexedNoteData = Default::default(); + let mut locked = self.txs.lock().unwrap(); + std::mem::swap(&mut txs, &mut locked); + txs + }; + txs.into_iter() + } +} + +/// Freeze a Builder into the format necessary for inclusion in a Tx. This is +/// the format used by hardware wallets to validate a MASP Transaction. 
+pub(super) struct WalletMap; + +impl + masp_primitives::transaction::components::sapling::builder::MapBuilder< + P1, + ExtendedSpendingKey, + (), + ExtendedFullViewingKey, + > for WalletMap +{ + fn map_params(&self, _s: P1) {} + + fn map_key(&self, s: ExtendedSpendingKey) -> ExtendedFullViewingKey { + (&s).into() + } +} + +impl + MapBuilder< + P1, + R1, + ExtendedSpendingKey, + N1, + (), + (), + ExtendedFullViewingKey, + (), + > for WalletMap +{ + fn map_rng(&self, _s: R1) {} + + fn map_notifier(&self, _s: N1) {} +} diff --git a/crates/sdk/src/masp/utils.rs b/crates/sdk/src/masp/utils.rs new file mode 100644 index 0000000000..d38d4f083c --- /dev/null +++ b/crates/sdk/src/masp/utils.rs @@ -0,0 +1,653 @@ +use core::str::FromStr; +use std::collections::{BTreeMap, BTreeSet}; +use std::env; +use std::path::PathBuf; +use std::sync::Arc; + +use borsh::{BorshDeserialize, BorshSerialize}; +use masp_primitives::sapling::keys::FullViewingKey; +use masp_primitives::sapling::{Diversifier, ViewingKey}; +use masp_primitives::transaction::components::I128Sum; +use masp_primitives::transaction::Transaction; +use masp_primitives::zip32::{ExtendedFullViewingKey, ExtendedSpendingKey}; +use masp_proofs::prover::LocalTxProver; +use namada_core::address::Address; +use namada_core::storage::{BlockHeight, Epoch, IndexedTx, Key, TxIndex}; +use namada_core::token::Transfer; +use namada_ibc::IbcMessage; +use namada_tx::data::{TxResult, WrapperTx}; +use namada_tx::Tx; +use rand_core::{CryptoRng, RngCore}; +use tokio::sync::mpsc::{Receiver, Sender}; + +use crate::error::{Error, QueryError}; +use crate::io::Io; +use crate::masp::shielded_ctx::ShieldedContext; +use crate::masp::types::{IndexedNoteEntry, PVKs, Unscanned}; +use crate::masp::{ENV_VAR_MASP_PARAMS_DIR, VERIFIYING_KEYS}; +use crate::queries::Client; +use crate::{MaybeSend, MaybeSync}; + +/// Make sure the MASP params are present and load verifying keys into memory +pub fn preload_verifying_keys() -> &'static PVKs { + &VERIFIYING_KEYS 
+} + +pub(super) fn load_pvks() -> &'static PVKs { + &VERIFIYING_KEYS +} + +/// Get the path to MASP parameters from [`ENV_VAR_MASP_PARAMS_DIR`] env var or +/// use the default. +pub fn get_params_dir() -> PathBuf { + if let Ok(params_dir) = env::var(ENV_VAR_MASP_PARAMS_DIR) { + println!("Using {} as masp parameter folder.", params_dir); + PathBuf::from(params_dir) + } else { + masp_proofs::default_params_folder().unwrap() + } +} + +/// Abstracts platform specific details away from the logic of shielded pool +/// operations. +#[cfg_attr(feature = "async-send", async_trait::async_trait)] +#[cfg_attr(not(feature = "async-send"), async_trait::async_trait(?Send))] +pub trait ShieldedUtils: + Sized + BorshDeserialize + BorshSerialize + Default + Clone +{ + /// Get a MASP transaction prover + fn local_tx_prover(&self) -> LocalTxProver; + + /// Load up the currently saved ShieldedContext + async fn load( + &self, + ctx: &mut ShieldedContext, + force_confirmed: bool, + ) -> std::io::Result<()>; + + /// Save the given ShieldedContext for future loads + async fn save( + &self, + ctx: &ShieldedContext, + ) -> std::io::Result<()>; +} + +/// Make a ViewingKey that can view notes encrypted by given ExtendedSpendingKey +pub fn to_viewing_key(esk: &ExtendedSpendingKey) -> FullViewingKey { + ExtendedFullViewingKey::from(esk).fvk +} + +/// Generate a valid diversifier, i.e. one that has a diversified base. Return +/// also this diversified base. 
+pub fn find_valid_diversifier( + rng: &mut R, +) -> (Diversifier, masp_primitives::jubjub::SubgroupPoint) { + let mut diversifier; + let g_d; + // Keep generating random diversifiers until one has a diversified base + loop { + let mut d = [0; 11]; + rng.fill_bytes(&mut d); + diversifier = Diversifier(d); + if let Some(val) = diversifier.g_d() { + g_d = val; + break; + } + } + (diversifier, g_d) +} + +/// Determine if using the current note would actually bring us closer to our +/// target +pub fn is_amount_required(src: I128Sum, dest: I128Sum, delta: I128Sum) -> bool { + let gap = dest - src; + for (asset_type, value) in gap.components() { + if *value > 0 && delta[asset_type] > 0 { + return true; + } + } + false +} + +/// An extension of Option's cloned method for pair types +pub(super) fn cloned_pair((a, b): (&T, &U)) -> (T, U) { + (a.clone(), b.clone()) +} + +/// Extract the payload from the given Tx object +pub(super) fn extract_payload( + tx: Tx, + wrapper: &mut Option, + transfer: &mut Option, +) -> Result<(), Error> { + *wrapper = tx.header.wrapper(); + let _ = tx.data().map(|signed| { + Transfer::try_from_slice(&signed[..]).map(|tfer| *transfer = Some(tfer)) + }); + Ok(()) +} + +// Retrieves all the indexes and tx events at the specified height which refer +// to a valid masp transaction. If an index is given, it filters only the +// transactions with an index equal or greater to the provided one. +pub(super) async fn get_indexed_masp_events_at_height( + client: &C, + height: BlockHeight, + first_idx_to_query: Option, +) -> Result>, Error> { + let first_idx_to_query = first_idx_to_query.unwrap_or_default(); + + Ok(client + .block_results(height.0 as u32) + .await + .map_err(|e| Error::from(QueryError::General(e.to_string())))? 
+ .end_block_events + .map(|events| { + events + .into_iter() + .filter_map(|event| { + let tx_index = + event.attributes.iter().find_map(|attribute| { + if attribute.key == "is_valid_masp_tx" { + Some(TxIndex( + u32::from_str(&attribute.value).unwrap(), + )) + } else { + None + } + }); + + match tx_index { + Some(idx) => { + if idx >= first_idx_to_query { + Some((idx, event)) + } else { + None + } + } + None => None, + } + }) + .collect::>() + })) +} + +pub(super) enum ExtractShieldedActionArg<'args, C: Client> { + Event(&'args crate::tendermint::abci::Event), + Request((&'args C, BlockHeight, Option)), +} + +/// Extract the relevant shield portions of a [`Tx`], if any. +pub(super) async fn extract_masp_tx<'args, C: Client + Sync>( + tx: &Tx, + action_arg: ExtractShieldedActionArg<'args, C>, + check_header: bool, +) -> Result { + // We use the changed keys instead of the Transfer object + // because those are what the masp validity predicate works on + let (wrapper_changed_keys, changed_keys) = + if let ExtractShieldedActionArg::Event(tx_event) = action_arg { + let tx_result_str = tx_event + .attributes + .iter() + .find_map(|attr| { + if attr.key == "inner_tx" { + Some(&attr.value) + } else { + None + } + }) + .ok_or_else(|| { + Error::Other( + "Missing required tx result in event".to_string(), + ) + })?; + let result = TxResult::from_str(tx_result_str) + .map_err(|e| Error::Other(e.to_string()))?; + (result.wrapper_changed_keys, result.changed_keys) + } else { + (Default::default(), Default::default()) + }; + + let tx_header = tx.header(); + // NOTE: simply looking for masp sections attached to the tx + // is not safe. We don't validate the sections attached to a + // transaction se we could end up with transactions carrying + // an unnecessary masp section. 
We must instead look for the + // required masp sections in the signed commitments (hashes) + // of the transactions' headers/data sections + let wrapper_header = tx_header + .wrapper() + .expect("All transactions must have a wrapper"); + let maybe_fee_unshield = if let (Some(hash), true) = + (wrapper_header.unshield_section_hash, check_header) + { + let masp_transaction = tx + .get_section(&hash) + .ok_or_else(|| { + Error::Other("Missing expected masp section".to_string()) + })? + .masp_tx() + .ok_or_else(|| { + Error::Other("Missing masp transaction".to_string()) + })?; + + Some((wrapper_changed_keys, masp_transaction)) + } else { + None + }; + + // Expect transaction + let tx_data = tx + .data() + .ok_or_else(|| Error::Other("Missing data section".to_string()))?; + let maybe_masp_tx = match Transfer::try_from_slice(&tx_data) { + Ok(transfer) => Some((changed_keys, transfer)), + Err(_) => { + // This should be a MASP over IBC transaction, it + // could be a ShieldedTransfer or an Envelope + // message, need to try both + extract_payload_from_shielded_action::(&tx_data, action_arg) + .await + .ok() + } + } + .map(|(changed_keys, transfer)| { + if let Some(hash) = transfer.shielded { + let masp_tx = tx + .get_section(&hash) + .ok_or_else(|| { + Error::Other( + "Missing masp section in transaction".to_string(), + ) + })? + .masp_tx() + .ok_or_else(|| { + Error::Other("Missing masp transaction".to_string()) + })?; + + Ok::<_, Error>(Some((changed_keys, masp_tx))) + } else { + Ok(None) + } + }) + .transpose()? 
+ .flatten(); + + Ok(ExtractedMaspTx { + fee_unshielding: maybe_fee_unshield, + inner_tx: maybe_masp_tx, + }) +} + +// Extract the changed keys and Transaction hash from a masp over ibc message +pub(super) async fn extract_payload_from_shielded_action<'args, C: Client>( + tx_data: &[u8], + mut args: ExtractShieldedActionArg<'args, C>, +) -> Result<(BTreeSet, Transfer), Error> { + let message = namada_ibc::decode_message(tx_data) + .map_err(|e| Error::Other(e.to_string()))?; + + let result = match message { + IbcMessage::Transfer(msg) => { + let tx_result = get_sending_result(args)?; + + let transfer = msg.transfer.ok_or_else(|| { + Error::Other("Missing masp tx in the ibc message".to_string()) + })?; + + (tx_result.changed_keys, transfer) + } + IbcMessage::NftTransfer(msg) => { + let tx_result = get_sending_result(args)?; + + let transfer = msg.transfer.ok_or_else(|| { + Error::Other("Missing masp tx in the ibc message".to_string()) + })?; + + (tx_result.changed_keys, transfer) + } + IbcMessage::RecvPacket(msg) => { + let tx_result = get_receiving_result(args).await?; + + let transfer = msg.transfer.ok_or_else(|| { + Error::Other("Missing masp tx in the ibc message".to_string()) + })?; + + (tx_result.changed_keys, transfer) + } + IbcMessage::AckPacket(msg) => { + // Refund tokens by the ack message + let tx_result = get_receiving_result(args).await?; + + let transfer = msg.transfer.ok_or_else(|| { + Error::Other("Missing masp tx in the ibc message".to_string()) + })?; + + (tx_result.changed_keys, transfer) + } + IbcMessage::Timeout(msg) => { + // Refund tokens by the timeout message + let tx_result = get_receiving_result(args).await?; + + let transfer = msg.transfer.ok_or_else(|| { + Error::Other("Missing masp tx in the ibc message".to_string()) + })?; + + (tx_result.changed_keys, transfer) + } + IbcMessage::Envelope(_) => { + return Err(Error::Other( + "Unexpected ibc message for masp".to_string(), + )); + } + }; + + Ok(result) +} + +fn get_sending_result( + args: 
ExtractShieldedActionArg<'_, C>, +) -> Result { + let tx_event = match args { + ExtractShieldedActionArg::Event(event) => event, + ExtractShieldedActionArg::Request(_) => { + return Err(Error::Other( + "Unexpected event request for ShieldedTransfer".to_string(), + )); + } + }; + + get_tx_result(tx_event) +} + +async fn get_receiving_result( + args: ExtractShieldedActionArg<'_, C>, +) -> Result { + let tx_event = match args { + ExtractShieldedActionArg::Event(event) => { + std::borrow::Cow::Borrowed(event) + } + ExtractShieldedActionArg::Request((client, height, index)) => { + std::borrow::Cow::Owned( + get_indexed_masp_events_at_height(client, height, index) + .await? + .ok_or_else(|| { + Error::Other(format!( + "Missing required ibc event at block height {}", + height + )) + })? + .first() + .ok_or_else(|| { + Error::Other(format!( + "Missing required ibc event at block height {}", + height + )) + })? + .1 + .to_owned(), + ) + } + }; + + get_tx_result(&tx_event) +} + +fn get_tx_result( + tx_event: &crate::tendermint::abci::Event, +) -> Result { + tx_event + .attributes + .iter() + .find_map(|attribute| { + if attribute.key == "inner_tx" { + let tx_result = TxResult::from_str(&attribute.value) + .expect("The event value should be parsable"); + Some(tx_result) + } else { + None + } + }) + .ok_or_else(|| { + Error::Other( + "Couldn't find changed keys in the event for the provided \ + transaction" + .to_string(), + ) + }) +} + +/// A channel-like struct for "sending" newly fetched blocks +/// to the scanning algorithm. +/// +/// Holds a pointer to the unscanned cache which it can append to. +/// Furthermore, has an actual channel for keeping track if +/// 1. The process in possession of the channel is still alive +/// 2. Quickly updating the latest block height scanned. 
+#[derive(Clone)] +pub(super) struct FetchQueueSender { + cache: Unscanned, + last_fetched: flume::Sender, +} + +/// A channel-like struct for "receiving" new fetched +/// blocks for the scanning algorithm. +/// +/// This is implemented as an iterator for the scanning +/// algorithm. This receiver pulls from the cache until +/// it is empty. It then waits until new entries appear +/// in the cache or the sender hangs up. +#[derive(Clone)] +pub(super) struct FetchQueueReceiver { + cache: Unscanned, + last_fetched: flume::Receiver, + last_query_height: BlockHeight, +} + +impl FetchQueueReceiver { + /// Check if the sender has hung up. If so, manually calculate the latest + /// height fetched. Otherwise, update the latest height fetched with the + /// data provided by the sender. + fn sender_alive(&self) -> bool { + self.last_fetched.sender_count() > 0 + } +} + +impl Iterator for FetchQueueReceiver { + type Item = IndexedNoteEntry; + + fn next(&mut self) -> Option { + if let Some(entry) = self.cache.pop_first() { + Some(entry) + } else { + while self.sender_alive() { + if let Some(entry) = self.cache.pop_first() { + return Some(entry); + } + } + None + } + } + + fn size_hint(&self) -> (usize, Option) { + let size = (self.last_query_height - self.cache.first_height().into()).0 + as usize; + (size, Some(size)) + } +} + +impl FetchQueueSender { + pub(super) fn contains_height(&self, height: u64) -> bool { + self.cache.contains_height(height) + } + + pub(super) fn send(&mut self, data: IndexedNoteEntry) { + self.last_fetched.send(data.0.height).unwrap(); + self.cache.insert(data); + } +} + +/// A convenience for creating a channel for fetching blocks. 
+pub mod fetch_channel { + use namada_core::storage::BlockHeight; + + use super::{FetchQueueReceiver, FetchQueueSender, Unscanned}; + pub(in super::super) fn new( + cache: Unscanned, + last_query_height: BlockHeight, + ) -> (FetchQueueSender, FetchQueueReceiver) { + let (fetch_send, fetch_recv) = flume::unbounded(); + ( + FetchQueueSender { + cache: cache.clone(), + last_fetched: fetch_send, + }, + FetchQueueReceiver { + cache: cache.clone(), + last_fetched: fetch_recv, + last_query_height, + }, + ) + } +} + +enum Action { + Complete, + Data(Arc>>, BlockHeight), +} +pub struct TaskManager { + action: Receiver>, + pub(super) latest_height: BlockHeight, +} + +#[derive(Clone)] +pub(super) struct TaskManagerChannel { + action: Sender>, + ctx: Arc>>, +} + +impl TaskManager { + /// Create a client proxy and spawn a process to forward + /// proxy requests. + pub(super) fn new( + ctx: ShieldedContext, + ) -> (TaskManagerChannel, Self) { + let (save_send, save_recv) = tokio::sync::mpsc::channel(100); + ( + TaskManagerChannel { + action: save_send, + ctx: Arc::new(futures_locks::Mutex::new(ctx)), + }, + TaskManager { + action: save_recv, + latest_height: Default::default(), + }, + ) + } + + pub async fn run(&mut self) { + while let Some(action) = self.action.recv().await { + match action { + Action::Complete => return, + Action::Data(data, height) => { + self.latest_height = height; + let locked = data.lock().await; + _ = locked.save().await; + } + } + } + } +} + +impl TaskManagerChannel { + + pub(super) fn complete(&self) { + self.action.blocking_send(Action::Complete).unwrap() + } + + pub(super) fn save(&self, latest_height: BlockHeight) { + self.action + .blocking_send(Action::Data(self.ctx.clone(), latest_height)) + .unwrap(); + } + + pub(super) fn update_witness_map( + &self, + indexed_tx: IndexedTx, + stx: &Transaction, + ) -> Result<(), Error> { + let mut locked = self.acquire(); + let res = locked.update_witness_map(indexed_tx, stx); + if res.is_err() { + 
self.complete() + } + res + } + + pub(super) fn scan_tx( + &self, + indexed_tx: IndexedTx, + epoch: Epoch, + tx: &BTreeSet, + stx: &Transaction, + vk: &ViewingKey, + native_token: Address, + ) -> Result<(), Error> { + let mut locked = self.acquire(); + let res = locked.scan_tx(indexed_tx, epoch, tx, stx, vk, native_token); + if res.is_err() { + self.complete(); + } + res + } + + pub(super) fn get_vk_heights( + &self, + ) -> BTreeMap> { + let mut locked = self.acquire(); + let mut vk_heights = BTreeMap::new(); + std::mem::swap(&mut vk_heights, &mut locked.vk_heights); + vk_heights + } + + pub(super) fn set_vk_heights( + &self, + mut vk_heights: BTreeMap>, + ) { + let mut locked = self.acquire(); + std::mem::swap(&mut vk_heights, &mut locked.vk_heights); + } + + /// Kids, don't try this at home. + fn acquire(&self) -> futures_locks::MutexGuard> { + loop { + if let Ok(ctx) = self.ctx.try_lock() { + return ctx; + } + std::hint::spin_loop(); + } + } +} + +/// An enum to indicate how to log sync progress depending on +/// whether sync is currently fetch or scanning blocks. 
+#[derive(Debug, Copy, Clone)] +pub enum ProgressType { + Fetch, + Scan, +} + +pub trait ProgressLogger { + fn io(&self) -> &IO; + + fn fetch(&self, items: I) -> impl Iterator + where + I: Iterator; + + fn scan(&self, items: I) -> impl Iterator + where + I: Iterator; + + fn left_to_fetch(&self) -> usize; +} \ No newline at end of file diff --git a/crates/sdk/src/queries/mod.rs b/crates/sdk/src/queries/mod.rs index eb6232cf71..97806e7022 100644 --- a/crates/sdk/src/queries/mod.rs +++ b/crates/sdk/src/queries/mod.rs @@ -423,7 +423,9 @@ pub trait Client { #[cfg_attr(feature = "async-send", async_trait::async_trait)] #[cfg_attr(not(feature = "async-send"), async_trait::async_trait(?Send))] -impl Client for C { +impl Client + for C +{ type Error = Error; async fn request( diff --git a/crates/sdk/src/queries/shell.rs b/crates/sdk/src/queries/shell.rs index caaff8f7fb..419b728380 100644 --- a/crates/sdk/src/queries/shell.rs +++ b/crates/sdk/src/queries/shell.rs @@ -32,7 +32,7 @@ use crate::events::Event; use crate::ibc::core::host::types::identifiers::{ ChannelId, ClientId, PortId, Sequence, }; -use crate::masp::MaspTokenRewardData; +use crate::masp::types::MaspTokenRewardData; use crate::queries::types::{RequestCtx, RequestQuery}; use crate::queries::{require_latest_height, EncodedResponseQuery}; use crate::tendermint::merkle::proof::ProofOps; diff --git a/crates/sdk/src/tx.rs b/crates/sdk/src/tx.rs index 5dc90cec70..0c69c87c55 100644 --- a/crates/sdk/src/tx.rs +++ b/crates/sdk/src/tx.rs @@ -68,8 +68,9 @@ use rand_core::{OsRng, RngCore}; use crate::control_flow::time; use crate::error::{EncodingError, Error, QueryError, Result, TxSubmitError}; use crate::io::Io; -use crate::masp::TransferErr::Build; -use crate::masp::{ShieldedContext, ShieldedTransfer}; +use crate::masp::types::ShieldedTransfer; +use crate::masp::types::TransferErr::Build; +use crate::masp::ShieldedContext; use crate::queries::Client; use crate::rpc::{ self, get_validator_stake, query_wasm_code_hash, 
validate_amount, diff --git a/crates/tests/src/e2e/ledger_tests.rs b/crates/tests/src/e2e/ledger_tests.rs index e0a69364e9..395db3d728 100644 --- a/crates/tests/src/e2e/ledger_tests.rs +++ b/crates/tests/src/e2e/ledger_tests.rs @@ -237,7 +237,6 @@ fn test_namada_shuts_down_if_tendermint_dies() -> Result<()> { ethereum_bridge::ledger::Mode::Off, None, ); - // 1. Run the ledger node let mut ledger = start_namada_ledger_node_wait_wasm(&test, Some(0), Some(40))?; @@ -253,7 +252,6 @@ fn test_namada_shuts_down_if_tendermint_dies() -> Result<()> { // 3. Check that namada detects that the tendermint node is dead ledger.exp_string("Tendermint node is no longer running.")?; - // 4. Check that the ledger node shuts down ledger.exp_string(LEDGER_SHUTDOWN)?; ledger.exp_eof()?; From 4d2fd5b51540e1537c50369c3427c5e4b2674877 Mon Sep 17 00:00:00 2001 From: satan Date: Mon, 8 Apr 2024 11:42:18 +0200 Subject: [PATCH 02/29] [fix]: Fixed some logging, improved shutdown logic --- crates/apps_lib/src/client/masp.rs | 8 ++- crates/sdk/src/masp/shielded_ctx.rs | 5 +- crates/sdk/src/masp/utils.rs | 92 +++++++++++++++++++++++++---- 3 files changed, 89 insertions(+), 16 deletions(-) diff --git a/crates/apps_lib/src/client/masp.rs b/crates/apps_lib/src/client/masp.rs index ae51df48ef..bbdba7c755 100644 --- a/crates/apps_lib/src/client/masp.rs +++ b/crates/apps_lib/src/client/masp.rs @@ -1,7 +1,6 @@ use std::fmt::Debug; use std::sync::{Arc, Mutex}; -use color_eyre::owo_colors::OwoColorize; use masp_primitives::sapling::ViewingKey; use masp_primitives::zip32::ExtendedSpendingKey; use namada_sdk::error::Error; @@ -34,7 +33,6 @@ pub async fn syncing< rx.await }; - display_line!(io, "{}", "==== Shielded sync started ====".on_white()); display_line!(io, "\n\n"); let logger = CliLogger::new(io); let sync = async move { @@ -54,7 +52,7 @@ pub async fn syncing< tokio::select! 
{ sync = sync => { let shielded = sync?; - display!(io, "Syncing finished\n"); + display!(io, "\nSyncing finished\n"); Ok(shielded) }, sig = shutdown_signal => { @@ -70,6 +68,7 @@ struct IterProgress { index: usize, length: usize, } + struct StdoutDrawer<'io, IO: Io> { io: &'io IO, fetch: IterProgress, @@ -135,6 +134,7 @@ impl<'io, IO: Io> StdoutDrawer<'io, IO> { self.io.flush() } (Some(fp), None) => { + display_line!(self.io, "\x1b[4A\x1b[J"); display_line!(self.io, "\x1b[4A\x1b[J"); display_line!( self.io, @@ -152,6 +152,7 @@ impl<'io, IO: Io> StdoutDrawer<'io, IO> { self.io.flush() } (None, Some(sp)) => { + display_line!(self.io, "\x1b[4A\x1b[J"); display_line!(self.io, "\x1b[4A\x1b[J"); display_line!( self.io, @@ -222,6 +223,7 @@ where } ProgressType::Scan => { locked.scan.index += 1; + locked.scan.length = self.items.size_hint().0; } } } diff --git a/crates/sdk/src/masp/shielded_ctx.rs b/crates/sdk/src/masp/shielded_ctx.rs index 77582cb228..9c689b6021 100644 --- a/crates/sdk/src/masp/shielded_ctx.rs +++ b/crates/sdk/src/masp/shielded_ctx.rs @@ -1737,7 +1737,7 @@ impl ShieldedContext { shortfall += I128Sum::from_pair(*asset_type, val.into()) .expect("unable to construct value sum"); } - // Return an insufficient ffunds error + // Return an insufficient funds error return Result::Err(TransferErr::from( builder::Error::InsufficientFunds(shortfall), )); @@ -1970,6 +1970,7 @@ impl ShieldedContext { self.vk_heights.entry(*vk).or_default(); } let _ = self.save().await; + let native_token = query_native_token(client).await?; // the latest block height which has been added to the witness Merkle // tree @@ -1997,7 +1998,7 @@ impl ShieldedContext { std::thread::scope(|s| { loop { let (fetch_send, fetch_recv) = - fetch_channel::new(self.unscanned.clone(), last_query_height); + fetch_channel::new(self.unscanned.clone()); let decryption_handle = s.spawn(|| { let txs = logger.scan(fetch_recv); for (indexed_tx, (epoch, tx, stx)) in txs { diff --git 
a/crates/sdk/src/masp/utils.rs b/crates/sdk/src/masp/utils.rs index d38d4f083c..5b3850dc62 100644 --- a/crates/sdk/src/masp/utils.rs +++ b/crates/sdk/src/masp/utils.rs @@ -2,7 +2,7 @@ use core::str::FromStr; use std::collections::{BTreeMap, BTreeSet}; use std::env; use std::path::PathBuf; -use std::sync::Arc; +use std::sync::{Arc, Mutex}; use borsh::{BorshDeserialize, BorshSerialize}; use masp_primitives::sapling::keys::FullViewingKey; @@ -442,7 +442,6 @@ pub(super) struct FetchQueueSender { pub(super) struct FetchQueueReceiver { cache: Unscanned, last_fetched: flume::Receiver, - last_query_height: BlockHeight, } impl FetchQueueReceiver { @@ -471,8 +470,7 @@ impl Iterator for FetchQueueReceiver { } fn size_hint(&self) -> (usize, Option) { - let size = (self.last_query_height - self.cache.first_height().into()).0 - as usize; + let size = self.last_fetched.len(); (size, Some(size)) } } @@ -490,12 +488,10 @@ impl FetchQueueSender { /// A convenience for creating a channel for fetching blocks. pub mod fetch_channel { - use namada_core::storage::BlockHeight; use super::{FetchQueueReceiver, FetchQueueSender, Unscanned}; pub(in super::super) fn new( cache: Unscanned, - last_query_height: BlockHeight, ) -> (FetchQueueSender, FetchQueueReceiver) { let (fetch_send, fetch_recv) = flume::unbounded(); ( @@ -506,7 +502,6 @@ pub mod fetch_channel { FetchQueueReceiver { cache: cache.clone(), last_fetched: fetch_recv, - last_query_height, }, ) } @@ -522,7 +517,7 @@ pub struct TaskManager { } #[derive(Clone)] -pub(super) struct TaskManagerChannel { +pub(super) struct TaskRunner { action: Sender>, ctx: Arc>>, } @@ -532,10 +527,10 @@ impl TaskManager { /// proxy requests. 
pub(super) fn new( ctx: ShieldedContext, - ) -> (TaskManagerChannel, Self) { + ) -> (TaskRunner, Self) { let (save_send, save_recv) = tokio::sync::mpsc::channel(100); ( - TaskManagerChannel { + TaskRunner { action: save_send, ctx: Arc::new(futures_locks::Mutex::new(ctx)), }, @@ -560,7 +555,7 @@ impl TaskManager { } } -impl TaskManagerChannel { +impl TaskRunner { pub(super) fn complete(&self) { self.action.blocking_send(Action::Complete).unwrap() @@ -650,4 +645,79 @@ pub trait ProgressLogger { I: Iterator; fn left_to_fetch(&self) -> usize; +} + +/// The default type for logging sync progress. +#[derive(Debug, Clone)] +pub struct DefaultLogger<'io, IO: Io> { + io: &'io IO, + progress: Arc> +} + +impl<'io, IO: Io> DefaultLogger<'io, IO> { + pub fn new(io: &'io IO) -> Self { + Self { + io, + progress: Arc::new(Mutex::new(Default::default())) + } + } +} + +#[derive(Default, Copy, Clone, Debug)] +struct IterProgress { + index: usize, + length: usize, +} + +struct DefaultFetchIterator +where + I: Iterator, +{ + inner: I, + progress: Arc> +} + +impl> Iterator for DefaultFetchIterator { + type Item = u64; + + fn next(&mut self) -> Option { + let item = self.inner.next()?; + let mut locked = self.progress.lock().unwrap(); + locked.index += 1; + Some(item) + } +} + +impl<'io, IO: Io> ProgressLogger for DefaultLogger<'io, IO> { + + fn io(&self) -> &IO { + self.io + } + + fn fetch(&self, items: I) -> impl Iterator + where + I: Iterator, + { + { + let mut locked = self.progress.lock().unwrap(); + locked.length = items.size_hint().0; + } + DefaultFetchIterator { + inner: items, + progress: self.progress.clone(), + } + } + + fn scan(&self, items: I) -> impl Iterator + where + I: IntoIterator, + { + let items: Vec<_> = items.into_iter().collect(); + items.into_iter() + } + + fn left_to_fetch(&self) -> usize { + let locked = self.progress.lock().unwrap(); + locked.length - locked.index + } } \ No newline at end of file From b8be0678d6811707d8f7b59ad0f00bc382f5045b Mon Sep 17 
00:00:00 2001 From: satan Date: Wed, 10 Apr 2024 15:46:51 +0200 Subject: [PATCH 03/29] [fix]: Fixing check-crates to work --- crates/sdk/src/masp/shielded_ctx.rs | 1054 +++++++++++++-------------- crates/sdk/src/masp/types.rs | 2 +- crates/sdk/src/masp/utils.rs | 37 +- 3 files changed, 510 insertions(+), 583 deletions(-) diff --git a/crates/sdk/src/masp/shielded_ctx.rs b/crates/sdk/src/masp/shielded_ctx.rs index 9c689b6021..dfe55e50a1 100644 --- a/crates/sdk/src/masp/shielded_ctx.rs +++ b/crates/sdk/src/masp/shielded_ctx.rs @@ -2,13 +2,11 @@ use std::cmp::Ordering; use std::collections::{btree_map, BTreeMap, BTreeSet, HashMap, HashSet}; use std::convert::TryInto; use std::env; -use std::fmt::Debug; -use std::ops::Deref; -use std::path::PathBuf; +use std::str::FromStr; use borsh::{BorshDeserialize, BorshSerialize}; use borsh_ext::BorshSerializeExt; -use lazy_static::lazy_static; +use itertools::Either; use masp_primitives::asset_type::AssetType; use masp_primitives::consensus::TestNetwork; use masp_primitives::convert::AllowedConversion; @@ -23,11 +21,7 @@ use masp_primitives::sapling::note_encryption::{ use masp_primitives::sapling::{ Diversifier, Node, Note, Nullifier, ViewingKey, }; -use masp_primitives::transaction::builder::{self, *}; -use masp_primitives::transaction::components::sapling::builder::{ - RngBuildParams, SaplingMetadata, -}; -use masp_primitives::transaction::components::transparent::builder::TransparentBuilder; +use masp_primitives::transaction::builder::Builder; use masp_primitives::transaction::components::{ I128Sum, OutputDescription, TxOut, U64Sum, ValueSum, }; @@ -36,37 +30,26 @@ use masp_primitives::transaction::{ builder, Authorization, Authorized, Transaction, TransparentAddress, }; use masp_primitives::zip32::{ExtendedFullViewingKey, ExtendedSpendingKey}; -use masp_proofs::bellman::groth16::PreparedVerifyingKey; -use masp_proofs::bls12_381::Bls12; -use masp_proofs::prover::LocalTxProver; -#[cfg(not(feature = "testing"))] -use 
masp_proofs::sapling::SaplingVerificationContext; -use namada_core::address::Address; -use namada_core::collections::{HashMap, HashSet}; -use namada_core::dec::Dec; -pub use namada_core::masp::{ +use namada_core::address::{Address, MASP}; +use namada_core::masp::{ encode_asset_type, AssetData, BalanceOwner, ExtendedViewingKey, PaymentAddress, TransferSource, TransferTarget, }; use namada_core::storage::{BlockHeight, Epoch, IndexedTx, TxIndex}; use namada_core::time::{DateTimeUtc, DurationSecs}; -use namada_core::uint::Uint; -use namada_events::extend::{ - ReadFromEventAttributes, ValidMaspTx as ValidMaspTxAttr, -}; -use namada_ibc::IbcMessage; -use namada_macros::BorshDeserializer; -#[cfg(feature = "migrations")] -use namada_migrations::*; -use namada_state::StorageError; -use namada_token::{self as token, Denomination, MaspDigitPos, Transfer}; +use namada_core::token::Amount; +use namada_token::{self as token, Denomination, MaspDigitPos}; use namada_tx::Tx; use rand_core::OsRng; use ripemd::Digest as RipemdDigest; use sha2::Digest; -use thiserror::Error; +use tendermint_rpc::query::Query; +use tendermint_rpc::Order; -use crate::error::{Error, QueryError}; +use crate::error::{Error, PinnedBalanceError, QueryError}; +use crate::eth_bridge::token::storage_key::{ + balance_key, is_any_shielded_action_balance_key, +}; use crate::io::Io; use crate::masp::types::{ ContextSyncStatus, Conversions, MaspAmount, MaspChange, ShieldedTransfer, @@ -78,512 +61,16 @@ use crate::masp::utils::{ DefaultLogger, ExtractShieldedActionArg, FetchQueueSender, ProgressLogger, ShieldedUtils, TaskManager, }; -use crate::masp::{testing, ENV_VAR_MASP_TEST_SEED, NETWORK}; +use crate::masp::NETWORK; +#[cfg(any(test, feature = "testing"))] +use crate::masp::{testing, ENV_VAR_MASP_TEST_SEED}; use crate::queries::Client; -use crate::rpc::{query_block, query_conversion, query_denom}; +use crate::rpc::{ + query_block, query_conversion, query_denom, query_epoch_at_height, + query_native_token, +}; use 
crate::{display_line, edisplay_line, rpc, MaybeSend, MaybeSync, Namada}; -/// Env var to point to a dir with MASP parameters. When not specified, -/// the default OS specific path is used. -pub const ENV_VAR_MASP_PARAMS_DIR: &str = "NAMADA_MASP_PARAMS_DIR"; - -/// Randomness seed for MASP integration tests to build proofs with -/// deterministic rng. -pub const ENV_VAR_MASP_TEST_SEED: &str = "NAMADA_MASP_TEST_SEED"; - -/// The network to use for MASP -#[cfg(feature = "mainnet")] -const NETWORK: MainNetwork = MainNetwork; -#[cfg(not(feature = "mainnet"))] -const NETWORK: TestNetwork = TestNetwork; - -// TODO these could be exported from masp_proof crate -/// Spend circuit name -pub const SPEND_NAME: &str = "masp-spend.params"; -/// Output circuit name -pub const OUTPUT_NAME: &str = "masp-output.params"; -/// Convert circuit name -pub const CONVERT_NAME: &str = "masp-convert.params"; - -/// Type alias for convenience and profit -pub type IndexedNoteData = BTreeMap; - -/// Type alias for the entries of [`IndexedNoteData`] iterators -pub type IndexedNoteEntry = (IndexedTx, Transaction); - -/// Shielded transfer -#[derive(Clone, Debug, BorshSerialize, BorshDeserialize, BorshDeserializer)] -pub struct ShieldedTransfer { - /// Shielded transfer builder - pub builder: Builder<(), ExtendedFullViewingKey, ()>, - /// MASP transaction - pub masp_tx: Transaction, - /// Metadata - pub metadata: SaplingMetadata, - /// Epoch in which the transaction was created - pub epoch: Epoch, -} - -/// Shielded pool data for a token -#[derive(Debug, BorshSerialize, BorshDeserialize, BorshDeserializer)] -pub struct MaspTokenRewardData { - pub name: String, - pub address: Address, - pub max_reward_rate: Dec, - pub kp_gain: Dec, - pub kd_gain: Dec, - pub locked_amount_target: Uint, -} - -/// A return type for gen_shielded_transfer -#[derive(Error, Debug)] -pub enum TransferErr { - /// Build error for masp errors - #[error("{0}")] - Build(#[from] builder::Error), - /// errors - #[error("{0}")] - 
General(#[from] Error), -} - -#[derive(Debug, Clone)] -struct ExtractedMaspTx { - fee_unshielding: Option, - inner_tx: Option, -} - -/// MASP verifying keys -pub struct PVKs { - /// spend verifying key - pub spend_vk: PreparedVerifyingKey, - /// convert verifying key - pub convert_vk: PreparedVerifyingKey, - /// output verifying key - pub output_vk: PreparedVerifyingKey, -} - -lazy_static! { - /// MASP verifying keys load from parameters - static ref VERIFIYING_KEYS: PVKs = - { - let params_dir = get_params_dir(); - let [spend_path, convert_path, output_path] = - [SPEND_NAME, CONVERT_NAME, OUTPUT_NAME].map(|p| params_dir.join(p)); - - #[cfg(feature = "download-params")] - if !spend_path.exists() || !convert_path.exists() || !output_path.exists() { - let paths = masp_proofs::download_masp_parameters(None).expect( - "MASP parameters were not present, expected the download to \ - succeed", - ); - if paths.spend != spend_path - || paths.convert != convert_path - || paths.output != output_path - { - panic!( - "unrecoverable: downloaded missing masp params, but to an \ - unfamiliar path" - ) - } - } - // size and blake2b checked here - let params = masp_proofs::load_parameters( - spend_path.as_path(), - output_path.as_path(), - convert_path.as_path(), - ); - PVKs { - spend_vk: params.spend_vk, - convert_vk: params.convert_vk, - output_vk: params.output_vk - } - }; -} - -/// Make sure the MASP params are present and load verifying keys into memory -pub fn preload_verifying_keys() -> &'static PVKs { - &VERIFIYING_KEYS -} - -fn load_pvks() -> &'static PVKs { - &VERIFIYING_KEYS -} - -/// check_spend wrapper -pub fn check_spend( - spend: &SpendDescription<::SaplingAuth>, - sighash: &[u8; 32], - #[cfg(not(feature = "testing"))] ctx: &mut SaplingVerificationContext, - #[cfg(feature = "testing")] - ctx: &mut testing::MockSaplingVerificationContext, - parameters: &PreparedVerifyingKey, -) -> bool { - let zkproof = - 
masp_proofs::bellman::groth16::Proof::read(spend.zkproof.as_slice()); - let zkproof = match zkproof { - Ok(zkproof) => zkproof, - _ => return false, - }; - - ctx.check_spend( - spend.cv, - spend.anchor, - &spend.nullifier.0, - PublicKey(spend.rk.0), - sighash, - spend.spend_auth_sig, - zkproof, - parameters, - ) -} - -/// check_output wrapper -pub fn check_output( - output: &OutputDescription<<::SaplingAuth as masp_primitives::transaction::components::sapling::Authorization>::Proof>, - #[cfg(not(feature = "testing"))] ctx: &mut SaplingVerificationContext, - #[cfg(feature = "testing")] - ctx: &mut testing::MockSaplingVerificationContext, - parameters: &PreparedVerifyingKey, -) -> bool { - let zkproof = - masp_proofs::bellman::groth16::Proof::read(output.zkproof.as_slice()); - let zkproof = match zkproof { - Ok(zkproof) => zkproof, - _ => return false, - }; - let epk = - masp_proofs::jubjub::ExtendedPoint::from_bytes(&output.ephemeral_key.0); - let epk = match epk.into() { - Some(p) => p, - None => return false, - }; - - ctx.check_output(output.cv, output.cmu, epk, zkproof, parameters) -} - -/// check convert wrapper -pub fn check_convert( - convert: &ConvertDescription<<::SaplingAuth as masp_primitives::transaction::components::sapling::Authorization>::Proof>, - #[cfg(not(feature = "testing"))] ctx: &mut SaplingVerificationContext, - #[cfg(feature = "testing")] - ctx: &mut testing::MockSaplingVerificationContext, - parameters: &PreparedVerifyingKey, -) -> bool { - let zkproof = - masp_proofs::bellman::groth16::Proof::read(convert.zkproof.as_slice()); - let zkproof = match zkproof { - Ok(zkproof) => zkproof, - _ => return false, - }; - - ctx.check_convert(convert.cv, convert.anchor, zkproof, parameters) -} - -/// Represents an authorization where the Sapling bundle is authorized and the -/// transparent bundle is unauthorized. 
-pub struct PartialAuthorized; - -impl Authorization for PartialAuthorized { - type SaplingAuth = ::SaplingAuth; - type TransparentAuth = ::TransparentAuth; -} - -/// Partially deauthorize the transparent bundle -pub fn partial_deauthorize( - tx_data: &TransactionData, -) -> Option> { - let transp = tx_data.transparent_bundle().and_then(|x| { - let mut tb = TransparentBuilder::empty(); - for vin in &x.vin { - tb.add_input(TxOut { - asset_type: vin.asset_type, - value: vin.value, - address: vin.address, - }) - .ok()?; - } - for vout in &x.vout { - tb.add_output(&vout.address, vout.asset_type, vout.value) - .ok()?; - } - tb.build() - }); - if tx_data.transparent_bundle().is_some() != transp.is_some() { - return None; - } - Some(TransactionData::from_parts( - tx_data.version(), - tx_data.consensus_branch_id(), - tx_data.lock_time(), - tx_data.expiry_height(), - transp, - tx_data.sapling_bundle().cloned(), - )) -} - -/// Verify a shielded transaction. -pub fn verify_shielded_tx( - transaction: &Transaction, - mut consume_verify_gas: F, -) -> Result<(), StorageError> -where - F: FnMut(u64) -> std::result::Result<(), StorageError>, -{ - tracing::info!("entered verify_shielded_tx()"); - - let sapling_bundle = if let Some(bundle) = transaction.sapling_bundle() { - bundle - } else { - return Err(StorageError::SimpleMessage("no sapling bundle")); - }; - let tx_data = transaction.deref(); - - // Partially deauthorize the transparent bundle - let unauth_tx_data = match partial_deauthorize(tx_data) { - Some(tx_data) => tx_data, - None => { - return Err(StorageError::SimpleMessage( - "Failed to partially de-authorize", - )); - } - }; - - let txid_parts = unauth_tx_data.digest(TxIdDigester); - // the commitment being signed is shared across all Sapling inputs; once - // V4 transactions are deprecated this should just be the txid, but - // for now we need to continue to compute it here. 
- let sighash = - signature_hash(&unauth_tx_data, &SignableInput::Shielded, &txid_parts); - - tracing::info!("sighash computed"); - - let PVKs { - spend_vk, - convert_vk, - output_vk, - } = load_pvks(); - - #[cfg(not(feature = "testing"))] - let mut ctx = SaplingVerificationContext::new(true); - #[cfg(feature = "testing")] - let mut ctx = testing::MockSaplingVerificationContext::new(true); - for spend in &sapling_bundle.shielded_spends { - consume_verify_gas(namada_gas::MASP_VERIFY_SPEND_GAS)?; - if !check_spend(spend, sighash.as_ref(), &mut ctx, spend_vk) { - return Err(StorageError::SimpleMessage("Invalid shielded spend")); - } - } - for convert in &sapling_bundle.shielded_converts { - consume_verify_gas(namada_gas::MASP_VERIFY_CONVERT_GAS)?; - if !check_convert(convert, &mut ctx, convert_vk) { - return Err(StorageError::SimpleMessage( - "Invalid shielded conversion", - )); - } - } - for output in &sapling_bundle.shielded_outputs { - consume_verify_gas(namada_gas::MASP_VERIFY_OUTPUT_GAS)?; - if !check_output(output, &mut ctx, output_vk) { - return Err(StorageError::SimpleMessage("Invalid shielded output")); - } - } - - tracing::info!("passed spend/output verification"); - - let assets_and_values: I128Sum = sapling_bundle.value_balance.clone(); - - tracing::info!( - "accumulated {} assets/values", - assets_and_values.components().len() - ); - - consume_verify_gas(namada_gas::MASP_VERIFY_FINAL_GAS)?; - let result = ctx.final_check( - assets_and_values, - sighash.as_ref(), - sapling_bundle.authorization.binding_sig, - ); - tracing::info!("final check result {result}"); - if !result { - return Err(StorageError::SimpleMessage("MASP final check failed")); - } - Ok(()) -} - -/// Get the path to MASP parameters from [`ENV_VAR_MASP_PARAMS_DIR`] env var or -/// use the default. 
-pub fn get_params_dir() -> PathBuf { - if let Ok(params_dir) = env::var(ENV_VAR_MASP_PARAMS_DIR) { - println!("Using {} as masp parameter folder.", params_dir); - PathBuf::from(params_dir) - } else { - masp_proofs::default_params_folder().unwrap() - } -} - -/// Freeze a Builder into the format necessary for inclusion in a Tx. This is -/// the format used by hardware wallets to validate a MASP Transaction. -struct WalletMap; - -impl - masp_primitives::transaction::components::sapling::builder::MapBuilder< - P1, - ExtendedSpendingKey, - (), - ExtendedFullViewingKey, - > for WalletMap -{ - fn map_params(&self, _s: P1) {} - - fn map_key(&self, s: ExtendedSpendingKey) -> ExtendedFullViewingKey { - (&s).into() - } -} - -impl - MapBuilder - for WalletMap -{ - fn map_notifier(&self, _s: N1) {} -} - -/// Abstracts platform specific details away from the logic of shielded pool -/// operations. -#[cfg_attr(feature = "async-send", async_trait::async_trait)] -#[cfg_attr(not(feature = "async-send"), async_trait::async_trait(?Send))] -pub trait ShieldedUtils: - Sized + BorshDeserialize + BorshSerialize + Default + Clone -{ - /// Get a MASP transaction prover - fn local_tx_prover(&self) -> LocalTxProver; - - /// Load up the currently saved ShieldedContext - async fn load( - &self, - ctx: &mut ShieldedContext, - force_confirmed: bool, - ) -> std::io::Result<()>; - - /// Save the given ShieldedContext for future loads - async fn save( - &self, - ctx: &ShieldedContext, - ) -> std::io::Result<()>; -} - -/// Make a ViewingKey that can view notes encrypted by given ExtendedSpendingKey -pub fn to_viewing_key(esk: &ExtendedSpendingKey) -> FullViewingKey { - ExtendedFullViewingKey::from(esk).fvk -} - -/// Generate a valid diversifier, i.e. one that has a diversified base. Return -/// also this diversified base. 
-pub fn find_valid_diversifier( - rng: &mut R, -) -> (Diversifier, masp_primitives::jubjub::SubgroupPoint) { - let mut diversifier; - let g_d; - // Keep generating random diversifiers until one has a diversified base - loop { - let mut d = [0; 11]; - rng.fill_bytes(&mut d); - diversifier = Diversifier(d); - if let Some(val) = diversifier.g_d() { - g_d = val; - break; - } - } - (diversifier, g_d) -} - -/// Determine if using the current note would actually bring us closer to our -/// target -pub fn is_amount_required(src: I128Sum, dest: I128Sum, delta: I128Sum) -> bool { - let gap = dest - src; - for (asset_type, value) in gap.components() { - if *value > 0 && delta[asset_type] > 0 { - return true; - } - } - false -} - -/// a masp change -#[derive(BorshSerialize, BorshDeserialize, BorshDeserializer, Debug, Clone)] -pub struct MaspChange { - /// the token address - pub asset: Address, - /// the change in the token - pub change: token::Change, -} - -/// a masp amount -pub type MaspAmount = ValueSum<(Option, Address), token::Change>; - -/// An extension of Option's cloned method for pair types -fn cloned_pair((a, b): (&T, &U)) -> (T, U) { - (a.clone(), b.clone()) -} - -/// Represents the amount used of different conversions -pub type Conversions = - BTreeMap, i128)>; - -/// Represents the changes that were made to a list of transparent accounts -pub type TransferDelta = HashMap; - -/// Represents the changes that were made to a list of shielded accounts -pub type TransactionDelta = HashMap; -/// A cache of fetched indexed transactions. -/// -/// The cache is designed so that it either contains -/// all transactions from a given height, or none. 
-#[derive( - BorshSerialize, BorshDeserialize, BorshDeserializer, Debug, Default, Clone, -)] -pub struct Unscanned { - txs: IndexedNoteData, -} - -impl Unscanned { - fn extend(&mut self, items: I) - where - I: IntoIterator, - { - self.txs.extend(items); - } - - fn contains_height(&self, height: u64) -> bool { - self.txs.keys().any(|k| k.height.0 == height) - } - - /// We remove all indices from blocks that have been entirely scanned. - /// If a block is only partially scanned, we leave all the events in the - /// cache. - fn scanned(&mut self, ix: &IndexedTx) { - self.txs.retain(|i, _| i.height >= ix.height); - } -} - -impl IntoIterator for Unscanned { - type IntoIter = ::IntoIter; - type Item = IndexedNoteEntry; - - fn into_iter(self) -> Self::IntoIter { - self.txs.into_iter() - } -} - -#[derive(BorshSerialize, BorshDeserialize, Debug)] -/// The possible sync states of the shielded context -pub enum ContextSyncStatus { - /// The context contains only data that has been confirmed by the protocol - Confirmed, - /// The context contains that that has not yet been confirmed by the - /// protocol and could end up being invalid - Speculative, -} - /// Represents the current state of the shielded pool from the perspective of /// the chosen viewing keys. 
#[derive(BorshSerialize, BorshDeserialize, Debug, Clone)] @@ -608,6 +95,9 @@ pub struct ShieldedContext { pub div_map: HashMap, /// Maps note positions to their witness (used to make merkle paths) pub witness_map: HashMap>, + /// Tracks what each transaction does to various account balances + pub delta_map: + BTreeMap, /// The set of note positions that have been spent pub spents: HashSet, /// Maps asset types to their decodings @@ -638,6 +128,7 @@ impl Default for ShieldedContext { div_map: HashMap::default(), witness_map: HashMap::default(), spents: HashSet::default(), + delta_map: BTreeMap::default(), asset_types: HashMap::default(), vk_map: HashMap::default(), unscanned: Default::default(), @@ -754,32 +245,36 @@ impl ShieldedContext { .block .data; - for idx in txs_results { + for (idx, tx_event) in txs_results { let tx = Tx::try_from(block[idx.0 as usize].as_ref()) .map_err(|e| Error::Other(e.to_string()))?; let ExtractedMaspTx { fee_unshielding, inner_tx, - } = Self::extract_masp_tx(&tx, true).await?; - // Collect the current transaction(s) - fee_unshielding.and_then(|masp_transaction| { + } = extract_masp_tx::( + &tx, + ExtractShieldedActionArg::Event(&tx_event), + true, + ) + .await?; + fee_unshielding.and_then(|(changed_keys, masp_transaction)| { block_sender.send(( IndexedTx { height: height.into(), index: idx, is_wrapper: true, }, - masp_transaction, + (epoch, changed_keys, masp_transaction), )); }); - inner_tx.and_then(|masp_transaction| { + inner_tx.and_then(|(changed_keys, masp_transaction)| { block_sender.send(( IndexedTx { height: height.into(), index: idx, is_wrapper: false, }, - masp_transaction, + (epoch, changed_keys, masp_transaction), )); }) } @@ -798,8 +293,11 @@ impl ShieldedContext { pub fn scan_tx( &mut self, indexed_tx: IndexedTx, + epoch: Epoch, + tx_changed_keys: &BTreeSet, shielded: &Transaction, vk: &ViewingKey, + native_token: Address, ) -> Result<(), Error> { // For tracking the account changes caused by this Transaction let mut 
transaction_delta = TransactionDelta::new(); @@ -885,10 +383,155 @@ impl ShieldedContext { })?; } } + // Record the changes to the transparent accounts + let mut transfer_delta = TransferDelta::new(); + let balance_keys: Vec<_> = tx_changed_keys + .iter() + .filter_map(is_any_shielded_action_balance_key) + .collect(); + let (source, token, amount) = match shielded.transparent_bundle() { + Some(transp_bundle) => { + // Shielding/Unshielding transfer + match (transp_bundle.vin.len(), transp_bundle.vout.len()) { + (0, 0) => { + return Err(Error::Other( + "Expected shielding/unshielding transaction" + .to_string(), + )); + } + (_, 0) => { + // Shielding, only if we are syncing. If in + // speculative context do not update + if let ContextSyncStatus::Confirmed = self.sync_status { + let addresses = balance_keys + .iter() + .find(|addresses| { + if addresses[1] != &MASP { + let transp_addr_commit = + TransparentAddress( + ripemd::Ripemd160::digest( + sha2::Sha256::digest( + &addresses[1] + .serialize_to_vec(), + ), + ) + .into(), + ); + // Vins contain the same address, so we + // can + // just examine the first one + transp_bundle.vin.first().is_some_and( + |vin| { + vin.address + == transp_addr_commit + }, + ) + } else { + false + } + }) + .ok_or_else(|| { + Error::Other( + "Could not find source of MASP tx" + .to_string(), + ) + })?; + + let amount = transp_bundle + .vin + .iter() + .fold(Amount::zero(), |acc, vin| { + acc + Amount::from_u64(vin.value) + }); + + ( + addresses[1].to_owned(), + addresses[0].to_owned(), + amount, + ) + } else { + return Ok(()); + } + } + (0, _) => { + // Unshielding + let token = balance_keys + .iter() + .find(|addresses| { + if addresses[1] != &MASP { + let transp_addr_commit = TransparentAddress( + ripemd::Ripemd160::digest( + sha2::Sha256::digest( + &addresses[1] + .serialize_to_vec(), + ), + ) + .into(), + ); + + // Vouts contain the same address, so we + // can + // just examine the first one + 
transp_bundle.vout.first().is_some_and( + |vout| { + vout.address == transp_addr_commit + }, + ) + } else { + false + } + }) + .ok_or_else(|| { + Error::Other( + "Could not find target of MASP tx" + .to_string(), + ) + })?[0]; + + let amount = transp_bundle + .vout + .iter() + .fold(Amount::zero(), |acc, vout| { + acc + Amount::from_u64(vout.value) + }); + (MASP, token.to_owned(), amount) + } + (_, _) => { + return Err(Error::Other( + "MASP transaction cannot contain both transparent \ + inputs and outputs" + .to_string(), + )); + } + } + } + None => { + // Shielded transfer + (MASP, native_token, Amount::zero()) + } + }; + + transfer_delta.insert( + source, + MaspChange { + asset: token, + change: -amount.change(), + }, + ); + self.delta_map + .insert(indexed_tx, (epoch, transfer_delta, transaction_delta)); Ok(()) } + /// Summarize the effects on shielded and transparent accounts of each + /// Transfer in this context + pub fn get_tx_deltas( + &self, + ) -> &BTreeMap { + &self.delta_map + } + /// Compute the total unspent notes associated with the viewing key in the /// context. If the key is not in the context, then we do not know the /// balance and hence we return None. @@ -1338,6 +981,139 @@ impl ShieldedContext { Ok((val_acc, notes, conversions)) } + /// Compute the combined value of the output notes of the transaction pinned + /// at the given payment address. This computation uses the supplied viewing + /// keys to try to decrypt the output notes. If no transaction is pinned at + /// the given payment address fails with + /// `PinnedBalanceError::NoTransactionPinned`. 
+ pub async fn compute_pinned_balance( + client: &C, + owner: PaymentAddress, + viewing_key: &ViewingKey, + ) -> Result<(I128Sum, Epoch), Error> { + // Check that the supplied viewing key corresponds to given payment + // address + let counter_owner = viewing_key.to_payment_address( + *masp_primitives::sapling::PaymentAddress::diversifier( + &owner.into(), + ), + ); + match counter_owner { + Some(counter_owner) if counter_owner == owner.into() => {} + _ => { + return Err(Error::from(PinnedBalanceError::InvalidViewingKey)); + } + } + // Construct the key for where the transaction ID would be stored + let pin_key = namada_token::storage_key::masp_pin_tx_key(&owner.hash()); + // Obtain the transaction pointer at the key + // If we don't discard the error message then a test fails, + // however the error underlying this will go undetected + let indexed_tx = + rpc::query_storage_value::(client, &pin_key) + .await + .map_err(|_| PinnedBalanceError::NoTransactionPinned)?; + let tx_epoch = query_epoch_at_height(client, indexed_tx.height) + .await? + .ok_or_else(|| { + Error::from(QueryError::General( + "Queried height is greater than the last committed block \ + height" + .to_string(), + )) + })?; + + let block = client + .block(indexed_tx.height.0 as u32) + .await + .map_err(|e| Error::from(QueryError::General(e.to_string())))? + .block + .data; + + let tx = Tx::try_from(block[indexed_tx.index.0 as usize].as_ref()) + .map_err(|e| Error::Other(e.to_string()))?; + let (_, shielded) = extract_masp_tx( + &tx, + ExtractShieldedActionArg::Request::(( + client, + indexed_tx.height, + Some(indexed_tx.index), + )), + false, + ) + .await? 
+ .inner_tx
+ .ok_or_else(|| {
+ Error::Other("Missing shielded inner portion of pinned tx".into())
+ })?;
+
+ // Accumulate the combined output note value into this Amount
+ let mut val_acc = I128Sum::zero();
+ for so in shielded
+ .sapling_bundle()
+ .map_or(&vec![], |x| &x.shielded_outputs)
+ {
+ // Let's try to see if our viewing key can decrypt current note
+ let decres = try_sapling_note_decryption::<_, OutputDescription<<::SaplingAuth as masp_primitives::transaction::components::sapling::Authorization>::Proof>>(
+ &NETWORK,
+ 1.into(),
+ &PreparedIncomingViewingKey::new(&viewing_key.ivk()),
+ so,
+ );
+ match decres {
+ // So the given viewing key does decrypt this current note...
+ Some((note, pa, _memo)) if pa == owner.into() => {
+ val_acc += I128Sum::from_nonnegative(
+ note.asset_type,
+ note.value as i128,
+ )
+ .map_err(|()| {
+ Error::Other(
+ "found note with invalid value or asset type"
+ .to_string(),
+ )
+ })?;
+ }
+ _ => {}
+ }
+ }
+ Ok((val_acc, tx_epoch))
+ }
+
+ /// Compute the combined value of the output notes of the pinned transaction
+ /// at the given payment address if there's any. The asset types may be from
+ /// the epoch of the transaction or even before, so exchange all these
+ /// amounts to the epoch of the transaction in order to get the value that
+ /// would have been displayed in the epoch of the transaction.
+ pub async fn compute_exchanged_pinned_balance(
+ &mut self,
+ context: &impl Namada,
+ owner: PaymentAddress,
+ viewing_key: &ViewingKey,
+ ) -> Result<(ValueSum, I128Sum, Epoch), Error> {
+ // Obtain the balance that will be exchanged
+ let (amt, ep) =
+ Self::compute_pinned_balance(context.client(), owner, viewing_key)
+ .await?;
+ display_line!(context.io(), "Pinned balance: {:?}", amt);
+ // Finally, exchange the balance to the transaction's epoch
+ let computed_amount = self
+ .compute_exchanged_amount(
+ context.client(),
+ context.io(),
+ amt,
+ ep,
+ BTreeMap::new(),
+ )
+ .await?
+ .0; + display_line!(context.io(), "Exchanged amount: {:?}", computed_amount); + let (decoded, undecoded) = self + .decode_combine_sum_to_epoch(context.client(), computed_amount, ep) + .await; + Ok((decoded, undecoded, ep)) + } + /// Convert an amount whose units are AssetTypes to one whose units are /// Addresses that they decode to. All asset types not corresponding to /// the given epoch are ignored. @@ -1475,16 +1251,15 @@ impl ShieldedContext { let memo = MemoBytes::empty(); // Try to get a seed from env var, if any. - #[allow(unused_mut)] - let mut rng = StdRng::from_rng(OsRng).unwrap(); + let rng = StdRng::from_rng(OsRng).unwrap(); #[cfg(feature = "testing")] - let mut rng = if let Ok(seed) = env::var(ENV_VAR_MASP_TEST_SEED) + let rng = if let Ok(seed) = env::var(ENV_VAR_MASP_TEST_SEED) .map_err(|e| Error::Other(e.to_string())) .and_then(|seed| { let exp_str = format!("Env var {ENV_VAR_MASP_TEST_SEED} must be a u64."); - let parsed_seed: u64 = - seed.parse().map_err(|_| Error::Other(exp_str))?; + let parsed_seed: u64 = FromStr::from_str(&seed) + .map_err(|_| Error::Other(exp_str))?; Ok(parsed_seed) }) { tracing::warn!( @@ -1543,12 +1318,13 @@ impl ShieldedContext { u32::MAX - 20 } }; - let mut builder = Builder::::new( + let mut builder = Builder::::new_with_rng( NETWORK, // NOTE: this is going to add 20 more blocks to the actual // expiration but there's no other exposed function that we could // use from the masp crate to specify the expiration better expiration_height.into(), + rng, ); // Convert transaction amount into MASP types @@ -1800,18 +1576,16 @@ impl ShieldedContext { let prover = context.shielded().await.utils.local_tx_prover(); #[cfg(feature = "testing")] let prover = testing::MockTxProver(std::sync::Mutex::new(OsRng)); - let (masp_tx, metadata) = builder.build( - &prover, - &FeeRule::non_standard(U64Sum::zero()), - &mut rng, - &mut RngBuildParams::new(OsRng), - )?; + let (masp_tx, metadata) = + builder.build(&prover, 
&FeeRule::non_standard(U64Sum::zero()))?; if update_ctx { // Cache the generated transfer let mut shielded_ctx = context.shielded_mut().await; shielded_ctx - .pre_cache_transaction(context, &masp_tx) + .pre_cache_transaction( + context, &masp_tx, source, target, token, epoch, + ) .await?; } @@ -1824,14 +1598,30 @@ impl ShieldedContext { } // Updates the internal state with the data of the newly generated - // transaction. More specifically invalidate the spent notes, but do not - // cache the newly produced output descriptions and therefore the merkle - // tree + // transaction. More specifically invalidate the spent notes and the + // transparent balances, but do not cache the newly produced output + // descriptions and therefore the merkle tree async fn pre_cache_transaction( &mut self, context: &impl Namada, masp_tx: &Transaction, + source: &TransferSource, + target: &TransferTarget, + token: &Address, + epoch: Epoch, ) -> Result<(), Error> { + // Need to mock the changed balance keys + let mut changed_balance_keys = BTreeSet::default(); + match (source.effective_address(), target.effective_address()) { + // Shielded transactions don't write balance keys + (MASP, MASP) => (), + (source, target) => { + changed_balance_keys.insert(balance_key(token, &source)); + changed_balance_keys.insert(balance_key(token, &target)); + } + } + + let native_token = query_native_token(context.client()).await?; let vks: Vec<_> = context .wallet() .await @@ -1850,10 +1640,7 @@ impl ShieldedContext { }, |indexed| IndexedTx { height: indexed.height, - index: indexed - .index - .checked_add(1) - .expect("Tx index shouldn't overflow"), + index: indexed.index + 1, is_wrapper: false, }, ); @@ -1861,7 +1648,14 @@ impl ShieldedContext { for vk in vks { self.vk_heights.entry(vk).or_default(); - self.scan_tx(indexed_tx, masp_tx, &vk)?; + self.scan_tx( + indexed_tx, + epoch, + &changed_balance_keys, + masp_tx, + &vk, + native_token.clone(), + )?; } // Save the speculative state for future 
usage self.save().await.map_err(|e| Error::Other(e.to_string()))?; @@ -1929,10 +1723,11 @@ impl ShieldedContext { } } -impl ShieldedContext { +impl ShieldedContext { /// Fetch the current state of the multi-asset shielded pool into a /// ShieldedContext #[allow(clippy::too_many_arguments)] + #[cfg(not(target_family = "wasm"))] pub async fn fetch< C: Client + Sync, IO: Io + Send + Sync, @@ -1994,7 +1789,6 @@ impl ShieldedContext { let (task_scheduler, mut task_manager) = TaskManager::::new(self.clone()); - std::thread::scope(|s| { loop { let (fetch_send, fetch_recv) = @@ -2054,4 +1848,138 @@ impl ShieldedContext { } }) } + + /// Obtain the known effects of all accepted shielded and transparent + /// transactions. If an owner is specified, then restrict the set to only + /// transactions crediting/debiting the given owner. If token is specified, + /// then restrict set to only transactions involving the given token. + #[cfg(not(target_family = "wasm"))] + pub async fn query_tx_deltas( + &mut self, + client: &C, + io: &IO, + query_owner: &Either>, + query_token: &Option
, + viewing_keys: &HashMap, + ) -> Result< + BTreeMap, + Error, + > { + const TXS_PER_PAGE: u8 = 100; + let _ = self.load().await; + let vks = viewing_keys; + let fvks: Vec<_> = vks + .values() + .map(|fvk| ExtendedFullViewingKey::from(*fvk).fvk.vk) + .collect(); + // Required for filtering out rejected transactions from Tendermint + // responses + let block_results = rpc::query_results(client).await?; + self.fetch(client, &DefaultLogger::new(io), None, None, 1, &[], &fvks) + .await?; + // Save the update state so that future fetches can be short-circuited + let _ = self.save().await; + + let mut transfers = self.get_tx_deltas().clone(); + // Construct the set of addresses relevant to user's query + let relevant_addrs = match &query_owner { + Either::Left(BalanceOwner::Address(owner)) => vec![owner.clone()], + // MASP objects are dealt with outside of tx_search + Either::Left(BalanceOwner::FullViewingKey(_viewing_key)) => vec![], + Either::Left(BalanceOwner::PaymentAddress(_owner)) => vec![], + // Unspecified owner means all known addresses are considered + // relevant + Either::Right(addrs) => addrs.clone(), + }; + // Find all transactions to or from the relevant address set + for addr in relevant_addrs { + for prop in ["transfer.source", "transfer.target"] { + // Query transactions involving the current address + let mut tx_query = Query::eq(prop, addr.encode()); + // Elaborate the query if requested by the user + if let Some(token) = &query_token { + tx_query = + tx_query.and_eq("transfer.token", token.encode()); + } + for page in 1.. { + let txs = &client + .tx_search( + tx_query.clone(), + true, + page, + TXS_PER_PAGE, + Order::Ascending, + ) + .await + .map_err(|e| { + Error::from(QueryError::General(format!( + "for transaction: {e}" + ))) + })? 
+ .txs; + for response_tx in txs { + let height = BlockHeight(response_tx.height.value()); + let idx = TxIndex(response_tx.index); + // Only process yet unprocessed transactions which have + // been accepted by node VPs + // TODO: Check that wrappers shouldn't be considered + // here + let should_process = + !transfers.contains_key(&IndexedTx { + height, + index: idx, + is_wrapper: false, + }) && block_results[u64::from(height) as usize] + .is_accepted(idx.0 as usize); + if !should_process { + continue; + } + let tx = Tx::try_from(response_tx.tx.as_ref()) + .map_err(|e| Error::Other(e.to_string()))?; + let mut wrapper = None; + let mut transfer = None; + extract_payload(tx, &mut wrapper, &mut transfer)?; + // Epoch data is not needed for transparent transactions + let epoch = + wrapper.map(|x| x.epoch).unwrap_or_default(); + if let Some(transfer) = transfer { + // Skip MASP addresses as they are already handled + // by ShieldedContext + if transfer.source == MASP + || transfer.target == MASP + { + continue; + } + // Describe how a Transfer simply subtracts from one + // account and adds the same to another + + let delta = TransferDelta::from([( + transfer.source.clone(), + MaspChange { + asset: transfer.token.clone(), + change: -transfer.amount.amount().change(), + }, + )]); + + // No shielded accounts are affected by this + // Transfer + transfers.insert( + IndexedTx { + height, + index: idx, + is_wrapper: false, + }, + (epoch, delta, TransactionDelta::new()), + ); + } + } + // An incomplete page signifies no more transactions + if (txs.len() as u8) < TXS_PER_PAGE { + break; + } + } + } + } + Ok(transfers) + } } diff --git a/crates/sdk/src/masp/types.rs b/crates/sdk/src/masp/types.rs index 50fdfe31f3..fb35d15f0b 100644 --- a/crates/sdk/src/masp/types.rs +++ b/crates/sdk/src/masp/types.rs @@ -2,7 +2,6 @@ use std::collections::{BTreeMap, BTreeSet, HashMap}; use std::io::{Read, Write}; use std::sync::{Arc, Mutex}; -use bls12_381::Bls12; use 
borsh_ext::BorshSerializeExt; use masp_primitives::asset_type::AssetType; use masp_primitives::convert::AllowedConversion; @@ -16,6 +15,7 @@ use masp_primitives::transaction::{ }; use masp_primitives::zip32::{ExtendedFullViewingKey, ExtendedSpendingKey}; use masp_proofs::bellman::groth16::PreparedVerifyingKey; +use masp_proofs::bls12_381::Bls12; use namada_core::address::Address; use namada_core::borsh::{BorshDeserialize, BorshSerialize}; use namada_core::dec::Dec; diff --git a/crates/sdk/src/masp/utils.rs b/crates/sdk/src/masp/utils.rs index 5b3850dc62..832c6d5ed2 100644 --- a/crates/sdk/src/masp/utils.rs +++ b/crates/sdk/src/masp/utils.rs @@ -130,7 +130,7 @@ pub(super) fn extract_payload( // Retrieves all the indexes and tx events at the specified height which refer // to a valid masp transaction. If an index is given, it filters only the // transactions with an index equal or greater to the provided one. -pub(super) async fn get_indexed_masp_events_at_height( +pub(super) async fn get_indexed_masp_events_at_height( client: &C, height: BlockHeight, first_idx_to_query: Option, @@ -281,7 +281,10 @@ pub(super) async fn extract_masp_tx<'args, C: Client + Sync>( } // Extract the changed keys and Transaction hash from a masp over ibc message -pub(super) async fn extract_payload_from_shielded_action<'args, C: Client>( +pub(super) async fn extract_payload_from_shielded_action< + 'args, + C: Client + Sync +>( tx_data: &[u8], mut args: ExtractShieldedActionArg<'args, C>, ) -> Result<(BTreeSet, Transfer), Error> { @@ -522,12 +525,10 @@ pub(super) struct TaskRunner { ctx: Arc>>, } -impl TaskManager { +impl TaskManager { /// Create a client proxy and spawn a process to forward /// proxy requests. 
- pub(super) fn new( - ctx: ShieldedContext, - ) -> (TaskRunner, Self) { + pub(super) fn new(ctx: ShieldedContext) -> (TaskRunner, Self) { let (save_send, save_recv) = tokio::sync::mpsc::channel(100); ( TaskRunner { @@ -555,8 +556,7 @@ impl TaskManager { } } -impl TaskRunner { - +impl TaskRunner { pub(super) fn complete(&self) { self.action.blocking_send(Action::Complete).unwrap() } @@ -651,14 +651,14 @@ pub trait ProgressLogger { #[derive(Debug, Clone)] pub struct DefaultLogger<'io, IO: Io> { io: &'io IO, - progress: Arc> + progress: Arc>, } impl<'io, IO: Io> DefaultLogger<'io, IO> { pub fn new(io: &'io IO) -> Self { Self { io, - progress: Arc::new(Mutex::new(Default::default())) + progress: Arc::new(Mutex::new(Default::default())), } } } @@ -671,13 +671,13 @@ struct IterProgress { struct DefaultFetchIterator where - I: Iterator, + I: Iterator, { inner: I, - progress: Arc> + progress: Arc>, } -impl> Iterator for DefaultFetchIterator { +impl> Iterator for DefaultFetchIterator { type Item = u64; fn next(&mut self) -> Option { @@ -689,14 +689,13 @@ impl> Iterator for DefaultFetchIterator { } impl<'io, IO: Io> ProgressLogger for DefaultLogger<'io, IO> { - fn io(&self) -> &IO { self.io } fn fetch(&self, items: I) -> impl Iterator - where - I: Iterator, + where + I: Iterator, { { let mut locked = self.progress.lock().unwrap(); @@ -709,8 +708,8 @@ impl<'io, IO: Io> ProgressLogger for DefaultLogger<'io, IO> { } fn scan(&self, items: I) -> impl Iterator - where - I: IntoIterator, + where + I: IntoIterator, { let items: Vec<_> = items.into_iter().collect(); items.into_iter() @@ -720,4 +719,4 @@ impl<'io, IO: Io> ProgressLogger for DefaultLogger<'io, IO> { let locked = self.progress.lock().unwrap(); locked.length - locked.index } -} \ No newline at end of file +} From ccc0ce9ed76005e9cdde67bcc969a3205228b740 Mon Sep 17 00:00:00 2001 From: satan Date: Tue, 16 Apr 2024 10:49:08 +0200 Subject: [PATCH 04/29] Cleaned up shielded sync --- crates/apps_lib/src/client/masp.rs | 12 +- 
crates/sdk/src/masp/shielded_ctx.rs | 544 ++++++++++++++++------------ crates/sdk/src/masp/types.rs | 120 +++++- crates/sdk/src/masp/utils.rs | 179 ++++----- 4 files changed, 539 insertions(+), 316 deletions(-) diff --git a/crates/apps_lib/src/client/masp.rs b/crates/apps_lib/src/client/masp.rs index bbdba7c755..e9c3b23ac7 100644 --- a/crates/apps_lib/src/client/masp.rs +++ b/crates/apps_lib/src/client/masp.rs @@ -174,6 +174,12 @@ impl<'io, IO: Io> StdoutDrawer<'io, IO> { } } +impl<'io, IO: Io> Drop for StdoutDrawer<'io, IO> { + fn drop(&mut self) { + display_line!(self.io, "\n\n"); + } +} + pub struct CliLogging<'io, T, I, IO> where T: Debug, @@ -268,7 +274,7 @@ impl<'io, IO: Io> CliLogger<'io, IO> { } } -impl<'io, IO: Io> ProgressLogger for CliLogger<'io, IO> { +impl<'io, IO: Io + Send + Sync> ProgressLogger for CliLogger<'io, IO> { fn io(&self) -> &IO { let io = { let locked = self.drawer.lock().unwrap(); @@ -284,9 +290,9 @@ impl<'io, IO: Io> ProgressLogger for CliLogger<'io, IO> { CliLogging::new(items, ProgressType::Fetch, self.drawer.clone()) } - fn scan(&self, items: I) -> impl Iterator + fn scan(&self, items: I) -> impl Iterator + Send where - I: Iterator, + I: Iterator + Send, { CliLogging::new(items, ProgressType::Scan, self.drawer.clone()) } diff --git a/crates/sdk/src/masp/shielded_ctx.rs b/crates/sdk/src/masp/shielded_ctx.rs index dfe55e50a1..0cced04627 100644 --- a/crates/sdk/src/masp/shielded_ctx.rs +++ b/crates/sdk/src/masp/shielded_ctx.rs @@ -10,7 +10,6 @@ use itertools::Either; use masp_primitives::asset_type::AssetType; use masp_primitives::consensus::TestNetwork; use masp_primitives::convert::AllowedConversion; -use masp_primitives::ff::PrimeField; use masp_primitives::memo::MemoBytes; use masp_primitives::merkle_tree::{ CommitmentTree, IncrementalWitness, MerklePath, @@ -41,6 +40,7 @@ use namada_core::token::Amount; use namada_token::{self as token, Denomination, MaspDigitPos}; use namada_tx::Tx; use rand_core::OsRng; +use rayon::prelude::*; 
use ripemd::Digest as RipemdDigest; use sha2::Digest; use tendermint_rpc::query::Query; @@ -52,8 +52,9 @@ use crate::eth_bridge::token::storage_key::{ }; use crate::io::Io; use crate::masp::types::{ - ContextSyncStatus, Conversions, MaspAmount, MaspChange, ShieldedTransfer, - TransactionDelta, TransferDelta, TransferErr, Unscanned, WalletMap, + ContextSyncStatus, Conversions, DecryptedData, DecryptedDataCache, + MaspAmount, MaspChange, ScannedData, ShieldedTransfer, TransactionDelta, + TransferDelta, TransferErr, Unscanned, WalletMap, }; use crate::masp::utils::{ cloned_pair, extract_masp_tx, extract_payload, fetch_channel, @@ -108,6 +109,10 @@ pub struct ShieldedContext { pub tx_note_map: BTreeMap, /// A cache of fetched indexed txs. pub unscanned: Unscanned, + /// We cannot update spent notes until all fetched notes have been + /// decrypted. This temporarily stores the relevant encrypted data in + /// case syncing is interrupted. + pub decrypted_note_cache: DecryptedDataCache, /// The sync state of the context pub sync_status: ContextSyncStatus, } @@ -132,6 +137,7 @@ impl Default for ShieldedContext { asset_types: HashMap::default(), vk_map: HashMap::default(), unscanned: Default::default(), + decrypted_note_cache: Default::default(), sync_status: ContextSyncStatus::Confirmed, } } @@ -162,36 +168,33 @@ impl ShieldedContext { /// Update the merkle tree of witnesses the first time we /// scan a new MASP transaction. 
- pub(crate) fn update_witness_map( - &mut self, - indexed_tx: IndexedTx, - shielded: &Transaction, - ) -> Result<(), Error> { - let mut note_pos = self.tree.size(); - self.tx_note_map.insert(indexed_tx, note_pos); - for so in shielded - .sapling_bundle() - .map_or(&vec![], |x| &x.shielded_outputs) - { - // Create merkle tree leaf node from note commitment - let node = Node::new(so.cmu.to_repr()); - // Update each merkle tree in the witness map with the latest - // addition - for (_, witness) in self.witness_map.iter_mut() { - witness.append(node).map_err(|()| { - Error::Other("note commitment tree is full".to_string()) - })?; - } - self.tree.append(node).map_err(|()| { - Error::Other("note commitment tree is full".to_string()) - })?; - // Finally, make it easier to construct merkle paths to this new - // note - let witness = IncrementalWitness::::from_tree(&self.tree); - self.witness_map.insert(note_pos, witness); - note_pos += 1; - } - Ok(()) + pub(crate) fn update_witness_map(&mut self) -> Result<(), Error> { + // let mut note_pos = self.tree.size(); + // self.tx_note_map.insert(indexed_tx, note_pos); + // for so in shielded + // .sapling_bundle() + // .map_or(&vec![], |x| &x.shielded_outputs) + // { + // Create merkle tree leaf node from note commitment + // let node = Node::new(so.cmu.to_repr()); + // Update each merkle tree in the witness map with the latest + // addition + // for (_, witness) in self.witness_map.iter_mut() { + // witness.append(node).map_err(|()| { + // Error::Other("note commitment tree is full".to_string()) + // })?; + // } + // self.tree.append(node).map_err(|()| { + // Error::Other("note commitment tree is full".to_string()) + // })?; + // Finally, make it easier to construct merkle paths to this new + // note + // let witness = IncrementalWitness::::from_tree(&self.tree); + // self.witness_map.insert(note_pos, witness); + // note_pos += 1; + // } + // Ok(()) + todo!(); } /// Obtain a chronologically-ordered list of all accepted shielded 
@@ -282,27 +285,28 @@ impl ShieldedContext { Ok(()) } - /// Applies the given transaction to the supplied context. More precisely, - /// the shielded transaction's outputs are added to the commitment tree. - /// Newly discovered notes are associated to the supplied viewing keys. Note + /// Attempts to decrypt the note in each transaction. Successfully + /// decrypted notes are associated to the supplied viewing keys. Note /// nullifiers are mapped to their originating notes. Note positions are - /// associated to notes, memos, and diversifiers. And the set of notes that - /// we have spent are updated. The witness map is maintained to make it - /// easier to construct note merkle paths in other code. See - /// - pub fn scan_tx( - &mut self, + /// associated to notes, memos, and diversifiers. + /// + /// An append-only idempotent diff of these changes is returned. This + /// allows this function to be run in parallel. The diffs are collected + /// and applied by a separate process. + /// + /// See + pub(super) fn scan_tx( + sync_status: ContextSyncStatus, indexed_tx: IndexedTx, - epoch: Epoch, - tx_changed_keys: &BTreeSet, + tx_note_map: &BTreeMap, shielded: &Transaction, vk: &ViewingKey, - native_token: Address, - ) -> Result<(), Error> { + ) -> Result<(ScannedData, TransactionDelta), Error> { // For tracking the account changes caused by this Transaction let mut transaction_delta = TransactionDelta::new(); - if let ContextSyncStatus::Confirmed = self.sync_status { - let mut note_pos = self.tx_note_map[&indexed_tx]; + let mut scanned_data = ScannedData::default(); + if let ContextSyncStatus::Confirmed = sync_status { + let mut note_pos = tx_note_map[&indexed_tx]; // Listen for notes sent to our viewing keys, only if we are syncing // (i.e. 
in a confirmed status) for so in shielded @@ -311,7 +315,7 @@ impl ShieldedContext { { // Let's try to see if this viewing key can decrypt latest // note - let notes = self.pos_map.entry(*vk).or_default(); + let notes = scanned_data.pos_map.entry(*vk).or_default(); let decres = try_sapling_note_decryption::<_, OutputDescription<<::SaplingAuth as masp_primitives::transaction::components::sapling::Authorization>::Proof>>( &NETWORK, 1.into(), @@ -330,12 +334,12 @@ impl ShieldedContext { Error::Other("Can not get nullifier".to_string()) })?, ); - self.note_map.insert(note_pos, note); - self.memo_map.insert(note_pos, memo); + scanned_data.note_map.insert(note_pos, note); + scanned_data.memo_map.insert(note_pos, memo); // The payment address' diversifier is required to spend // note - self.div_map.insert(note_pos, *pa.diversifier()); - self.nf_map.insert(nf, note_pos); + scanned_data.div_map.insert(note_pos, *pa.diversifier()); + scanned_data.nf_map.insert(nf, note_pos); // Note the account changes let balance = transaction_delta .entry(*vk) @@ -350,61 +354,133 @@ impl ShieldedContext { .to_string(), ) })?; - self.vk_map.insert(note_pos, *vk); + scanned_data.vk_map.insert(note_pos, *vk); } note_pos += 1; } } + Ok((scanned_data, transaction_delta)) + } - // Cancel out those of our notes that have been spent - for ss in shielded - .sapling_bundle() - .map_or(&vec![], |x| &x.shielded_spends) + /// Parse the cache of decrypted notes: + /// * nullify notes that have been spent + /// * update balances of each viewing key + pub(super) fn nullify_spent_notes( + &mut self, + native_token: &Address, + ) -> Result<(), Error> { + for ((indexed_tx, _vk), decrypted_data) in + self.decrypted_note_cache.drain() { - // If the shielded spend's nullifier is in our map, then target note - // is rendered unusable - if let Some(note_pos) = self.nf_map.get(&ss.nullifier) { - self.spents.insert(*note_pos); - // Note the account changes - let balance = transaction_delta - 
.entry(self.vk_map[note_pos]) - .or_insert_with(I128Sum::zero); - let note = self.note_map[note_pos]; - - *balance -= I128Sum::from_nonnegative( - note.asset_type, - note.value as i128, - ) - .map_err(|()| { - Error::Other( - "found note with invalid value or asset type" - .to_string(), + let DecryptedData { + tx: shielded, + keys: tx_changed_keys, + delta: mut transaction_delta, + epoch, + } = decrypted_data; + + // Cancel out those of our notes that have been spent + for ss in shielded + .sapling_bundle() + .map_or(&vec![], |x| &x.shielded_spends) + { + // If the shielded spend's nullifier is in our map, then target + // note is rendered unusable + if let Some(note_pos) = self.nf_map.get(&ss.nullifier) { + self.spents.insert(*note_pos); + // Note the account changes + let balance = transaction_delta + .entry(self.vk_map[note_pos]) + .or_insert_with(I128Sum::zero); + let note = self.note_map[note_pos]; + + *balance -= I128Sum::from_nonnegative( + note.asset_type, + note.value as i128, ) - })?; + .map_err(|_| { + Error::Other( + "found note with invalid value or asset type" + .to_string(), + ) + })?; + } } - } - // Record the changes to the transparent accounts - let mut transfer_delta = TransferDelta::new(); - let balance_keys: Vec<_> = tx_changed_keys - .iter() - .filter_map(is_any_shielded_action_balance_key) - .collect(); - let (source, token, amount) = match shielded.transparent_bundle() { - Some(transp_bundle) => { - // Shielding/Unshielding transfer - match (transp_bundle.vin.len(), transp_bundle.vout.len()) { - (0, 0) => { - return Err(Error::Other( - "Expected shielding/unshielding transaction" - .to_string(), - )); - } - (_, 0) => { - // Shielding, only if we are syncing. 
If in - // speculative context do not update - if let ContextSyncStatus::Confirmed = self.sync_status { - let addresses = balance_keys + let mut transfer_delta = TransferDelta::new(); + let balance_keys: Vec<_> = tx_changed_keys + .iter() + .filter_map(is_any_shielded_action_balance_key) + .collect(); + let (source, token, amount) = match shielded.transparent_bundle() { + Some(transp_bundle) => { + // Shielding/Unshielding transfer + match (transp_bundle.vin.len(), transp_bundle.vout.len()) { + (0, 0) => { + return Err(Error::Other( + "Expected shielding/unshielding transaction" + .to_string(), + )); + } + (_, 0) => { + // Shielding, only if we are syncing. If in + // speculative context do not update + if let ContextSyncStatus::Confirmed = + self.sync_status + { + let addresses = balance_keys + .iter() + .find(|addresses| { + if addresses[1] != &MASP { + let transp_addr_commit = + TransparentAddress( + ripemd::Ripemd160::digest( + sha2::Sha256::digest( + &addresses[1] + .serialize_to_vec(), + ), + ) + .into(), + ); + // Vins contain the same address, so we + // can + // just examine the first one + transp_bundle.vin.first().is_some_and( + |vin| { + vin.address + == transp_addr_commit + }, + ) + } else { + false + } + }) + .ok_or_else(|| { + Error::Other( + "Could not find source of MASP tx" + .to_string(), + ) + })?; + + let amount = transp_bundle.vin.iter().fold( + Amount::zero(), + |acc, vin| { + acc + Amount::from_u64(vin.value) + }, + ); + + ( + addresses[1].to_owned(), + addresses[0].to_owned(), + amount, + ) + } else { + return Ok(()); + } + } + (0, _) => { + // Unshielding + let token = balance_keys .iter() .find(|addresses| { if addresses[1] != &MASP { @@ -418,12 +494,12 @@ impl ShieldedContext { ) .into(), ); - // Vins contain the same address, so we + // Vouts contain the same address, so we // can // just examine the first one - transp_bundle.vin.first().is_some_and( - |vin| { - vin.address + transp_bundle.vout.first().is_some_and( + |vout| { + 
vout.address == transp_addr_commit }, ) @@ -433,94 +509,42 @@ impl ShieldedContext { }) .ok_or_else(|| { Error::Other( - "Could not find source of MASP tx" + "Could not find target of MASP tx" .to_string(), ) - })?; - + })?[0]; let amount = transp_bundle - .vin + .vout .iter() - .fold(Amount::zero(), |acc, vin| { - acc + Amount::from_u64(vin.value) + .fold(Amount::zero(), |acc, vout| { + acc + Amount::from_u64(vout.value) }); - - ( - addresses[1].to_owned(), - addresses[0].to_owned(), - amount, - ) - } else { - return Ok(()); + (MASP, token.to_owned(), amount) + } + (_, _) => { + return Err(Error::Other( + "MASP transaction cannot contain both \ + transparent inputs and outputs" + .to_string(), + )); } - } - (0, _) => { - // Unshielding - let token = balance_keys - .iter() - .find(|addresses| { - if addresses[1] != &MASP { - let transp_addr_commit = TransparentAddress( - ripemd::Ripemd160::digest( - sha2::Sha256::digest( - &addresses[1] - .serialize_to_vec(), - ), - ) - .into(), - ); - - // Vouts contain the same address, so we - // can - // just examine the first one - transp_bundle.vout.first().is_some_and( - |vout| { - vout.address == transp_addr_commit - }, - ) - } else { - false - } - }) - .ok_or_else(|| { - Error::Other( - "Could not find target of MASP tx" - .to_string(), - ) - })?[0]; - - let amount = transp_bundle - .vout - .iter() - .fold(Amount::zero(), |acc, vout| { - acc + Amount::from_u64(vout.value) - }); - (MASP, token.to_owned(), amount) - } - (_, _) => { - return Err(Error::Other( - "MASP transaction cannot contain both transparent \ - inputs and outputs" - .to_string(), - )); } } - } - None => { - // Shielded transfer - (MASP, native_token, Amount::zero()) - } - }; - - transfer_delta.insert( - source, - MaspChange { - asset: token, - change: -amount.change(), - }, - ); - self.delta_map - .insert(indexed_tx, (epoch, transfer_delta, transaction_delta)); + None => { + // Shielded transfer + (MASP, native_token.clone(), Amount::zero()) + } + }; + 
transfer_delta.insert( + source, + MaspChange { + asset: token, + change: -amount.change(), + }, + ); + self.delta_map + .insert(indexed_tx, (epoch, transfer_delta, transaction_delta)); + } Ok(()) } @@ -1645,18 +1669,30 @@ impl ShieldedContext { }, ); self.sync_status = ContextSyncStatus::Speculative; + let mut scanned_data = ScannedData::default(); for vk in vks { self.vk_heights.entry(vk).or_default(); - self.scan_tx( + let (scanned, tx_delta) = Self::scan_tx( + ContextSyncStatus::Speculative, indexed_tx, - epoch, - &changed_balance_keys, + &self.tx_note_map, masp_tx, &vk, - native_token.clone(), )?; + scanned_data.merge(scanned); + scanned_data.decrypted_note_cache.insert((indexed_tx, vk), DecryptedData { + tx: masp_tx.clone(), + keys: changed_balance_keys.clone(), + delta: tx_delta, + epoch, + }); } + let mut temp_cache = DecryptedDataCache::default(); + std::mem::swap(&mut temp_cache, &mut self.decrypted_note_cache); + scanned_data.apply_to(self); + self.nullify_spent_notes(&native_token)?; + std::mem::swap(&mut temp_cache, &mut self.decrypted_note_cache); // Save the speculative state for future usage self.save().await.map_err(|e| Error::Other(e.to_string()))?; @@ -1764,6 +1800,7 @@ impl ShieldedContext { for vk in fvks { self.vk_heights.entry(*vk).or_default(); } + // Save the context to persist newly added keys let _ = self.save().await; let native_token = query_native_token(client).await?; @@ -1774,9 +1811,9 @@ impl ShieldedContext { }; let last_witnessed_tx = self.tx_note_map.keys().max().cloned(); // get the bounds on the block heights to fetch - let start_idx = - std::cmp::min(last_witnessed_tx, least_idx).map(|ix| ix.height); - let start_idx = start_query_height.or(start_idx); + let start_height = + std::cmp::min(last_witnessed_tx, least_idx).map(|idx| idx.height); + let start_height = start_query_height.or(start_height); // Query for the last produced block height let last_block_height = query_block(client) .await? 
@@ -1785,60 +1822,109 @@ impl ShieldedContext { let last_query_height = last_query_height.unwrap_or(last_block_height); let last_query_height = std::cmp::min(last_query_height, last_block_height); + self.update_witness_map()?; + let vk_heights = self.vk_heights.clone(); + // the task scheduler allows the thread performing trial decryptions to + // communicate errors and actions (such as saving and updating state). + // The task manager runs on the main thread and performs the tasks + // scheduled by the scheduler. let (task_scheduler, mut task_manager) = TaskManager::::new(self.clone()); + // The main loop that performs + // * fetching and caching MASP txs in sequence + // * trial decryption of each note to determine if it + // is owned by a viewing key in this context and caching + // the result. + // * Nullifying spent notes and updating balances for each + // viewing key + // * Regular saving of the context to disk in case of process + // interrupts std::thread::scope(|s| { loop { + // a stateful channel that communicates notes fetched to the + // trial decryption process let (fetch_send, fetch_recv) = fetch_channel::new(self.unscanned.clone()); + + // we trial-decrypt all notes fetched in parallel and schedule + // the state changes to be applied to the shielded context + // back on the main thread let decryption_handle = s.spawn(|| { let txs = logger.scan(fetch_recv); - for (indexed_tx, (epoch, tx, stx)) in txs { - if Some(indexed_tx) > last_witnessed_tx { - task_scheduler - .update_witness_map(indexed_tx, &stx)?; - } - let mut vk_heights = task_scheduler.get_vk_heights(); - for (vk, h) in vk_heights - .iter_mut() - .filter(|(_vk, h)| **h < Some(indexed_tx)) - { - task_scheduler.scan_tx( - indexed_tx, - epoch, - &tx, - &stx, - vk, - native_token.clone(), - )?; - *h = Some(indexed_tx); - } - // possibly remove unneeded elements from the cache. 
- self.unscanned.scanned(&indexed_tx); - task_scheduler.set_vk_heights(vk_heights); - task_scheduler.save(indexed_tx.height); - } - task_scheduler.complete(); + txs.par_bridge().try_for_each( + |(indexed_tx, (epoch, tx, stx))| { + let mut scanned_data = ScannedData::default(); + for (vk, _) in vk_heights + .iter() + .filter(|(_vk, h)| **h < Some(indexed_tx)) + { + // if this note is in the cache, skip it. + if scanned_data.decrypted_note_cache.contains(&indexed_tx, vk) { + continue; + } + // attempt to decrypt the note and get the state changes + let (scanned, tx_delta) = task_scheduler + .scan_tx( + self.sync_status, + indexed_tx, + &self.tx_note_map, + &stx, + vk, + )?; + // add the new state changes to the aggregated + scanned_data.merge(scanned); + // add the note to the cache + scanned_data.decrypted_note_cache.insert( + (indexed_tx, *vk), + DecryptedData { + tx: stx.clone(), + keys: tx.clone(), + delta: tx_delta, + epoch, + }, + ); + } + // save the aggregated state changes + task_scheduler.save(scanned_data, indexed_tx); + Ok::<(), Error>(()) + }, + )?; + // signal that the process has finished without error + task_scheduler.complete(false); Ok::<(), Error>(()) }); - _ = tokio::task::block_in_place(|| { - tokio::runtime::Handle::current().block_on(async { - tokio::join!( - task_manager.run(), - Self::fetch_shielded_transfers( - fetch_send, - client, - logger, - start_idx, - last_query_height, + // fetch MASP txs and coordinate the state changes from + // scanning fetched txs asynchronously. + let (decrypt_res, fetch_res) = + tokio::task::block_in_place(|| { + tokio::runtime::Handle::current().block_on(async { + tokio::join!( + task_manager.run(&native_token), + Self::fetch_shielded_transfers( + fetch_send, + client, + logger, + start_height, + last_query_height, + ) ) - ) - }) - }); + }) + }); + // if the scanning process errored, return that error here and exit. + decrypt_res?; + // shut down the scanning thread. 
decryption_handle.join().unwrap()?; + // if fetching errored, log it. But this is recoverable. + if let Err(e) = fetch_res { + display_line!( + logger.io(), + "Error encountered while fetching: {}", + e.to_string() + ); + } // if fetching failed for before completing, we restart // the fetch process. Otherwise, we can break the loop. diff --git a/crates/sdk/src/masp/types.rs b/crates/sdk/src/masp/types.rs index fb35d15f0b..48da0a4a8f 100644 --- a/crates/sdk/src/masp/types.rs +++ b/crates/sdk/src/masp/types.rs @@ -5,8 +5,11 @@ use std::sync::{Arc, Mutex}; use borsh_ext::BorshSerializeExt; use masp_primitives::asset_type::AssetType; use masp_primitives::convert::AllowedConversion; +use masp_primitives::memo::MemoBytes; use masp_primitives::merkle_tree::MerklePath; -use masp_primitives::sapling::{Node, ViewingKey}; +use masp_primitives::sapling::{ + Diversifier, Node, Note, Nullifier, ViewingKey, +}; use masp_primitives::transaction::builder::{Builder, MapBuilder}; use masp_primitives::transaction::components::sapling::builder::SaplingMetadata; use masp_primitives::transaction::components::{I128Sum, ValueSum}; @@ -28,6 +31,7 @@ use namada_token as token; use thiserror::Error; use crate::error::Error; +use crate::masp::{ShieldedContext, ShieldedUtils}; /// Type alias for convenience and profit pub type IndexedNoteData = BTreeMap< @@ -133,6 +137,120 @@ pub struct MaspChange { pub change: token::Change, } +#[derive(Debug, Default)] +/// Data returned by successfully scanning a tx +pub(super) struct ScannedData { + pub div_map: HashMap, + pub memo_map: HashMap, + pub note_map: HashMap, + pub nf_map: HashMap, + pub pos_map: HashMap>, + pub vk_map: HashMap, + pub decrypted_note_cache: DecryptedDataCache, +} + +impl ScannedData { + pub(super) fn apply_to( + mut self, + ctx: &mut ShieldedContext, + ) { + for (k, v) in self.note_map.drain() { + ctx.note_map.insert(k, v); + } + for (k, v) in self.nf_map.drain() { + ctx.nf_map.insert(k, v); + } + for (k, v) in 
self.pos_map.drain() { + let map = ctx.pos_map.entry(k).or_default(); + for ix in v { + map.insert(ix); + } + } + for (k, v) in self.div_map.drain() { + ctx.div_map.insert(k, v); + } + for (k, v) in self.vk_map.drain() { + ctx.vk_map.insert(k, v); + } + for (k, v) in self.memo_map.drain() { + ctx.memo_map.insert(k, v); + } + ctx.decrypted_note_cache.merge(self.decrypted_note_cache); + } + + pub(super) fn merge(&mut self, mut other: Self) { + for (k, v) in other.note_map.drain() { + self.note_map.insert(k, v); + } + for (k, v) in other.nf_map.drain() { + self.nf_map.insert(k, v); + } + for (k, v) in other.pos_map.drain() { + let map = self.pos_map.entry(k).or_default(); + for ix in v { + map.insert(ix); + } + } + for (k, v) in other.div_map.drain() { + self.div_map.insert(k, v); + } + for (k, v) in other.vk_map.drain() { + self.vk_map.insert(k, v); + } + for (k, v) in other.memo_map.drain() { + self.memo_map.insert(k, v); + } + for (k, v) in other.decrypted_note_cache.inner { + self.decrypted_note_cache.insert(k, v); + } + } +} + +#[derive(Debug, Clone, BorshSerialize, BorshDeserialize)] +/// Data extracted from a successfully decrypted MASP note +pub struct DecryptedData { + pub tx: Transaction, + pub keys: BTreeSet, + pub delta: TransactionDelta, + pub epoch: Epoch, +} + +/// A cache of decrypted txs that have not yet been +/// updated to the shielded ctx. Necessary in case +/// scanning gets interrupted. 
+#[derive(Debug, Clone, Default, BorshSerialize, BorshDeserialize)] +pub struct DecryptedDataCache { + inner: HashMap<(IndexedTx, ViewingKey), DecryptedData>, +} + +impl DecryptedDataCache { + pub fn insert( + &mut self, + key: (IndexedTx, ViewingKey), + value: DecryptedData, + ) { + self.inner.insert(key, value); + } + + pub fn merge(&mut self, mut other: Self) { + for (k, v) in other.inner.drain() { + self.insert(k, v); + } + } + + pub fn contains(&self, ix: &IndexedTx, vk: &ViewingKey) -> bool { + self.inner + .keys() + .find_map(|(i, v)| (i==ix && v==vk).then_some(())) + .is_some() + + } + + pub fn drain(&mut self) -> std::collections::hash_map::Drain<'_, (IndexedTx, ViewingKey), DecryptedData>{ + self.inner.drain() + } +} + /// A cache of fetched indexed transactions. /// /// The cache is designed so that it either contains diff --git a/crates/sdk/src/masp/utils.rs b/crates/sdk/src/masp/utils.rs index 832c6d5ed2..039e6da582 100644 --- a/crates/sdk/src/masp/utils.rs +++ b/crates/sdk/src/masp/utils.rs @@ -1,6 +1,7 @@ use core::str::FromStr; use std::collections::{BTreeMap, BTreeSet}; use std::env; +use std::marker::PhantomData; use std::path::PathBuf; use std::sync::{Arc, Mutex}; @@ -12,7 +13,7 @@ use masp_primitives::transaction::Transaction; use masp_primitives::zip32::{ExtendedFullViewingKey, ExtendedSpendingKey}; use masp_proofs::prover::LocalTxProver; use namada_core::address::Address; -use namada_core::storage::{BlockHeight, Epoch, IndexedTx, Key, TxIndex}; +use namada_core::storage::{BlockHeight, IndexedTx, TxIndex}; use namada_core::token::Transfer; use namada_ibc::IbcMessage; use namada_tx::data::{TxResult, WrapperTx}; @@ -23,7 +24,10 @@ use tokio::sync::mpsc::{Receiver, Sender}; use crate::error::{Error, QueryError}; use crate::io::Io; use crate::masp::shielded_ctx::ShieldedContext; -use crate::masp::types::{IndexedNoteEntry, PVKs, Unscanned}; +use crate::masp::types::{ + ContextSyncStatus, IndexedNoteEntry, PVKs, ScannedData, TransactionDelta, + 
Unscanned, +}; use crate::masp::{ENV_VAR_MASP_PARAMS_DIR, VERIFIYING_KEYS}; use crate::queries::Client; use crate::{MaybeSend, MaybeSync}; @@ -128,7 +132,7 @@ pub(super) fn extract_payload( } // Retrieves all the indexes and tx events at the specified height which refer -// to a valid masp transaction. If an index is given, it filters only the +// to a valid MASP transaction. If an index is given, it filters only the // transactions with an index equal or greater to the provided one. pub(super) async fn get_indexed_masp_events_at_height( client: &C, @@ -177,7 +181,7 @@ pub(super) enum ExtractShieldedActionArg<'args, C: Client> { Request((&'args C, BlockHeight, Option)), } -/// Extract the relevant shield portions of a [`Tx`], if any. +/// Extract the relevant shielded portions of a [`Tx`], if any. pub(super) async fn extract_masp_tx<'args, C: Client + Sync>( tx: &Tx, action_arg: ExtractShieldedActionArg<'args, C>, @@ -280,7 +284,7 @@ pub(super) async fn extract_masp_tx<'args, C: Client + Sync>( }) } -// Extract the changed keys and Transaction hash from a masp over ibc message +// Extract the changed keys and Transaction hash from a MASP over ibc message pub(super) async fn extract_payload_from_shielded_action< 'args, C: Client + Sync @@ -510,119 +514,128 @@ pub mod fetch_channel { } } -enum Action { - Complete, - Data(Arc>>, BlockHeight), +/// The actions that the scanning process can +/// schedule to be run on the main thread. +#[allow(clippy::large_enum_variant)] +enum Action { + /// Signal that the scanning process has ended and if it did so with + /// an error + Complete { with_error: bool }, + /// Send a diff of data to be applied to the ctx before + /// persisting. + Data(ScannedData, IndexedTx), } + +/// A process on the main thread that listens for +/// progress updates from the scanning process +/// and applies all state changes that it +/// schedules. 
pub struct TaskManager { - action: Receiver>, - pub(super) latest_height: BlockHeight, + action: Receiver, + pub(super) latest_idx: IndexedTx, + ctx: Arc>>, } #[derive(Clone)] -pub(super) struct TaskRunner { - action: Sender>, - ctx: Arc>>, +/// A struct that allows the scanning process +/// thread to communicate errors and actions back to +/// the main process where they will be handled by +/// a [`TaskManager`]. +pub(super) struct TaskScheduler { + action: Sender, + _phantom: PhantomData, } impl TaskManager { - /// Create a client proxy and spawn a process to forward - /// proxy requests. - pub(super) fn new(ctx: ShieldedContext) -> (TaskRunner, Self) { - let (save_send, save_recv) = tokio::sync::mpsc::channel(100); + pub(super) fn new(ctx: ShieldedContext) -> (TaskScheduler, Self) { + let (action_send, action_recv) = tokio::sync::mpsc::channel(100); ( - TaskRunner { - action: save_send, - ctx: Arc::new(futures_locks::Mutex::new(ctx)), + TaskScheduler { + action: action_send, + _phantom: PhantomData, }, TaskManager { - action: save_recv, - latest_height: Default::default(), + action: action_recv, + latest_idx: Default::default(), + ctx: Arc::new(futures_locks::Mutex::new(ctx)), }, ) } - pub async fn run(&mut self) { + /// Run all actions scheduled by the scanning thread until + /// that process indicates it has finished. + pub async fn run(&mut self, native_token: &Address) -> Result<(), Error> { while let Some(action) = self.action.recv().await { match action { - Action::Complete => return, - Action::Data(data, height) => { - self.latest_height = height; - let locked = data.lock().await; + // On completion, update the height to which all keys have been + // synced and then save. + Action::Complete { with_error } => { + if !with_error { + let mut locked = self.ctx.lock().await; + // update each key to be synced to the latest scanned height. 
+ for (_, h) in locked.vk_heights.iter_mut() { + *h = Some(self.latest_idx); + } + // updated the spent notes and balances + locked.nullify_spent_notes(native_token)?; + _ = locked.save().await; + } + } + Action::Data(scanned, idx) => { + // track the latest scanned height + self.latest_idx = idx; + // apply state changes from the scanning process + let mut locked = self.ctx.lock().await; + scanned.apply_to(&mut locked); + // possibly remove unneeded elements from the cache. + locked.unscanned.scanned(&idx); + // persist the changes _ = locked.save().await; } } } + Ok(()) } } -impl TaskRunner { - pub(super) fn complete(&self) { - self.action.blocking_send(Action::Complete).unwrap() +impl TaskScheduler { + /// Signal the [`TaskManager`] that the scanning thread has completed + pub(super) fn complete(&self, with_error: bool) { + self.action + .blocking_send(Action::Complete { with_error }) + .unwrap() } - pub(super) fn save(&self, latest_height: BlockHeight) { + /// Schedule the [`TaskManager`] to save the latest context + /// state changes. 
+ pub(super) fn save(&self, data: ScannedData, latest_idx: IndexedTx) { self.action - .blocking_send(Action::Data(self.ctx.clone(), latest_height)) + .blocking_send(Action::Data(data, latest_idx)) .unwrap(); } - pub(super) fn update_witness_map( - &self, - indexed_tx: IndexedTx, - stx: &Transaction, - ) -> Result<(), Error> { - let mut locked = self.acquire(); - let res = locked.update_witness_map(indexed_tx, stx); - if res.is_err() { - self.complete() - } - res - } - + /// Calls the `scan_tx` method of the shielded context + /// and sends any error to the [`TaskManager`] pub(super) fn scan_tx( &self, + sync_status: ContextSyncStatus, indexed_tx: IndexedTx, - epoch: Epoch, - tx: &BTreeSet, - stx: &Transaction, + tx_note_map: &BTreeMap, + shielded: &Transaction, vk: &ViewingKey, - native_token: Address, - ) -> Result<(), Error> { - let mut locked = self.acquire(); - let res = locked.scan_tx(indexed_tx, epoch, tx, stx, vk, native_token); + ) -> Result<(ScannedData, TransactionDelta), Error> { + let res = ShieldedContext::::scan_tx( + sync_status, + indexed_tx, + tx_note_map, + shielded, + vk, + ); if res.is_err() { - self.complete(); + self.complete(true); } res } - - pub(super) fn get_vk_heights( - &self, - ) -> BTreeMap> { - let mut locked = self.acquire(); - let mut vk_heights = BTreeMap::new(); - std::mem::swap(&mut vk_heights, &mut locked.vk_heights); - vk_heights - } - - pub(super) fn set_vk_heights( - &self, - mut vk_heights: BTreeMap>, - ) { - let mut locked = self.acquire(); - std::mem::swap(&mut vk_heights, &mut locked.vk_heights); - } - - /// Kids, don't try this at home. 
- fn acquire(&self) -> futures_locks::MutexGuard> { - loop { - if let Ok(ctx) = self.ctx.try_lock() { - return ctx; - } - std::hint::spin_loop(); - } - } } /// An enum to indicate how to log sync progress depending on @@ -640,9 +653,9 @@ pub trait ProgressLogger { where I: Iterator; - fn scan(&self, items: I) -> impl Iterator + fn scan(&self, items: I) -> impl Iterator + Send where - I: Iterator; + I: Iterator + Send; fn left_to_fetch(&self) -> usize; } @@ -707,7 +720,7 @@ impl<'io, IO: Io> ProgressLogger for DefaultLogger<'io, IO> { } } - fn scan(&self, items: I) -> impl Iterator + fn scan(&self, items: I) -> impl Iterator + Send where I: IntoIterator, { From dbb8dbf0ce226734faab6387662b15f628997c22 Mon Sep 17 00:00:00 2001 From: satan Date: Thu, 25 Apr 2024 11:46:58 +0200 Subject: [PATCH 05/29] Added first unit test for shielded sync --- Cargo.lock | 1 + crates/apps_lib/src/client/masp.rs | 3 +- crates/sdk/Cargo.toml | 1 + crates/sdk/src/masp/mod.rs | 1 + crates/sdk/src/masp/shielded_ctx.rs | 302 +++++++++++++++------------- crates/sdk/src/masp/test_utils.rs | 113 +++++++++++ crates/sdk/src/masp/types.rs | 13 +- crates/sdk/src/masp/utils.rs | 260 +++++++++++++++++++++++- crates/sdk/src/queries/mod.rs | 2 +- 9 files changed, 542 insertions(+), 154 deletions(-) create mode 100644 crates/sdk/src/masp/test_utils.rs diff --git a/Cargo.lock b/Cargo.lock index 1b99f2fad7..0c189121f3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5307,6 +5307,7 @@ dependencies = [ "tiny-bip39", "tiny-hderive", "tokio", + "tokio-test", "toml 0.5.11", "tracing", "wasmtimer", diff --git a/crates/apps_lib/src/client/masp.rs b/crates/apps_lib/src/client/masp.rs index e9c3b23ac7..a7305b5bd1 100644 --- a/crates/apps_lib/src/client/masp.rs +++ b/crates/apps_lib/src/client/masp.rs @@ -6,7 +6,7 @@ use masp_primitives::zip32::ExtendedSpendingKey; use namada_sdk::error::Error; use namada_sdk::io::Io; use namada_sdk::masp::types::IndexedNoteEntry; -use namada_sdk::masp::utils::{ProgressLogger, 
ProgressType}; +use namada_sdk::masp::utils::{ProgressLogger, ProgressType, RetryStrategy}; use namada_sdk::masp::{ShieldedContext, ShieldedUtils}; use namada_sdk::queries::Client; use namada_sdk::storage::BlockHeight; @@ -40,6 +40,7 @@ pub async fn syncing< .fetch( client, &logger, + RetryStrategy::Forever, start_query_height, last_query_height, batch_size, diff --git a/crates/sdk/Cargo.toml b/crates/sdk/Cargo.toml index 66c0977b13..cbff2d3bb0 100644 --- a/crates/sdk/Cargo.toml +++ b/crates/sdk/Cargo.toml @@ -173,3 +173,4 @@ jubjub.workspace = true masp_primitives = { workspace = true, features = ["test-dependencies"] } proptest.workspace = true tempfile.workspace = true +tokio-test.workspace = true diff --git a/crates/sdk/src/masp/mod.rs b/crates/sdk/src/masp/mod.rs index bfc7b61c99..6327c81f44 100644 --- a/crates/sdk/src/masp/mod.rs +++ b/crates/sdk/src/masp/mod.rs @@ -3,6 +3,7 @@ pub mod shielded_ctx; pub mod types; pub mod utils; +mod test_utils; use std::collections::HashMap; use std::env; diff --git a/crates/sdk/src/masp/shielded_ctx.rs b/crates/sdk/src/masp/shielded_ctx.rs index 0cced04627..1856d240e5 100644 --- a/crates/sdk/src/masp/shielded_ctx.rs +++ b/crates/sdk/src/masp/shielded_ctx.rs @@ -1,8 +1,6 @@ use std::cmp::Ordering; use std::collections::{btree_map, BTreeMap, BTreeSet, HashMap, HashSet}; use std::convert::TryInto; -use std::env; -use std::str::FromStr; use borsh::{BorshDeserialize, BorshSerialize}; use borsh_ext::BorshSerializeExt; @@ -56,15 +54,8 @@ use crate::masp::types::{ MaspAmount, MaspChange, ScannedData, ShieldedTransfer, TransactionDelta, TransferDelta, TransferErr, Unscanned, WalletMap, }; -use crate::masp::utils::{ - cloned_pair, extract_masp_tx, extract_payload, fetch_channel, - get_indexed_masp_events_at_height, is_amount_required, to_viewing_key, - DefaultLogger, ExtractShieldedActionArg, FetchQueueSender, ProgressLogger, - ShieldedUtils, TaskManager, -}; +use crate::masp::utils::{cloned_pair, extract_masp_tx, extract_payload, 
fetch_channel, is_amount_required, to_viewing_key, DefaultLogger, ExtractShieldedActionArg, FetchQueueSender, MaspClient, ProgressLogger, RetryStrategy, ShieldedUtils, TaskManager, LedgerMaspClient}; use crate::masp::NETWORK; -#[cfg(any(test, feature = "testing"))] -use crate::masp::{testing, ENV_VAR_MASP_TEST_SEED}; use crate::queries::Client; use crate::rpc::{ query_block, query_conversion, query_denom, query_epoch_at_height, @@ -168,121 +159,55 @@ impl ShieldedContext { /// Update the merkle tree of witnesses the first time we /// scan a new MASP transaction. - pub(crate) fn update_witness_map(&mut self) -> Result<(), Error> { - // let mut note_pos = self.tree.size(); - // self.tx_note_map.insert(indexed_tx, note_pos); - // for so in shielded - // .sapling_bundle() - // .map_or(&vec![], |x| &x.shielded_outputs) - // { - // Create merkle tree leaf node from note commitment - // let node = Node::new(so.cmu.to_repr()); - // Update each merkle tree in the witness map with the latest - // addition - // for (_, witness) in self.witness_map.iter_mut() { - // witness.append(node).map_err(|()| { - // Error::Other("note commitment tree is full".to_string()) - // })?; - // } - // self.tree.append(node).map_err(|()| { - // Error::Other("note commitment tree is full".to_string()) - // })?; - // Finally, make it easier to construct merkle paths to this new - // note - // let witness = IncrementalWitness::::from_tree(&self.tree); - // self.witness_map.insert(note_pos, witness); - // note_pos += 1; - // } - // Ok(()) - todo!(); + pub(crate) async fn update_witness_map< + 'a, + C: Client, + IO: Io, + F: MaspClient<'a, C> + 'a, + >( + &mut self, + client: &'a C, + io: &IO, + last_witnessed_tx: IndexedTx, + last_query_height: BlockHeight, + ) -> Result<(), Error> { + let client = F::new(client); + client + .update_commitment_tree( + self, + io, + last_witnessed_tx, + last_query_height, + ) + .await } /// Obtain a chronologically-ordered list of all accepted shielded /// 
transactions from a node. - async fn fetch_shielded_transfers( - mut block_sender: FetchQueueSender, - client: &C, + async fn fetch_shielded_transfers< + 'a, + C: Client + Sync, + IO: Io, + F: MaspClient<'a, C> + 'a, + >( + block_sender: FetchQueueSender, + client: &'a C, logger: &impl ProgressLogger, last_indexed_tx: Option, last_query_height: BlockHeight, ) -> Result<(), Error> { + let client = F::new(client); // Fetch all the transactions we do not have yet let first_height_to_query = last_indexed_tx.map_or_else(|| 1, |last| last.0); - for height in logger.fetch(first_height_to_query..=last_query_height.0) - { - if block_sender.contains_height(height) { - continue; - } - // Get the valid masp transactions at the specified height - let epoch = query_epoch_at_height(client, height.into()) - .await? - .ok_or_else(|| { - Error::from(QueryError::General( - "Queried height is greater than the last committed \ - block height" - .to_string(), - )) - })?; - - let txs_results = match get_indexed_masp_events_at_height( - client, - height.into(), - None, + client + .fetch_shielded_transfer( + logger, + block_sender, + first_height_to_query, + last_query_height.0, ) - .await? - { - Some(events) => events, - None => continue, - }; - - // Query the actual block to get the txs bytes. If we only need one - // tx it might be slightly better to query the /tx endpoint to - // reduce the amount of data sent over the network, but this is a - // minimal improvement and it's even hard to tell how many times - // we'd need a single masp tx to make this worth it - let block = client - .block(height as u32) - .await - .map_err(|e| Error::from(QueryError::General(e.to_string())))? 
- .block - .data; - - for (idx, tx_event) in txs_results { - let tx = Tx::try_from(block[idx.0 as usize].as_ref()) - .map_err(|e| Error::Other(e.to_string()))?; - let ExtractedMaspTx { - fee_unshielding, - inner_tx, - } = extract_masp_tx::( - &tx, - ExtractShieldedActionArg::Event(&tx_event), - true, - ) - .await?; - fee_unshielding.and_then(|(changed_keys, masp_transaction)| { - block_sender.send(( - IndexedTx { - height: height.into(), - index: idx, - is_wrapper: true, - }, - (epoch, changed_keys, masp_transaction), - )); - }); - inner_tx.and_then(|(changed_keys, masp_transaction)| { - block_sender.send(( - IndexedTx { - height: height.into(), - index: idx, - is_wrapper: false, - }, - (epoch, changed_keys, masp_transaction), - )); - }) - } - } - Ok(()) + .await } /// Attempts to decrypt the note in each transaction. Successfully @@ -1681,14 +1606,17 @@ impl ShieldedContext { &vk, )?; scanned_data.merge(scanned); - scanned_data.decrypted_note_cache.insert((indexed_tx, vk), DecryptedData { - tx: masp_tx.clone(), - keys: changed_balance_keys.clone(), - delta: tx_delta, - epoch, - }); + scanned_data.decrypted_note_cache.insert( + (indexed_tx, vk), + DecryptedData { + tx: masp_tx.clone(), + keys: changed_balance_keys.clone(), + delta: tx_delta, + epoch, + }, + ); } - let mut temp_cache = DecryptedDataCache::default(); + let mut temp_cache = DecryptedDataCache::default(); std::mem::swap(&mut temp_cache, &mut self.decrypted_note_cache); scanned_data.apply_to(self); self.nullify_spent_notes(&native_token)?; @@ -1765,13 +1693,16 @@ impl ShieldedContext { #[allow(clippy::too_many_arguments)] #[cfg(not(target_family = "wasm"))] pub async fn fetch< + 'a, C: Client + Sync, IO: Io + Send + Sync, L: ProgressLogger + Sync, + M: MaspClient<'a, C> + 'a, >( &mut self, - client: &C, + client: &'a C, logger: &L, + retry: RetryStrategy, start_query_height: Option, last_query_height: Option, _batch_size: u64, @@ -1822,7 +1753,13 @@ impl ShieldedContext { let last_query_height = 
last_query_height.unwrap_or(last_block_height); let last_query_height = std::cmp::min(last_query_height, last_block_height); - self.update_witness_map()?; + self.update_witness_map::<_, _, M>( + client, + logger.io(), + last_witnessed_tx.unwrap_or_default(), + last_query_height, + ) + .await?; let vk_heights = self.vk_heights.clone(); // the task scheduler allows the thread performing trial decryptions to @@ -1834,15 +1771,12 @@ impl ShieldedContext { // The main loop that performs // * fetching and caching MASP txs in sequence - // * trial decryption of each note to determine if it - // is owned by a viewing key in this context and caching - // the result. - // * Nullifying spent notes and updating balances for each - // viewing key - // * Regular saving of the context to disk in case of process - // interrupts + // * trial decryption of each note to determine if it is owned by a + // viewing key in this context and caching the result. + // * Nullifying spent notes and updating balances for each viewing key + // * Regular saving of the context to disk in case of process interrupts std::thread::scope(|s| { - loop { + for _ in retry { // a stateful channel that communicates notes fetched to the // trial decryption process let (fetch_send, fetch_recv) = @@ -1861,10 +1795,14 @@ impl ShieldedContext { .filter(|(_vk, h)| **h < Some(indexed_tx)) { // if this note is in the cache, skip it. 
- if scanned_data.decrypted_note_cache.contains(&indexed_tx, vk) { + if scanned_data + .decrypted_note_cache + .contains(&indexed_tx, vk) + { continue; } - // attempt to decrypt the note and get the state changes + // attempt to decrypt the note and get the state + // changes let (scanned, tx_delta) = task_scheduler .scan_tx( self.sync_status, @@ -1903,7 +1841,11 @@ impl ShieldedContext { tokio::runtime::Handle::current().block_on(async { tokio::join!( task_manager.run(&native_token), - Self::fetch_shielded_transfers( + Self::fetch_shielded_transfers::< + _, + _, + M, + >( fetch_send, client, logger, @@ -1913,7 +1855,8 @@ impl ShieldedContext { ) }) }); - // if the scanning process errored, return that error here and exit. + // if the scanning process errored, return that error here and + // exit. decrypt_res?; // shut down the scanning thread. decryption_handle.join().unwrap()?; @@ -1929,9 +1872,14 @@ impl ShieldedContext { // if fetching failed for before completing, we restart // the fetch process. Otherwise, we can break the loop. 
if logger.left_to_fetch() == 0 { - break Ok(()); + break; } } + if logger.left_to_fetch() != 0 { + Err(Error::Other("After retrying, could not fetch all MASP txs.".to_string())) + } else { + Ok(()) + } }) } @@ -1961,8 +1909,17 @@ impl ShieldedContext { // Required for filtering out rejected transactions from Tendermint // responses let block_results = rpc::query_results(client).await?; - self.fetch(client, &DefaultLogger::new(io), None, None, 1, &[], &fvks) - .await?; + self.fetch::<_, _, _, LedgerMaspClient>( + client, + &DefaultLogger::new(io), + RetryStrategy::Forever, + None, + None, + 1, + &[], + &fvks, + ) + .await?; // Save the update state so that future fetches can be short-circuited let _ = self.save().await; @@ -2069,3 +2026,74 @@ impl ShieldedContext { Ok(transfers) } } + +#[cfg(test)] +mod shielded_ctx_tests { + use core::str::FromStr; + use std::collections::BTreeSet; + use masp_primitives::transaction::{Transaction, TransactionData}; + use masp_primitives::zip32::ExtendedFullViewingKey; + use rand::seq::index::BTreeSet; + use tempfile::tempdir; + use namada_core::masp::ExtendedViewingKey; + use namada_core::storage::{Epoch, IndexedTx}; + use crate::error::Error; + use crate::io::StdIo; + + use crate::masp::fs::FsShieldedUtils; + use crate::masp::test_utils::{test_client, TestingMaspClient}; + use crate::masp::types::IndexedNoteEntry; + use crate::masp::utils::{DefaultLogger, RetryStrategy}; + + // A viewing key derived from A_SPENDING_KEY + pub const AA_VIEWING_KEY: &str = "zvknam1qqqqqqqqqqqqqq9v0sls5r5de7njx8ehu49pqgmqr9ygelg87l5x8y4s9r0pjlvu6x74w9gjpw856zcu826qesdre628y6tjc26uhgj6d9zqur9l5u3p99d9ggc74ald6s8y3sdtka74qmheyqvdrasqpwyv2fsmxlz57lj4grm2pthzj3sflxc0jx0edrakx3vdcngrfjmru8ywkguru8mxss2uuqxdlglaz6undx5h8w7g70t2es850g48xzdkqay5qs0yw06rtxcpjdve6"; + + + /// Test that if fetching fails before finishing, + /// we re-establish the fetching process + #[tokio::test(flavor = "multi_thread", worker_threads=2)] + async fn test_retry_fetch() { + let 
temp_dir = tempdir().unwrap(); + let mut shielded_ctx = + FsShieldedUtils::new(temp_dir.path().to_path_buf()); + let (client, masp_tx_sender) = test_client(2.into()); + let io = StdIo::default(); + let logger = DefaultLogger::new(&io); + let vk = ExtendedFullViewingKey::from( + ExtendedViewingKey::from_str(AA_VIEWING_KEY).expect("Test failed") + ) + .fvk + .vk; + let unscanned = shielded_ctx.unscanned.clone(); + masp_tx_sender.send(None).expect("Test failed"); + + // we first test that with no retries, a fetching failure + // stops process + let result = shielded_ctx.fetch::<_, _, _, TestingMaspClient>( + &client, + &logger, + RetryStrategy::Times(1), + None, + None, + 0, + &[], + &[vk], + ).await.unwrap_err(); + match result { + Error::Other(msg) => assert_eq!(msg.as_str(), "After retrying, could not fetch all MASP txs."), + other => panic!("{:?} does not match Error::Other(_)", other), + } + + let result = shielded_ctx.fetch::<_, _, _, TestingMaspClient>( + &client, + &logger, + RetryStrategy::Times(2), + None, + None, + 0, + &[], + &[vk], + ).await.unwrap_err(); + + } +} diff --git a/crates/sdk/src/masp/test_utils.rs b/crates/sdk/src/masp/test_utils.rs new file mode 100644 index 0000000000..a6fdaa6c14 --- /dev/null +++ b/crates/sdk/src/masp/test_utils.rs @@ -0,0 +1,113 @@ +use std::ops::{Deref, DerefMut}; + +use masp_primitives::merkle_tree::CommitmentTree; +use masp_primitives::sapling::Node; +use tendermint_rpc::SimpleRequest; +use namada_core::storage::{BlockHeight, IndexedTx}; +use namada_state::LastBlock; + +use crate::error::Error; +use crate::io::Io; +use crate::masp::{ShieldedContext, ShieldedUtils}; +use crate::masp::types::IndexedNoteEntry; +use crate::masp::utils::{CommitmentTreeUpdates, FetchQueueSender, MaspClient, ProgressLogger}; +use crate::queries::{Client, EncodedResponseQuery, Rpc, RPC}; +use crate::queries::testing::TestClient; + +/// A client for testing the shielded-sync functionality +pub struct TestingClient { + /// An actual mocked 
client for querying + inner: TestClient, + /// Used to inject a channel that we control into + /// the fetch algorithm. The option is to mock connection + /// failures. + next_masp_txs: flume::Receiver>, +} + +impl Deref for TestingClient { + type Target = TestClient; + + fn deref(&self) -> &Self::Target { + &self.inner + } +} + +impl DerefMut for TestingClient { + + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} + +#[cfg(any(test, feature = "async-client"))] +#[cfg_attr(feature = "async-send", async_trait::async_trait)] +#[cfg_attr(not(feature = "async-send"), async_trait::async_trait(?Send))] +impl Client for TestingClient { + type Error = std::io::Error; + + async fn request(&self, path: String, data: Option>, height: Option, prove: bool) -> Result { + self.inner.request(path, data, height, prove).await + } + + async fn perform(&self, request: R) -> Result where R: SimpleRequest { + self.inner.perform(request).await + } +} +pub fn test_client(last_height: BlockHeight) -> (TestingClient, flume::Sender>) { + let (sender, recv) = flume::unbounded(); + let mut client = TestClient::new(RPC); + client.state.in_mem_mut().last_block = Some(LastBlock { + height: last_height, + hash: Default::default(), + time: Default::default(), + }); + (TestingClient { + inner: client, + next_masp_txs: recv, + }, sender) +} + +#[derive(Debug, Clone)] +pub struct TestingMaspClient { + next_masp_txs: flume::Receiver>, +} + +impl<'a> MaspClient<'a, TestingClient> for TestingMaspClient { + fn new(client: &'a TestingClient) -> Self where Self: 'a { + Self { + next_masp_txs: client.next_masp_txs.clone(), + } + } + + async fn witness_map_updates( + &self, + _: &ShieldedContext, + _: &IO, + _: IndexedTx, + _: BlockHeight + ) -> Result { + Ok(CommitmentTreeUpdates { + commitment_tree: CommitmentTree::::empty(), + witness_map: Default::default(), + note_map_delta: Default::default(), + }) + } + + async fn fetch_shielded_transfer( + &self, + logger: &impl ProgressLogger, 
+ mut tx_sender: FetchQueueSender, + from: u64, + to: u64, + ) -> Result<(), Error> { + // N.B. this assumes one masp tx per block + for _ in logger.fetch(from..=to) { + let next_tx = self.next_masp_txs + .recv() + .expect("Test failed") + .ok_or_else(|| Error::Other("Connection to fetch MASP txs failed".to_string()))?; + tx_sender.send(next_tx); + } + Ok(()) + } +} \ No newline at end of file diff --git a/crates/sdk/src/masp/types.rs b/crates/sdk/src/masp/types.rs index 48da0a4a8f..7e406329d5 100644 --- a/crates/sdk/src/masp/types.rs +++ b/crates/sdk/src/masp/types.rs @@ -103,7 +103,7 @@ pub struct MaspTokenRewardData { } #[derive(Debug, Clone)] -struct ExtractedMaspTx { +pub(super) struct ExtractedMaspTx { fee_unshielding: Option<(BTreeSet, Transaction)>, inner_tx: Option<(BTreeSet, Transaction)>, } @@ -241,12 +241,17 @@ impl DecryptedDataCache { pub fn contains(&self, ix: &IndexedTx, vk: &ViewingKey) -> bool { self.inner .keys() - .find_map(|(i, v)| (i==ix && v==vk).then_some(())) + .find_map(|(i, v)| (i == ix && v == vk).then_some(())) .is_some() - } - pub fn drain(&mut self) -> std::collections::hash_map::Drain<'_, (IndexedTx, ViewingKey), DecryptedData>{ + pub fn drain( + &mut self, + ) -> std::collections::hash_map::Drain< + '_, + (IndexedTx, ViewingKey), + DecryptedData, + > { self.inner.drain() } } diff --git a/crates/sdk/src/masp/utils.rs b/crates/sdk/src/masp/utils.rs index 039e6da582..8f069851aa 100644 --- a/crates/sdk/src/masp/utils.rs +++ b/crates/sdk/src/masp/utils.rs @@ -1,13 +1,15 @@ use core::str::FromStr; -use std::collections::{BTreeMap, BTreeSet}; +use std::collections::{BTreeMap, BTreeSet, HashMap}; use std::env; use std::marker::PhantomData; use std::path::PathBuf; use std::sync::{Arc, Mutex}; use borsh::{BorshDeserialize, BorshSerialize}; +use masp_primitives::ff::PrimeField; +use masp_primitives::merkle_tree::{CommitmentTree, IncrementalWitness}; use masp_primitives::sapling::keys::FullViewingKey; -use 
masp_primitives::sapling::{Diversifier, ViewingKey}; +use masp_primitives::sapling::{Diversifier, Node, ViewingKey}; use masp_primitives::transaction::components::I128Sum; use masp_primitives::transaction::Transaction; use masp_primitives::zip32::{ExtendedFullViewingKey, ExtendedSpendingKey}; @@ -24,12 +26,10 @@ use tokio::sync::mpsc::{Receiver, Sender}; use crate::error::{Error, QueryError}; use crate::io::Io; use crate::masp::shielded_ctx::ShieldedContext; -use crate::masp::types::{ - ContextSyncStatus, IndexedNoteEntry, PVKs, ScannedData, TransactionDelta, - Unscanned, -}; +use crate::masp::types::{ContextSyncStatus, ExtractedMaspTx, IndexedNoteEntry, PVKs, ScannedData, TransactionDelta, Unscanned}; use crate::masp::{ENV_VAR_MASP_PARAMS_DIR, VERIFIYING_KEYS}; use crate::queries::Client; +use crate::rpc::query_epoch_at_height; use crate::{MaybeSend, MaybeSync}; /// Make sure the MASP params are present and load verifying keys into memory @@ -425,6 +425,213 @@ fn get_tx_result( }) } +pub(super) struct CommitmentTreeUpdates { + pub commitment_tree: CommitmentTree, + pub witness_map: HashMap>, + pub note_map_delta: BTreeMap, +} + +pub trait MaspClient<'a, C: Client> { + fn new(client: &'a C) -> Self + where + Self: 'a; + + async fn witness_map_updates( + &self, + ctx: &ShieldedContext, + io: &IO, + last_witnessed_tx: IndexedTx, + last_query_height: BlockHeight, + ) -> Result; + async fn update_commitment_tree( + &self, + ctx: &mut ShieldedContext, + io: &IO, + last_witnessed_tx: IndexedTx, + last_query_height: BlockHeight, + ) -> Result<(), Error> { + let CommitmentTreeUpdates { + commitment_tree, + witness_map, + mut note_map_delta, + } = self + .witness_map_updates(ctx, io, last_witnessed_tx, last_query_height) + .await?; + ctx.tree = commitment_tree; + ctx.witness_map = witness_map; + ctx.tx_note_map.append(&mut note_map_delta); + Ok(()) + } + async fn fetch_shielded_transfer( + &self, + logger: &impl ProgressLogger, + tx_sender: FetchQueueSender, + from: u64, + 
to: u64, + ) -> Result<(), Error>; +} + +/// An inefficient MASP client which simply uses a +/// client to the blockchain to query it directly. +pub(super) struct LedgerMaspClient<'a, C: Client> { + client: &'a C, +} + +impl<'a, C: Client + Sync> MaspClient<'a, C> for LedgerMaspClient<'a, C> +where + LedgerMaspClient<'a, C>: 'a, +{ + fn new(client: &'a C) -> Self + where + Self: 'a, + { + Self { client } + } + + async fn witness_map_updates( + &self, + ctx: &ShieldedContext, + io: &IO, + last_witnessed_tx: IndexedTx, + last_query_height: BlockHeight, + ) -> Result { + let (tx_sender, tx_receiver) = fetch_channel::new(Default::default()); + let logger = DefaultLogger::new(io); + let (res, updates) = tokio::join!( + self.fetch_shielded_transfer( + &logger, + tx_sender, + last_witnessed_tx.height.0, + last_query_height.0, + ), + async { + let mut updates = CommitmentTreeUpdates { + commitment_tree: ctx.tree.clone(), + witness_map: ctx.witness_map.clone(), + note_map_delta: Default::default(), + }; + for (indexed_tx, (_, _, ref shielded)) in tx_receiver { + let mut note_pos = updates.commitment_tree.size(); + updates.note_map_delta.insert(indexed_tx, note_pos); + for so in shielded + .sapling_bundle() + .map_or(&vec![], |x| &x.shielded_outputs) + { + // Create merkle tree leaf node from note commitment + let node = Node::new(so.cmu.to_repr()); + // Update each merkle tree in the witness map with the + // latest addition + for (_, witness) in updates.witness_map.iter_mut() { + witness.append(node).map_err(|()| { + Error::Other( + "note commitment tree is full".to_string(), + ) + })?; + } + updates.commitment_tree.append(node).map_err(|()| { + Error::Other( + "note commitment tree is full".to_string(), + ) + })?; + // Finally, make it easier to construct merkle paths to + // this new note + let witness = IncrementalWitness::::from_tree( + &updates.commitment_tree, + ); + updates.witness_map.insert(note_pos, witness); + note_pos += 1; + } + } + Ok(updates) + } + ); + 
res?; + updates + } + + async fn fetch_shielded_transfer( + &self, + logger: &impl ProgressLogger, + mut tx_sender: FetchQueueSender, + from: u64, + to: u64, + ) -> Result<(), Error> { + // Fetch all the transactions we do not have yet + for height in logger.fetch(from..=to) { + if tx_sender.contains_height(height) { + continue; + } + // Get the valid masp transactions at the specified height + let epoch = query_epoch_at_height(self.client, height.into()) + .await? + .ok_or_else(|| { + Error::from(QueryError::General( + "Queried height is greater than the last committed \ + block height" + .to_string(), + )) + })?; + let txs_results = match get_indexed_masp_events_at_height( + &self.client, + height.into(), + None, + ) + .await? + { + Some(events) => events, + None => continue, + }; + + // Query the actual block to get the txs bytes. If we only need one + // tx it might be slightly better to query the /tx endpoint to + // reduce the amount of data sent over the network, but this is a + // minimal improvement and it's even hard to tell how many times + // we'd need a single masp tx to make this worth it + let block = self.client + .block(height as u32) + .await + .map_err(|e| Error::from(QueryError::General(e.to_string())))? 
+ .block + .data; + + for (idx, tx_event) in txs_results { + let tx = Tx::try_from(block[idx.0 as usize].as_ref()) + .map_err(|e| Error::Other(e.to_string()))?; + let ExtractedMaspTx { + fee_unshielding, + inner_tx, + } = extract_masp_tx::( + &tx, + ExtractShieldedActionArg::Event(&tx_event), + true, + ) + .await?; + fee_unshielding.and_then(|(changed_keys, masp_transaction)| { + tx_sender.send(( + IndexedTx { + height: height.into(), + index: idx, + is_wrapper: true, + }, + (epoch, changed_keys, masp_transaction), + )); + }); + inner_tx.and_then(|(changed_keys, masp_transaction)| { + tx_sender.send(( + IndexedTx { + height: height.into(), + index: idx, + is_wrapper: false, + }, + (epoch, changed_keys, masp_transaction), + )); + }) + } + } + Ok(()) + } +} + /// A channel-like struct for "sending" newly fetched blocks /// to the scanning algorithm. /// @@ -433,7 +640,7 @@ fn get_tx_result( /// 1. The process in possession of the channel is still alive /// 2. Quickly updating the latest block height scanned. #[derive(Clone)] -pub(super) struct FetchQueueSender { +pub struct FetchQueueSender { cache: Unscanned, last_fetched: flume::Sender, } @@ -557,7 +764,7 @@ impl TaskManager { TaskManager { action: action_recv, latest_idx: Default::default(), - ctx: Arc::new(futures_locks::Mutex::new(ctx)), + ctx: Arc::new(futures_locks::Mutex::new(ctx)), }, ) } @@ -572,7 +779,8 @@ impl TaskManager { Action::Complete { with_error } => { if !with_error { let mut locked = self.ctx.lock().await; - // update each key to be synced to the latest scanned height. + // update each key to be synced to the latest scanned + // height. 
for (_, h) in locked.vk_heights.iter_mut() { *h = Some(self.latest_idx); } @@ -580,6 +788,7 @@ impl TaskManager { locked.nullify_spent_notes(native_token)?; _ = locked.save().await; } + return Ok(()) } Action::Data(scanned, idx) => { // track the latest scanned height @@ -638,6 +847,32 @@ impl TaskScheduler { } } +/// When retrying to fetch all nodes in a +/// loop, this dictates the strategy for +/// how many attempts should be made. +pub enum RetryStrategy { + Forever, + Times(u64), +} + +impl Iterator for RetryStrategy { + type Item = (); + + fn next(&mut self) -> Option { + match self { + Self::Forever => Some(()), + Self::Times(ref mut count) => { + if *count == 0 { + None + } else { + *count -= 1; + Some(()) + } + } + } + } +} + /// An enum to indicate how to log sync progress depending on /// whether sync is currently fetch or scanning blocks. #[derive(Debug, Copy, Clone)] @@ -653,7 +888,10 @@ pub trait ProgressLogger { where I: Iterator; - fn scan(&self, items: I) -> impl Iterator + Send + fn scan( + &self, + items: I, + ) -> impl Iterator + Send where I: Iterator + Send; @@ -732,4 +970,4 @@ impl<'io, IO: Io> ProgressLogger for DefaultLogger<'io, IO> { let locked = self.progress.lock().unwrap(); locked.length - locked.index } -} +} \ No newline at end of file diff --git a/crates/sdk/src/queries/mod.rs b/crates/sdk/src/queries/mod.rs index 97806e7022..4ecef2dab3 100644 --- a/crates/sdk/src/queries/mod.rs +++ b/crates/sdk/src/queries/mod.rs @@ -95,7 +95,7 @@ pub fn require_no_data(request: &RequestQuery) -> namada_storage::Result<()> { /// Queries testing helpers #[cfg(any(test, feature = "testing"))] -mod testing { +pub(crate) mod testing { use borsh_ext::BorshSerializeExt; use namada_state::testing::TestState; use tendermint_rpc::Response; From b024060bfbb2ccebac782899c26a3e9755cbcb8c Mon Sep 17 00:00:00 2001 From: satan Date: Thu, 25 Apr 2024 13:04:20 +0200 Subject: [PATCH 06/29] Rebasing --- crates/apps_lib/src/client/masp.rs | 6 +- 
crates/sdk/src/masp/mod.rs | 53 +++++++------ crates/sdk/src/masp/shielded_ctx.rs | 113 +++++++++++++++------------- crates/sdk/src/masp/test_utils.rs | 63 +++++++++++----- crates/sdk/src/masp/types.rs | 44 +++++------ crates/sdk/src/masp/utils.rs | 86 ++++++++++++--------- 6 files changed, 210 insertions(+), 155 deletions(-) diff --git a/crates/apps_lib/src/client/masp.rs b/crates/apps_lib/src/client/masp.rs index a7305b5bd1..4ec5ab301b 100644 --- a/crates/apps_lib/src/client/masp.rs +++ b/crates/apps_lib/src/client/masp.rs @@ -6,7 +6,9 @@ use masp_primitives::zip32::ExtendedSpendingKey; use namada_sdk::error::Error; use namada_sdk::io::Io; use namada_sdk::masp::types::IndexedNoteEntry; -use namada_sdk::masp::utils::{ProgressLogger, ProgressType, RetryStrategy}; +use namada_sdk::masp::utils::{ + LedgerMaspClient, ProgressLogger, ProgressType, RetryStrategy, +}; use namada_sdk::masp::{ShieldedContext, ShieldedUtils}; use namada_sdk::queries::Client; use namada_sdk::storage::BlockHeight; @@ -37,7 +39,7 @@ pub async fn syncing< let logger = CliLogger::new(io); let sync = async move { shielded - .fetch( + .fetch::<_, _, _, LedgerMaspClient>( client, &logger, RetryStrategy::Forever, diff --git a/crates/sdk/src/masp/mod.rs b/crates/sdk/src/masp/mod.rs index 6327c81f44..e1d7b1e35a 100644 --- a/crates/sdk/src/masp/mod.rs +++ b/crates/sdk/src/masp/mod.rs @@ -1,11 +1,11 @@ //! MASP verification wrappers. 
pub mod shielded_ctx; +#[cfg(test)] +mod test_utils; pub mod types; pub mod utils; -mod test_utils; -use std::collections::HashMap; use std::env; use std::fmt::Debug; use std::ops::Deref; @@ -13,31 +13,21 @@ use std::path::PathBuf; use borsh::{BorshDeserialize, BorshSerialize}; use lazy_static::lazy_static; -use masp_primitives::asset_type::AssetType; #[cfg(feature = "mainnet")] use masp_primitives::consensus::MainNetwork; #[cfg(not(feature = "mainnet"))] use masp_primitives::consensus::TestNetwork; -use masp_primitives::convert::AllowedConversion; -use masp_primitives::ff::PrimeField; use masp_primitives::group::GroupEncoding; -use masp_primitives::memo::MemoBytes; -use masp_primitives::merkle_tree::MerklePath; -use masp_primitives::sapling::note_encryption::*; use masp_primitives::sapling::redjubjub::PublicKey; -use masp_primitives::sapling::{Diversifier, Node, Note}; use masp_primitives::transaction::components::transparent::builder::TransparentBuilder; use masp_primitives::transaction::components::{ ConvertDescription, I128Sum, OutputDescription, SpendDescription, TxOut, - U64Sum, }; -use masp_primitives::transaction::fees::fixed::FeeRule; use masp_primitives::transaction::sighash::{signature_hash, SignableInput}; use masp_primitives::transaction::txid::TxIdDigester; use masp_primitives::transaction::{ - Authorization, Authorized, Transaction, TransactionData, TransparentAddress, + Authorization, Authorized, Transaction, TransactionData, }; -use masp_primitives::zip32::{ExtendedFullViewingKey, ExtendedSpendingKey}; use masp_proofs::bellman::groth16::PreparedVerifyingKey; use masp_proofs::bls12_381::Bls12; use masp_proofs::prover::LocalTxProver; @@ -47,11 +37,14 @@ pub use namada_core::masp::{ encode_asset_type, AssetData, BalanceOwner, ExtendedViewingKey, PaymentAddress, TransferSource, TransferTarget, }; -use namada_token::MaspDigitPos; +use namada_state::StorageError; pub use shielded_ctx::ShieldedContext; -pub use utils::ShieldedUtils; +pub use types::PVKs; 
+pub use utils::{ + find_valid_diversifier, preload_verifying_keys, ShieldedUtils, +}; -use crate::masp::types::{PVKs, PartialAuthorized}; +use crate::masp::types::PartialAuthorized; use crate::masp::utils::{get_params_dir, load_pvks}; use crate::{MaybeSend, MaybeSync}; @@ -223,8 +216,8 @@ pub fn verify_shielded_tx( transaction: &Transaction, mut consume_verify_gas: F, ) -> Result<(), StorageError> - where - F: FnMut(u64) -> std::result::Result<(), StorageError>, +where + F: FnMut(u64) -> std::result::Result<(), StorageError>, { tracing::info!("entered verify_shielded_tx()"); @@ -261,9 +254,9 @@ pub fn verify_shielded_tx( } = load_pvks(); #[cfg(not(feature = "testing"))] - let mut ctx = SaplingVerificationContext::new(true); + let mut ctx = SaplingVerificationContext::new(true); #[cfg(feature = "testing")] - let mut ctx = testing::MockSaplingVerificationContext::new(true); + let mut ctx = testing::MockSaplingVerificationContext::new(true); for spend in &sapling_bundle.shielded_spends { consume_verify_gas(namada_gas::MASP_VERIFY_SPEND_GAS)?; if !check_spend(spend, sighash.as_ref(), &mut ctx, spend_vk) { @@ -425,15 +418,29 @@ pub mod testing { use std::sync::Mutex; use bls12_381::{G1Affine, G2Affine}; + use masp_primitives::asset_type::AssetType; use masp_primitives::consensus::testing::arb_height; use masp_primitives::constants::SPENDING_KEY_GENERATOR; - use masp_primitives::ff::Field; + use masp_primitives::convert::AllowedConversion; + use masp_primitives::ff::{Field, PrimeField}; + use masp_primitives::memo::MemoBytes; + use masp_primitives::merkle_tree::MerklePath; + use masp_primitives::sapling::note_encryption::{ + try_sapling_note_decryption, PreparedIncomingViewingKey, + }; use masp_primitives::sapling::prover::TxProver; use masp_primitives::sapling::redjubjub::Signature; - use masp_primitives::sapling::{ProofGenerationKey, Rseed}; + use masp_primitives::sapling::{ + Diversifier, Node, Note, ProofGenerationKey, Rseed, + }; use 
masp_primitives::transaction::builder::Builder; - use masp_primitives::transaction::components::GROTH_PROOF_SIZE; + use masp_primitives::transaction::components::{U64Sum, GROTH_PROOF_SIZE}; + use masp_primitives::transaction::fees::fixed::FeeRule; + use masp_primitives::transaction::TransparentAddress; + use masp_primitives::zip32::{ExtendedFullViewingKey, ExtendedSpendingKey}; use masp_proofs::bellman::groth16::Proof; + use namada_core::collections::HashMap; + use namada_core::token::MaspDigitPos; use proptest::prelude::*; use proptest::sample::SizeRange; use proptest::test_runner::TestRng; diff --git a/crates/sdk/src/masp/shielded_ctx.rs b/crates/sdk/src/masp/shielded_ctx.rs index 1856d240e5..a5e57b11a2 100644 --- a/crates/sdk/src/masp/shielded_ctx.rs +++ b/crates/sdk/src/masp/shielded_ctx.rs @@ -1,5 +1,5 @@ use std::cmp::Ordering; -use std::collections::{btree_map, BTreeMap, BTreeSet, HashMap, HashSet}; +use std::collections::{btree_map, BTreeMap, BTreeSet}; use std::convert::TryInto; use borsh::{BorshDeserialize, BorshSerialize}; @@ -28,6 +28,7 @@ use masp_primitives::transaction::{ }; use masp_primitives::zip32::{ExtendedFullViewingKey, ExtendedSpendingKey}; use namada_core::address::{Address, MASP}; +use namada_core::collections::{HashMap, HashSet}; use namada_core::masp::{ encode_asset_type, AssetData, BalanceOwner, ExtendedViewingKey, PaymentAddress, TransferSource, TransferTarget, @@ -54,7 +55,12 @@ use crate::masp::types::{ MaspAmount, MaspChange, ScannedData, ShieldedTransfer, TransactionDelta, TransferDelta, TransferErr, Unscanned, WalletMap, }; -use crate::masp::utils::{cloned_pair, extract_masp_tx, extract_payload, fetch_channel, is_amount_required, to_viewing_key, DefaultLogger, ExtractShieldedActionArg, FetchQueueSender, MaspClient, ProgressLogger, RetryStrategy, ShieldedUtils, TaskManager, LedgerMaspClient}; +use crate::masp::utils::{ + cloned_pair, extract_masp_tx, extract_payload, fetch_channel, + is_amount_required, to_viewing_key, 
DefaultLogger, + ExtractShieldedActionArg, FetchQueueSender, LedgerMaspClient, MaspClient, + ProgressLogger, RetryStrategy, ShieldedUtils, TaskManager, +}; use crate::masp::NETWORK; use crate::queries::Client; use crate::rpc::{ @@ -994,7 +1000,7 @@ impl ShieldedContext { .inner_tx .ok_or_else(|| { Error::Other("Missing shielded inner portion of pinned tx".into()) - })?;; + })?; // Accumulate the combined output note value into this Amount let mut val_acc = I128Sum::zero(); @@ -1202,18 +1208,20 @@ impl ShieldedContext { // Try to get a seed from env var, if any. let rng = StdRng::from_rng(OsRng).unwrap(); #[cfg(feature = "testing")] - let rng = if let Ok(seed) = env::var(ENV_VAR_MASP_TEST_SEED) + let rng = if let Ok(seed) = std::env::var(super::ENV_VAR_MASP_TEST_SEED) .map_err(|e| Error::Other(e.to_string())) .and_then(|seed| { - let exp_str = - format!("Env var {ENV_VAR_MASP_TEST_SEED} must be a u64."); - let parsed_seed: u64 = FromStr::from_str(&seed) + let exp_str = format!( + "Env var {} must be a u64.", + super::ENV_VAR_MASP_TEST_SEED + ); + let parsed_seed: u64 = std::str::FromStr::from_str(&seed) .map_err(|_| Error::Other(exp_str))?; Ok(parsed_seed) }) { tracing::warn!( - "UNSAFE: Using a seed from {ENV_VAR_MASP_TEST_SEED} env var \ - to build proofs." 
+ "UNSAFE: Using a seed from {} env var to build proofs.", + super::ENV_VAR_MASP_TEST_SEED, ); StdRng::seed_from_u64(seed) } else { @@ -1524,7 +1532,7 @@ impl ShieldedContext { #[cfg(not(feature = "testing"))] let prover = context.shielded().await.utils.local_tx_prover(); #[cfg(feature = "testing")] - let prover = testing::MockTxProver(std::sync::Mutex::new(OsRng)); + let prover = super::testing::MockTxProver(std::sync::Mutex::new(OsRng)); let (masp_tx, metadata) = builder.build(&prover, &FeeRule::non_standard(U64Sum::zero()))?; @@ -1841,11 +1849,7 @@ impl ShieldedContext { tokio::runtime::Handle::current().block_on(async { tokio::join!( task_manager.run(&native_token), - Self::fetch_shielded_transfers::< - _, - _, - M, - >( + Self::fetch_shielded_transfers::<_, _, M>( fetch_send, client, logger, @@ -1876,7 +1880,9 @@ impl ShieldedContext { } } if logger.left_to_fetch() != 0 { - Err(Error::Other("After retrying, could not fetch all MASP txs.".to_string())) + Err(Error::Other( + "After retrying, could not fetch all MASP txs.".to_string(), + )) } else { Ok(()) } @@ -2030,70 +2036,73 @@ impl ShieldedContext { #[cfg(test)] mod shielded_ctx_tests { use core::str::FromStr; - use std::collections::BTreeSet; - use masp_primitives::transaction::{Transaction, TransactionData}; + use masp_primitives::zip32::ExtendedFullViewingKey; - use rand::seq::index::BTreeSet; - use tempfile::tempdir; use namada_core::masp::ExtendedViewingKey; - use namada_core::storage::{Epoch, IndexedTx}; + use tempfile::tempdir; + use crate::error::Error; use crate::io::StdIo; - use crate::masp::fs::FsShieldedUtils; use crate::masp::test_utils::{test_client, TestingMaspClient}; - use crate::masp::types::IndexedNoteEntry; use crate::masp::utils::{DefaultLogger, RetryStrategy}; // A viewing key derived from A_SPENDING_KEY pub const AA_VIEWING_KEY: &str = 
"zvknam1qqqqqqqqqqqqqq9v0sls5r5de7njx8ehu49pqgmqr9ygelg87l5x8y4s9r0pjlvu6x74w9gjpw856zcu826qesdre628y6tjc26uhgj6d9zqur9l5u3p99d9ggc74ald6s8y3sdtka74qmheyqvdrasqpwyv2fsmxlz57lj4grm2pthzj3sflxc0jx0edrakx3vdcngrfjmru8ywkguru8mxss2uuqxdlglaz6undx5h8w7g70t2es850g48xzdkqay5qs0yw06rtxcpjdve6"; - /// Test that if fetching fails before finishing, /// we re-establish the fetching process - #[tokio::test(flavor = "multi_thread", worker_threads=2)] + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn test_retry_fetch() { let temp_dir = tempdir().unwrap(); let mut shielded_ctx = - FsShieldedUtils::new(temp_dir.path().to_path_buf()); + FsShieldedUtils::new(temp_dir.path().to_path_buf()); let (client, masp_tx_sender) = test_client(2.into()); - let io = StdIo::default(); + let io = StdIo; let logger = DefaultLogger::new(&io); - let vk = ExtendedFullViewingKey::from( - ExtendedViewingKey::from_str(AA_VIEWING_KEY).expect("Test failed") + let vk = ExtendedFullViewingKey::from( + ExtendedViewingKey::from_str(AA_VIEWING_KEY).expect("Test failed"), ) .fvk .vk; - let unscanned = shielded_ctx.unscanned.clone(); + let _unscanned = shielded_ctx.unscanned.clone(); masp_tx_sender.send(None).expect("Test failed"); // we first test that with no retries, a fetching failure // stops process - let result = shielded_ctx.fetch::<_, _, _, TestingMaspClient>( - &client, - &logger, - RetryStrategy::Times(1), - None, - None, - 0, - &[], - &[vk], - ).await.unwrap_err(); + let result = shielded_ctx + .fetch::<_, _, _, TestingMaspClient>( + &client, + &logger, + RetryStrategy::Times(1), + None, + None, + 0, + &[], + &[vk], + ) + .await + .unwrap_err(); match result { - Error::Other(msg) => assert_eq!(msg.as_str(), "After retrying, could not fetch all MASP txs."), + Error::Other(msg) => assert_eq!( + msg.as_str(), + "After retrying, could not fetch all MASP txs." 
+ ), other => panic!("{:?} does not match Error::Other(_)", other), } - let result = shielded_ctx.fetch::<_, _, _, TestingMaspClient>( - &client, - &logger, - RetryStrategy::Times(2), - None, - None, - 0, - &[], - &[vk], - ).await.unwrap_err(); - + let _result = shielded_ctx + .fetch::<_, _, _, TestingMaspClient>( + &client, + &logger, + RetryStrategy::Times(2), + None, + None, + 0, + &[], + &[vk], + ) + .await + .unwrap_err(); } } diff --git a/crates/sdk/src/masp/test_utils.rs b/crates/sdk/src/masp/test_utils.rs index a6fdaa6c14..ee23b05d44 100644 --- a/crates/sdk/src/masp/test_utils.rs +++ b/crates/sdk/src/masp/test_utils.rs @@ -2,17 +2,19 @@ use std::ops::{Deref, DerefMut}; use masp_primitives::merkle_tree::CommitmentTree; use masp_primitives::sapling::Node; -use tendermint_rpc::SimpleRequest; use namada_core::storage::{BlockHeight, IndexedTx}; use namada_state::LastBlock; +use tendermint_rpc::SimpleRequest; use crate::error::Error; use crate::io::Io; -use crate::masp::{ShieldedContext, ShieldedUtils}; use crate::masp::types::IndexedNoteEntry; -use crate::masp::utils::{CommitmentTreeUpdates, FetchQueueSender, MaspClient, ProgressLogger}; -use crate::queries::{Client, EncodedResponseQuery, Rpc, RPC}; +use crate::masp::utils::{ + CommitmentTreeUpdates, FetchQueueSender, MaspClient, ProgressLogger, +}; +use crate::masp::{ShieldedContext, ShieldedUtils}; use crate::queries::testing::TestClient; +use crate::queries::{Client, EncodedResponseQuery, Rpc, RPC}; /// A client for testing the shielded-sync functionality pub struct TestingClient { @@ -33,7 +35,6 @@ impl Deref for TestingClient { } impl DerefMut for TestingClient { - fn deref_mut(&mut self) -> &mut Self::Target { &mut self.inner } @@ -45,15 +46,29 @@ impl DerefMut for TestingClient { impl Client for TestingClient { type Error = std::io::Error; - async fn request(&self, path: String, data: Option>, height: Option, prove: bool) -> Result { + async fn request( + &self, + path: String, + data: Option>, + height: 
Option, + prove: bool, + ) -> Result { self.inner.request(path, data, height, prove).await } - async fn perform(&self, request: R) -> Result where R: SimpleRequest { + async fn perform( + &self, + request: R, + ) -> Result + where + R: SimpleRequest, + { self.inner.perform(request).await } } -pub fn test_client(last_height: BlockHeight) -> (TestingClient, flume::Sender>) { +pub fn test_client( + last_height: BlockHeight, +) -> (TestingClient, flume::Sender>) { let (sender, recv) = flume::unbounded(); let mut client = TestClient::new(RPC); client.state.in_mem_mut().last_block = Some(LastBlock { @@ -61,10 +76,13 @@ pub fn test_client(last_height: BlockHeight) -> (TestingClient, flume::Sender MaspClient<'a, TestingClient> for TestingMaspClient { - fn new(client: &'a TestingClient) -> Self where Self: 'a { + fn new(client: &'a TestingClient) -> Self + where + Self: 'a, + { Self { next_masp_txs: client.next_masp_txs.clone(), } @@ -84,7 +105,7 @@ impl<'a> MaspClient<'a, TestingClient> for TestingMaspClient { _: &ShieldedContext, _: &IO, _: IndexedTx, - _: BlockHeight + _: BlockHeight, ) -> Result { Ok(CommitmentTreeUpdates { commitment_tree: CommitmentTree::::empty(), @@ -102,12 +123,16 @@ impl<'a> MaspClient<'a, TestingClient> for TestingMaspClient { ) -> Result<(), Error> { // N.B. 
this assumes one masp tx per block for _ in logger.fetch(from..=to) { - let next_tx = self.next_masp_txs - .recv() - .expect("Test failed") - .ok_or_else(|| Error::Other("Connection to fetch MASP txs failed".to_string()))?; + let next_tx = + self.next_masp_txs.recv().expect("Test failed").ok_or_else( + || { + Error::Other( + "Connection to fetch MASP txs failed".to_string(), + ) + }, + )?; tx_sender.send(next_tx); } Ok(()) } -} \ No newline at end of file +} diff --git a/crates/sdk/src/masp/types.rs b/crates/sdk/src/masp/types.rs index 7e406329d5..ca15c57d8f 100644 --- a/crates/sdk/src/masp/types.rs +++ b/crates/sdk/src/masp/types.rs @@ -1,4 +1,4 @@ -use std::collections::{BTreeMap, BTreeSet, HashMap}; +use std::collections::{BTreeMap, BTreeSet}; use std::io::{Read, Write}; use std::sync::{Arc, Mutex}; @@ -21,6 +21,7 @@ use masp_proofs::bellman::groth16::PreparedVerifyingKey; use masp_proofs::bls12_381::Bls12; use namada_core::address::Address; use namada_core::borsh::{BorshDeserialize, BorshSerialize}; +use namada_core::collections::HashMap; use namada_core::dec::Dec; use namada_core::storage::{BlockHeight, Epoch, IndexedTx}; use namada_core::uint::Uint; @@ -104,8 +105,10 @@ pub struct MaspTokenRewardData { #[derive(Debug, Clone)] pub(super) struct ExtractedMaspTx { - fee_unshielding: Option<(BTreeSet, Transaction)>, - inner_tx: Option<(BTreeSet, Transaction)>, + pub(crate) fee_unshielding: + Option<(BTreeSet, Transaction)>, + pub(crate) inner_tx: + Option<(BTreeSet, Transaction)>, } /// MASP verifying keys @@ -154,50 +157,50 @@ impl ScannedData { mut self, ctx: &mut ShieldedContext, ) { - for (k, v) in self.note_map.drain() { + for (k, v) in self.note_map.drain(..) { ctx.note_map.insert(k, v); } - for (k, v) in self.nf_map.drain() { + for (k, v) in self.nf_map.drain(..) { ctx.nf_map.insert(k, v); } - for (k, v) in self.pos_map.drain() { + for (k, v) in self.pos_map.drain(..) 
{ let map = ctx.pos_map.entry(k).or_default(); for ix in v { map.insert(ix); } } - for (k, v) in self.div_map.drain() { + for (k, v) in self.div_map.drain(..) { ctx.div_map.insert(k, v); } - for (k, v) in self.vk_map.drain() { + for (k, v) in self.vk_map.drain(..) { ctx.vk_map.insert(k, v); } - for (k, v) in self.memo_map.drain() { + for (k, v) in self.memo_map.drain(..) { ctx.memo_map.insert(k, v); } ctx.decrypted_note_cache.merge(self.decrypted_note_cache); } pub(super) fn merge(&mut self, mut other: Self) { - for (k, v) in other.note_map.drain() { + for (k, v) in other.note_map.drain(..) { self.note_map.insert(k, v); } - for (k, v) in other.nf_map.drain() { + for (k, v) in other.nf_map.drain(..) { self.nf_map.insert(k, v); } - for (k, v) in other.pos_map.drain() { + for (k, v) in other.pos_map.drain(..) { let map = self.pos_map.entry(k).or_default(); for ix in v { map.insert(ix); } } - for (k, v) in other.div_map.drain() { + for (k, v) in other.div_map.drain(..) { self.div_map.insert(k, v); } - for (k, v) in other.vk_map.drain() { + for (k, v) in other.vk_map.drain(..) { self.vk_map.insert(k, v); } - for (k, v) in other.memo_map.drain() { + for (k, v) in other.memo_map.drain(..) { self.memo_map.insert(k, v); } for (k, v) in other.decrypted_note_cache.inner { @@ -233,7 +236,7 @@ impl DecryptedDataCache { } pub fn merge(&mut self, mut other: Self) { - for (k, v) in other.inner.drain() { + for (k, v) in other.inner.drain(..) { self.insert(k, v); } } @@ -247,12 +250,9 @@ impl DecryptedDataCache { pub fn drain( &mut self, - ) -> std::collections::hash_map::Drain< - '_, - (IndexedTx, ViewingKey), - DecryptedData, - > { - self.inner.drain() + ) -> impl Iterator + '_ + { + self.inner.drain(..) 
} } diff --git a/crates/sdk/src/masp/utils.rs b/crates/sdk/src/masp/utils.rs index 8f069851aa..cd88ee4d13 100644 --- a/crates/sdk/src/masp/utils.rs +++ b/crates/sdk/src/masp/utils.rs @@ -1,5 +1,5 @@ use core::str::FromStr; -use std::collections::{BTreeMap, BTreeSet, HashMap}; +use std::collections::{BTreeMap, BTreeSet}; use std::env; use std::marker::PhantomData; use std::path::PathBuf; @@ -15,6 +15,7 @@ use masp_primitives::transaction::Transaction; use masp_primitives::zip32::{ExtendedFullViewingKey, ExtendedSpendingKey}; use masp_proofs::prover::LocalTxProver; use namada_core::address::Address; +use namada_core::collections::HashMap; use namada_core::storage::{BlockHeight, IndexedTx, TxIndex}; use namada_core::token::Transfer; use namada_ibc::IbcMessage; @@ -26,7 +27,10 @@ use tokio::sync::mpsc::{Receiver, Sender}; use crate::error::{Error, QueryError}; use crate::io::Io; use crate::masp::shielded_ctx::ShieldedContext; -use crate::masp::types::{ContextSyncStatus, ExtractedMaspTx, IndexedNoteEntry, PVKs, ScannedData, TransactionDelta, Unscanned}; +use crate::masp::types::{ + ContextSyncStatus, ExtractedMaspTx, IndexedNoteEntry, PVKs, ScannedData, + TransactionDelta, Unscanned, +}; use crate::masp::{ENV_VAR_MASP_PARAMS_DIR, VERIFIYING_KEYS}; use crate::queries::Client; use crate::rpc::query_epoch_at_height; @@ -256,27 +260,27 @@ pub(super) async fn extract_masp_tx<'args, C: Client + Sync>( .ok() } } - .map(|(changed_keys, transfer)| { - if let Some(hash) = transfer.shielded { - let masp_tx = tx - .get_section(&hash) - .ok_or_else(|| { - Error::Other( - "Missing masp section in transaction".to_string(), - ) - })? - .masp_tx() - .ok_or_else(|| { - Error::Other("Missing masp transaction".to_string()) - })?; + .map(|(changed_keys, transfer)| { + if let Some(hash) = transfer.shielded { + let masp_tx = tx + .get_section(&hash) + .ok_or_else(|| { + Error::Other( + "Missing masp section in transaction".to_string(), + ) + })? 
+ .masp_tx() + .ok_or_else(|| { + Error::Other("Missing masp transaction".to_string()) + })?; - Ok::<_, Error>(Some((changed_keys, masp_tx))) - } else { - Ok(None) - } - }) - .transpose()? - .flatten(); + Ok::<_, Error>(Some((changed_keys, masp_tx))) + } else { + Ok(None) + } + }) + .transpose()? + .flatten(); Ok(ExtractedMaspTx { fee_unshielding: maybe_fee_unshield, @@ -287,10 +291,10 @@ pub(super) async fn extract_masp_tx<'args, C: Client + Sync>( // Extract the changed keys and Transaction hash from a MASP over ibc message pub(super) async fn extract_payload_from_shielded_action< 'args, - C: Client + Sync + C: Client + Sync, >( tx_data: &[u8], - mut args: ExtractShieldedActionArg<'args, C>, + args: ExtractShieldedActionArg<'args, C>, ) -> Result<(BTreeSet, Transfer), Error> { let message = namada_ibc::decode_message(tx_data) .map_err(|e| Error::Other(e.to_string()))?; @@ -425,17 +429,19 @@ fn get_tx_result( }) } -pub(super) struct CommitmentTreeUpdates { +pub struct CommitmentTreeUpdates { pub commitment_tree: CommitmentTree, pub witness_map: HashMap>, pub note_map_delta: BTreeMap, } +/// TODO: Used the sealed pattern? pub trait MaspClient<'a, C: Client> { fn new(client: &'a C) -> Self where Self: 'a; + #[allow(async_fn_in_trait)] async fn witness_map_updates( &self, ctx: &ShieldedContext, @@ -443,6 +449,8 @@ pub trait MaspClient<'a, C: Client> { last_witnessed_tx: IndexedTx, last_query_height: BlockHeight, ) -> Result; + + #[allow(async_fn_in_trait)] async fn update_commitment_tree( &self, ctx: &mut ShieldedContext, @@ -462,6 +470,8 @@ pub trait MaspClient<'a, C: Client> { ctx.tx_note_map.append(&mut note_map_delta); Ok(()) } + + #[allow(async_fn_in_trait)] async fn fetch_shielded_transfer( &self, logger: &impl ProgressLogger, @@ -473,7 +483,7 @@ pub trait MaspClient<'a, C: Client> { /// An inefficient MASP client which simply uses a /// client to the blockchain to query it directly. 
-pub(super) struct LedgerMaspClient<'a, C: Client> { +pub struct LedgerMaspClient<'a, C: Client> { client: &'a C, } @@ -571,12 +581,12 @@ where .to_string(), )) })?; - let txs_results = match get_indexed_masp_events_at_height( - &self.client, + let txs_results = match get_indexed_masp_events_at_height::( + self.client, height.into(), None, ) - .await? + .await? { Some(events) => events, None => continue, @@ -587,7 +597,8 @@ where // reduce the amount of data sent over the network, but this is a // minimal improvement and it's even hard to tell how many times // we'd need a single masp tx to make this worth it - let block = self.client + let block = self + .client .block(height as u32) .await .map_err(|e| Error::from(QueryError::General(e.to_string())))? @@ -605,8 +616,9 @@ where ExtractShieldedActionArg::Event(&tx_event), true, ) - .await?; - fee_unshielding.and_then(|(changed_keys, masp_transaction)| { + .await?; + if let Some((changed_keys, masp_transaction)) = fee_unshielding + { tx_sender.send(( IndexedTx { height: height.into(), @@ -615,8 +627,8 @@ where }, (epoch, changed_keys, masp_transaction), )); - }); - inner_tx.and_then(|(changed_keys, masp_transaction)| { + } + if let Some((changed_keys, masp_transaction)) = inner_tx { tx_sender.send(( IndexedTx { height: height.into(), @@ -625,7 +637,7 @@ where }, (epoch, changed_keys, masp_transaction), )); - }) + } } } Ok(()) @@ -788,7 +800,7 @@ impl TaskManager { locked.nullify_spent_notes(native_token)?; _ = locked.save().await; } - return Ok(()) + return Ok(()); } Action::Data(scanned, idx) => { // track the latest scanned height @@ -970,4 +982,4 @@ impl<'io, IO: Io> ProgressLogger for DefaultLogger<'io, IO> { let locked = self.progress.lock().unwrap(); locked.length - locked.index } -} \ No newline at end of file +} From c2c07bec9c35b74cf1a77274972adcbe49a3c4b6 Mon Sep 17 00:00:00 2001 From: satan Date: Thu, 25 Apr 2024 19:34:24 +0200 Subject: [PATCH 07/29] Fixed unit test for retrying fetch and fixed the bugs 
it caught --- crates/apps_lib/src/client/masp.rs | 33 ++++- crates/sdk/src/masp/shielded_ctx.rs | 188 ++++++++++++++++++++++++++-- crates/sdk/src/masp/test_utils.rs | 61 ++++++--- crates/sdk/src/masp/types.rs | 2 +- crates/sdk/src/masp/utils.rs | 77 +++++++++--- 5 files changed, 313 insertions(+), 48 deletions(-) diff --git a/crates/apps_lib/src/client/masp.rs b/crates/apps_lib/src/client/masp.rs index 4ec5ab301b..75489c4126 100644 --- a/crates/apps_lib/src/client/masp.rs +++ b/crates/apps_lib/src/client/masp.rs @@ -7,7 +7,7 @@ use namada_sdk::error::Error; use namada_sdk::io::Io; use namada_sdk::masp::types::IndexedNoteEntry; use namada_sdk::masp::utils::{ - LedgerMaspClient, ProgressLogger, ProgressType, RetryStrategy, + LedgerMaspClient, PeekableIter, ProgressLogger, ProgressType, RetryStrategy, }; use namada_sdk::masp::{ShieldedContext, ShieldedUtils}; use namada_sdk::queries::Client; @@ -192,6 +192,7 @@ where items: I, drawer: Arc>>, r#type: ProgressType, + peeked: Option, } impl<'io, T, I, IO> CliLogging<'io, T, I, IO> @@ -221,6 +222,7 @@ where items, drawer, r#type, + peeked: None, } } @@ -243,22 +245,41 @@ where } } -impl<'io, T, I, IO> Iterator for CliLogging<'io, T, I, IO> +impl<'io, T, I, IO> PeekableIter for CliLogging<'io, T, I, IO> where T: Debug, I: Iterator, IO: Io, { - type Item = T; + fn peek(&mut self) -> Option<&T> { + if self.peeked.is_none() { + self.peeked = self.items.next(); + } + self.peeked.as_ref() + } - fn next(&mut self) -> Option { - let next_item = self.items.next()?; + fn next(&mut self) -> Option { + self.peek(); + let next_item = self.peeked.take()?; self.advance_index(); self.draw(); Some(next_item) } } +impl<'io, T, I, IO> Iterator for CliLogging<'io, T, I, IO> +where + T: Debug, + I: Iterator, + IO: Io, +{ + type Item = T; + + fn next(&mut self) -> Option { + >::next(self) + } +} + /// A progress logger for the CLI #[derive(Clone)] pub struct CliLogger<'io, IO: Io> { @@ -286,7 +307,7 @@ impl<'io, IO: Io + Send + Sync> 
ProgressLogger for CliLogger<'io, IO> { io } - fn fetch(&self, items: I) -> impl Iterator + fn fetch(&self, items: I) -> impl PeekableIter where I: Iterator, { diff --git a/crates/sdk/src/masp/shielded_ctx.rs b/crates/sdk/src/masp/shielded_ctx.rs index a5e57b11a2..4920e57d54 100644 --- a/crates/sdk/src/masp/shielded_ctx.rs +++ b/crates/sdk/src/masp/shielded_ctx.rs @@ -237,7 +237,14 @@ impl ShieldedContext { let mut transaction_delta = TransactionDelta::new(); let mut scanned_data = ScannedData::default(); if let ContextSyncStatus::Confirmed = sync_status { - let mut note_pos = tx_note_map[&indexed_tx]; + let mut note_pos = + *tx_note_map.get(&indexed_tx).ok_or_else(|| { + Error::Other(format!( + "The scanning algorithm could not find the input {:?} \ + in the shielded context.", + indexed_tx + )) + })?; // Listen for notes sent to our viewing keys, only if we are syncing // (i.e. in a confirmed status) for so in shielded @@ -371,7 +378,7 @@ impl ShieldedContext { .serialize_to_vec(), ), ) - .into(), + .into(), ); // Vins contain the same address, so we // can @@ -1180,10 +1187,8 @@ impl ShieldedContext { ) -> Result, TransferErr> { // No shielded components are needed when neither source nor destination // are shielded - use rand::rngs::StdRng; use rand_core::SeedableRng; - let spending_key = source.spending_key(); let payment_address = target.payment_address(); // No shielded components are needed when neither source nor @@ -1794,6 +1799,8 @@ impl ShieldedContext { // the state changes to be applied to the shielded context // back on the main thread let decryption_handle = s.spawn(|| { + // N.B. DON'T GO PANICKING IN HERE. DON'T DO IT. SERIOUSLY. + // YOU COULD ACCIDENTALLY FREEZE EVERYTHING let txs = logger.scan(fetch_recv); txs.par_bridge().try_for_each( |(indexed_tx, (epoch, tx, stx))| { @@ -1859,11 +1866,12 @@ impl ShieldedContext { ) }) }); + // shut down the scanning thread. 
+ decryption_handle.join().unwrap()?; // if the scanning process errored, return that error here and // exit. decrypt_res?; - // shut down the scanning thread. - decryption_handle.join().unwrap()?; + // if fetching errored, log it. But this is recoverable. if let Err(e) = fetch_res { display_line!( @@ -2038,9 +2046,12 @@ mod shielded_ctx_tests { use core::str::FromStr; use masp_primitives::zip32::ExtendedFullViewingKey; + use namada_core::address::InternalAddress; use namada_core::masp::ExtendedViewingKey; + use namada_core::storage::Key; use tempfile::tempdir; + use super::*; use crate::error::Error; use crate::io::StdIo; use crate::masp::fs::FsShieldedUtils; @@ -2050,6 +2061,110 @@ mod shielded_ctx_tests { // A viewing key derived from A_SPENDING_KEY pub const AA_VIEWING_KEY: &str = "zvknam1qqqqqqqqqqqqqq9v0sls5r5de7njx8ehu49pqgmqr9ygelg87l5x8y4s9r0pjlvu6x74w9gjpw856zcu826qesdre628y6tjc26uhgj6d9zqur9l5u3p99d9ggc74ald6s8y3sdtka74qmheyqvdrasqpwyv2fsmxlz57lj4grm2pthzj3sflxc0jx0edrakx3vdcngrfjmru8ywkguru8mxss2uuqxdlglaz6undx5h8w7g70t2es850g48xzdkqay5qs0yw06rtxcpjdve6"; + /// A serialized transaction that will work for testing. + /// Would love to do this in a less opaque fashion, but + /// making these things is a misery not worth my time. 
+ /// + /// This a tx sending 1 BTC from Albert to Albert's PA + fn arbitrary_masp_tx() -> (Transaction, BTreeSet) { + const ALBERT: &str = "tnam1qxfj3sf6a0meahdu9t6znp05g8zx4dkjtgyn9gfu"; + const BTC: &str = "tnam1qy88jaykzw8tay6svmu6kkxxj5xd53w6qvqkw20u"; + let albert = Address::from_str(ALBERT).unwrap(); + let btc = Address::from_str(BTC).unwrap(); + let mut changed_keys = BTreeSet::default(); + changed_keys.insert(balance_key(&btc, &albert)); + changed_keys.insert(balance_key( + &btc, + &Address::Internal(InternalAddress::Masp), + )); + + let tx = Transaction::try_from_slice(&[ + 2, 0, 0, 0, 10, 39, 167, 38, 166, 117, 255, 233, 0, 0, 0, 0, 255, + 255, 255, 255, 1, 162, 120, 217, 193, 173, 117, 92, 126, 107, 199, + 182, 72, 95, 60, 122, 52, 9, 134, 72, 4, 167, 41, 187, 171, 17, + 124, 114, 84, 191, 75, 37, 2, 0, 225, 245, 5, 0, 0, 0, 0, 93, 213, + 181, 21, 38, 32, 230, 52, 155, 4, 203, 26, 70, 63, 59, 179, 142, 7, + 72, 76, 0, 0, 0, 1, 132, 100, 41, 23, 128, 97, 116, 40, 195, 40, + 46, 55, 79, 106, 234, 32, 4, 216, 106, 88, 173, 65, 140, 99, 239, + 71, 103, 201, 111, 149, 166, 13, 73, 224, 253, 98, 27, 199, 11, + 142, 56, 214, 4, 96, 35, 72, 83, 86, 194, 107, 163, 194, 238, 37, + 19, 171, 8, 129, 53, 246, 64, 220, 155, 47, 177, 165, 109, 232, 84, + 247, 128, 184, 40, 26, 113, 196, 190, 181, 57, 213, 45, 144, 46, + 12, 145, 128, 169, 116, 65, 51, 208, 239, 50, 217, 224, 98, 179, + 53, 18, 130, 183, 114, 225, 21, 34, 175, 144, 125, 239, 240, 82, + 100, 174, 1, 192, 32, 187, 208, 205, 31, 108, 59, 87, 201, 148, + 214, 244, 255, 8, 150, 100, 225, 11, 245, 221, 170, 85, 241, 110, + 50, 90, 151, 210, 169, 41, 3, 23, 160, 196, 117, 211, 217, 121, 9, + 42, 236, 19, 149, 94, 62, 163, 222, 172, 128, 197, 56, 100, 233, + 227, 239, 60, 182, 191, 55, 148, 17, 0, 168, 198, 84, 87, 191, 89, + 229, 9, 129, 165, 98, 200, 127, 225, 192, 58, 0, 92, 104, 97, 26, + 125, 169, 209, 40, 170, 29, 93, 16, 114, 174, 23, 233, 218, 112, + 26, 175, 196, 198, 197, 159, 167, 157, 16, 232, 247, 
193, 44, 82, + 143, 238, 179, 77, 87, 153, 3, 33, 207, 215, 142, 104, 179, 17, + 252, 148, 215, 150, 76, 56, 169, 13, 240, 4, 195, 221, 45, 250, 24, + 51, 243, 174, 176, 47, 117, 38, 1, 124, 193, 191, 55, 11, 164, 97, + 83, 188, 92, 202, 229, 106, 236, 165, 85, 236, 95, 255, 28, 71, 18, + 173, 202, 47, 63, 226, 129, 203, 154, 54, 155, 177, 161, 106, 210, + 220, 193, 142, 44, 105, 46, 164, 83, 136, 63, 24, 172, 157, 117, 9, + 202, 99, 223, 144, 36, 26, 154, 84, 175, 119, 12, 102, 71, 33, 14, + 131, 250, 86, 215, 153, 18, 94, 213, 61, 196, 67, 132, 204, 89, + 235, 241, 188, 147, 236, 92, 46, 83, 169, 236, 12, 34, 33, 65, 243, + 18, 23, 29, 41, 252, 207, 17, 196, 55, 56, 141, 158, 116, 227, 195, + 159, 233, 72, 26, 69, 72, 213, 50, 101, 161, 127, 213, 35, 210, + 223, 201, 219, 198, 192, 125, 129, 222, 178, 241, 116, 59, 255, 72, + 163, 46, 21, 222, 74, 202, 117, 217, 22, 188, 203, 2, 150, 38, 78, + 78, 250, 45, 36, 225, 240, 227, 115, 33, 114, 189, 25, 9, 219, 239, + 57, 103, 19, 109, 11, 5, 156, 43, 35, 53, 219, 250, 215, 185, 173, + 11, 101, 221, 29, 130, 74, 110, 225, 183, 77, 13, 52, 90, 183, 93, + 212, 175, 132, 21, 229, 109, 188, 124, 103, 3, 39, 174, 140, 115, + 67, 49, 100, 231, 129, 32, 24, 201, 196, 247, 33, 155, 20, 139, 34, + 3, 183, 12, 164, 6, 10, 219, 207, 151, 160, 4, 201, 160, 12, 156, + 82, 142, 226, 19, 134, 144, 53, 220, 140, 61, 74, 151, 129, 102, + 214, 73, 107, 147, 4, 98, 68, 79, 225, 103, 242, 187, 170, 102, + 225, 114, 4, 87, 96, 7, 212, 150, 127, 211, 158, 54, 86, 15, 191, + 21, 116, 202, 195, 60, 65, 134, 22, 2, 44, 133, 64, 181, 121, 66, + 218, 227, 72, 148, 63, 108, 227, 33, 66, 239, 77, 127, 139, 31, 16, + 150, 119, 198, 119, 229, 88, 188, 113, 80, 222, 86, 122, 181, 142, + 186, 130, 125, 236, 166, 95, 134, 243, 128, 65, 169, 33, 65, 73, + 182, 183, 156, 248, 39, 46, 199, 181, 85, 96, 126, 155, 189, 10, + 211, 145, 230, 94, 69, 232, 74, 87, 211, 46, 216, 30, 24, 38, 104, + 192, 165, 28, 73, 36, 227, 194, 41, 168, 5, 181, 176, 112, 67, 
92, + 158, 212, 129, 207, 182, 223, 59, 185, 84, 210, 147, 32, 29, 61, + 56, 185, 21, 156, 114, 34, 115, 29, 25, 89, 152, 56, 55, 238, 43, + 0, 114, 89, 79, 95, 104, 143, 180, 51, 53, 108, 223, 236, 59, 47, + 188, 174, 196, 101, 180, 207, 162, 198, 104, 52, 67, 132, 178, 9, + 40, 10, 88, 206, 25, 132, 60, 136, 13, 213, 223, 81, 196, 131, 118, + 15, 53, 125, 165, 177, 170, 170, 17, 94, 53, 151, 51, 16, 170, 23, + 118, 255, 26, 46, 47, 37, 73, 165, 26, 43, 10, 221, 4, 132, 15, 78, + 214, 161, 3, 220, 10, 87, 139, 85, 61, 39, 131, 242, 216, 235, 52, + 93, 46, 180, 196, 151, 54, 207, 80, 223, 90, 252, 77, 10, 122, 175, + 229, 7, 144, 41, 1, 162, 120, 217, 193, 173, 117, 92, 126, 107, + 199, 182, 72, 95, 60, 122, 52, 9, 134, 72, 4, 167, 41, 187, 171, + 17, 124, 114, 84, 191, 75, 37, 2, 0, 31, 10, 250, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 151, 241, 211, 167, + 49, 151, 215, 148, 38, 149, 99, 140, 79, 169, 172, 15, 195, 104, + 140, 79, 151, 116, 185, 5, 161, 78, 58, 63, 23, 27, 172, 88, 108, + 85, 232, 63, 249, 122, 26, 239, 251, 58, 240, 10, 219, 34, 198, + 187, 147, 224, 43, 96, 82, 113, 159, 96, 125, 172, 211, 160, 136, + 39, 79, 101, 89, 107, 208, 208, 153, 32, 182, 26, 181, 218, 97, + 187, 220, 127, 80, 73, 51, 76, 241, 18, 19, 148, 93, 87, 229, 172, + 125, 5, 93, 4, 43, 126, 2, 74, 162, 178, 240, 143, 10, 145, 38, 8, + 5, 39, 45, 197, 16, 81, 198, 228, 122, 212, 250, 64, 59, 2, 180, + 81, 11, 100, 122, 227, 209, 119, 11, 172, 3, 38, 168, 5, 187, 239, + 212, 128, 86, 200, 193, 33, 189, 184, 151, 241, 211, 167, 49, 151, + 215, 148, 38, 149, 99, 140, 79, 169, 172, 15, 195, 104, 140, 79, + 151, 116, 185, 5, 161, 78, 58, 63, 23, 27, 172, 88, 108, 85, 232, + 63, 249, 122, 26, 239, 251, 58, 240, 10, 219, 34, 198, 187, 37, + 197, 248, 90, 113, 62, 149, 117, 145, 118, 42, 241, 60, 208, 83, + 57, 96, 143, 17, 128, 92, 118, 158, 188, 77, 37, 184, 164, 135, + 246, 196, 57, 198, 106, 139, 33, 15, 207, 0, 101, 143, 92, 178, + 132, 19, 106, 221, 246, 176, 
100, 20, 114, 26, 55, 163, 14, 173, + 255, 121, 181, 58, 121, 140, 3, + ]) + .expect("Test failed"); + (tx, changed_keys) + } + /// Test that if fetching fails before finishing, /// we re-establish the fetching process #[tokio::test(flavor = "multi_thread", worker_threads = 2)] @@ -2065,7 +2180,6 @@ mod shielded_ctx_tests { ) .fvk .vk; - let _unscanned = shielded_ctx.unscanned.clone(); masp_tx_sender.send(None).expect("Test failed"); // we first test that with no retries, a fetching failure @@ -2091,7 +2205,33 @@ mod shielded_ctx_tests { other => panic!("{:?} does not match Error::Other(_)", other), } - let _result = shielded_ctx + // We now have a fetch failure followed by two successful + // masp txs from the same block. + let (masp_tx, changed_keys) = arbitrary_masp_tx(); + masp_tx_sender.send(None).expect("Test failed"); + masp_tx_sender + .send(Some(( + IndexedTx { + height: 1.into(), + index: TxIndex(1), + is_wrapper: false, + }, + (Default::default(), changed_keys.clone(), masp_tx.clone()), + ))) + .expect("Test failed"); + masp_tx_sender + .send(Some(( + IndexedTx { + height: 1.into(), + index: TxIndex(2), + is_wrapper: false, + }, + (Default::default(), changed_keys, masp_tx.clone()), + ))) + .expect("Test failed"); + + // This should complete successfully + shielded_ctx .fetch::<_, _, _, TestingMaspClient>( &client, &logger, @@ -2103,6 +2243,36 @@ mod shielded_ctx_tests { &[vk], ) .await - .unwrap_err(); + .expect("Test failed"); + + shielded_ctx.load_confirmed().await.expect("Test failed"); + let keys = shielded_ctx + .tx_note_map + .keys() + .cloned() + .collect::>(); + let expected = BTreeSet::from([ + IndexedTx { + height: 1.into(), + index: TxIndex(1), + is_wrapper: false, + }, + IndexedTx { + height: 1.into(), + index: TxIndex(2), + is_wrapper: false, + }, + ]); + + assert_eq!(keys, expected); + assert_eq!( + shielded_ctx.vk_heights[&vk].unwrap(), + IndexedTx { + height: 1.into(), + index: TxIndex(2), + is_wrapper: false, + } + ); + 
assert_eq!(shielded_ctx.note_map.len(), 2); } } diff --git a/crates/sdk/src/masp/test_utils.rs b/crates/sdk/src/masp/test_utils.rs index ee23b05d44..fbc6a4e801 100644 --- a/crates/sdk/src/masp/test_utils.rs +++ b/crates/sdk/src/masp/test_utils.rs @@ -1,3 +1,4 @@ +use std::collections::BTreeMap; use std::ops::{Deref, DerefMut}; use masp_primitives::merkle_tree::CommitmentTree; @@ -10,7 +11,8 @@ use crate::error::Error; use crate::io::Io; use crate::masp::types::IndexedNoteEntry; use crate::masp::utils::{ - CommitmentTreeUpdates, FetchQueueSender, MaspClient, ProgressLogger, + CommitmentTreeUpdates, FetchQueueSender, MaspClient, PeekableIter, + ProgressLogger, }; use crate::masp::{ShieldedContext, ShieldedUtils}; use crate::queries::testing::TestClient; @@ -24,6 +26,9 @@ pub struct TestingClient { /// the fetch algorithm. The option is to mock connection /// failures. next_masp_txs: flume::Receiver>, + /// We sometimes want to iterate over values in the above + /// channel more than once. Thus we need to resend them. 
+ send_masp_txs: flume::Sender>, } impl Deref for TestingClient { @@ -80,24 +85,23 @@ pub fn test_client( TestingClient { inner: client, next_masp_txs: recv, + send_masp_txs: sender.clone(), }, sender, ) } -#[derive(Debug, Clone)] -pub struct TestingMaspClient { - next_masp_txs: flume::Receiver>, +#[derive(Clone)] +pub struct TestingMaspClient<'a> { + client: &'a TestingClient, } -impl<'a> MaspClient<'a, TestingClient> for TestingMaspClient { +impl<'a> MaspClient<'a, TestingClient> for TestingMaspClient<'a> { fn new(client: &'a TestingClient) -> Self where Self: 'a, { - Self { - next_masp_txs: client.next_masp_txs.clone(), - } + Self { client } } async fn witness_map_updates( @@ -107,10 +111,26 @@ impl<'a> MaspClient<'a, TestingClient> for TestingMaspClient { _: IndexedTx, _: BlockHeight, ) -> Result { + let mut note_map_delta: BTreeMap = Default::default(); + let mut channel_temp = vec![]; + let mut note_pos = 0; + for msg in self.client.next_masp_txs.drain() { + if let Some((ix, _)) = msg.as_ref() { + note_map_delta.insert(*ix, note_pos); + note_pos += 1; + } + channel_temp.push(msg); + } + for msg in channel_temp.drain(..) { + self.client + .send_masp_txs + .send(msg) + .map_err(|e| Error::Other(e.to_string()))?; + } Ok(CommitmentTreeUpdates { commitment_tree: CommitmentTree::::empty(), witness_map: Default::default(), - note_map_delta: Default::default(), + note_map_delta, }) } @@ -122,16 +142,21 @@ impl<'a> MaspClient<'a, TestingClient> for TestingMaspClient { to: u64, ) -> Result<(), Error> { // N.B. 
this assumes one masp tx per block - for _ in logger.fetch(from..=to) { - let next_tx = - self.next_masp_txs.recv().expect("Test failed").ok_or_else( - || { - Error::Other( - "Connection to fetch MASP txs failed".to_string(), - ) - }, - )?; + let mut fetch_iter = logger.fetch(from..=to); + + while fetch_iter.peek().is_some() { + let next_tx = self + .client + .next_masp_txs + .recv() + .expect("Test failed") + .ok_or_else(|| { + Error::Other( + "Connection to fetch MASP txs failed".to_string(), + ) + })?; tx_sender.send(next_tx); + fetch_iter.next(); } Ok(()) } diff --git a/crates/sdk/src/masp/types.rs b/crates/sdk/src/masp/types.rs index ca15c57d8f..6afc649dfb 100644 --- a/crates/sdk/src/masp/types.rs +++ b/crates/sdk/src/masp/types.rs @@ -262,7 +262,7 @@ impl DecryptedDataCache { /// all transactions from a given height, or none. #[derive(Debug, Default, Clone)] pub struct Unscanned { - txs: Arc>, + pub(super) txs: Arc>, } impl BorshSerialize for Unscanned { diff --git a/crates/sdk/src/masp/utils.rs b/crates/sdk/src/masp/utils.rs index cd88ee4d13..42a69117b4 100644 --- a/crates/sdk/src/masp/utils.rs +++ b/crates/sdk/src/masp/utils.rs @@ -567,8 +567,12 @@ where to: u64, ) -> Result<(), Error> { // Fetch all the transactions we do not have yet - for height in logger.fetch(from..=to) { + let mut fetch_iter = logger.fetch(from..=to); + + while let Some(height) = fetch_iter.peek() { + let height = *height; if tx_sender.contains_height(height) { + fetch_iter.next(); continue; } // Get the valid masp transactions at the specified height @@ -589,7 +593,10 @@ where .await? { Some(events) => events, - None => continue, + None => { + fetch_iter.next(); + continue; + } }; // Query the actual block to get the txs bytes. 
If we only need one @@ -639,7 +646,9 @@ where )); } } + fetch_iter.next(); } + Ok(()) } } @@ -822,17 +831,13 @@ impl TaskManager { impl TaskScheduler { /// Signal the [`TaskManager`] that the scanning thread has completed pub(super) fn complete(&self, with_error: bool) { - self.action - .blocking_send(Action::Complete { with_error }) - .unwrap() + _ = self.action.blocking_send(Action::Complete { with_error }); } /// Schedule the [`TaskManager`] to save the latest context /// state changes. pub(super) fn save(&self, data: ScannedData, latest_idx: IndexedTx) { - self.action - .blocking_send(Action::Data(data, latest_idx)) - .unwrap(); + _ = self.action.blocking_send(Action::Data(data, latest_idx)); } /// Calls the `scan_tx` method of the shielded context @@ -893,10 +898,28 @@ pub enum ProgressType { Scan, } +pub trait PeekableIter { + fn peek(&mut self) -> Option<&I>; + fn next(&mut self) -> Option; +} + +impl PeekableIter for std::iter::Peekable +where + I: Iterator, +{ + fn peek(&mut self) -> Option<&J> { + self.peek() + } + + fn next(&mut self) -> Option { + ::next(self) + } +} + pub trait ProgressLogger { fn io(&self) -> &IO; - fn fetch(&self, items: I) -> impl Iterator + fn fetch(&self, items: I) -> impl PeekableIter where I: Iterator; @@ -938,13 +961,23 @@ where { inner: I, progress: Arc>, + peeked: Option, } -impl> Iterator for DefaultFetchIterator { - type Item = u64; +impl PeekableIter for DefaultFetchIterator +where + I: Iterator, +{ + fn peek(&mut self) -> Option<&u64> { + if self.peeked.is_none() { + self.peeked = self.inner.next(); + } + self.peeked.as_ref() + } - fn next(&mut self) -> Option { - let item = self.inner.next()?; + fn next(&mut self) -> Option { + self.peek(); + let item = self.peeked.take()?; let mut locked = self.progress.lock().unwrap(); locked.index += 1; Some(item) @@ -956,7 +989,7 @@ impl<'io, IO: Io> ProgressLogger for DefaultLogger<'io, IO> { self.io } - fn fetch(&self, items: I) -> impl Iterator + fn fetch(&self, items: I) -> impl 
PeekableIter where I: Iterator, { @@ -967,6 +1000,7 @@ impl<'io, IO: Io> ProgressLogger for DefaultLogger<'io, IO> { DefaultFetchIterator { inner: items, progress: self.progress.clone(), + peeked: None, } } @@ -983,3 +1017,18 @@ impl<'io, IO: Io> ProgressLogger for DefaultLogger<'io, IO> { locked.length - locked.index } } + +#[cfg(test)] +mod util_tests { + use crate::masp::utils::RetryStrategy; + + #[test] + fn test_retry_strategy() { + let strategy = RetryStrategy::Times(3); + let mut counter = 0; + for _ in strategy { + counter += 1; + } + assert_eq!(counter, 3); + } +} From 514f6f04082db01a5be43ec22bf65dfa15e9ca58 Mon Sep 17 00:00:00 2001 From: satan Date: Fri, 26 Apr 2024 16:03:58 +0200 Subject: [PATCH 08/29] More unit tests --- crates/apps_lib/src/client/masp.rs | 28 ++-- crates/sdk/src/masp/shielded_ctx.rs | 248 +++++++++++++++++++++++++--- crates/sdk/src/masp/test_utils.rs | 83 +++++++++- crates/sdk/src/masp/utils.rs | 45 +++-- 4 files changed, 353 insertions(+), 51 deletions(-) diff --git a/crates/apps_lib/src/client/masp.rs b/crates/apps_lib/src/client/masp.rs index 75489c4126..74c0b47d26 100644 --- a/crates/apps_lib/src/client/masp.rs +++ b/crates/apps_lib/src/client/masp.rs @@ -7,7 +7,8 @@ use namada_sdk::error::Error; use namada_sdk::io::Io; use namada_sdk::masp::types::IndexedNoteEntry; use namada_sdk::masp::utils::{ - LedgerMaspClient, PeekableIter, ProgressLogger, ProgressType, RetryStrategy, + LedgerMaspClient, PeekableIter, ProgressTracker, ProgressType, + RetryStrategy, }; use namada_sdk::masp::{ShieldedContext, ShieldedUtils}; use namada_sdk::queries::Client; @@ -36,7 +37,7 @@ pub async fn syncing< }; display_line!(io, "\n\n"); - let logger = CliLogger::new(io); + let logger = CliProgressTracker::new(io); let sync = async move { shielded .fetch::<_, _, _, LedgerMaspClient>( @@ -183,7 +184,7 @@ impl<'io, IO: Io> Drop for StdoutDrawer<'io, IO> { } } -pub struct CliLogging<'io, T, I, IO> +pub struct LoggingIterator<'io, T, I, IO> where T: Debug, I: 
Iterator, @@ -195,7 +196,8 @@ where peeked: Option, } -impl<'io, T, I, IO> CliLogging<'io, T, I, IO> +/// An iterator that logs to screen the progress it tracks +impl<'io, T, I, IO> LoggingIterator<'io, T, I, IO> where T: Debug, I: Iterator, @@ -245,7 +247,7 @@ where } } -impl<'io, T, I, IO> PeekableIter for CliLogging<'io, T, I, IO> +impl<'io, T, I, IO> PeekableIter for LoggingIterator<'io, T, I, IO> where T: Debug, I: Iterator, @@ -267,7 +269,7 @@ where } } -impl<'io, T, I, IO> Iterator for CliLogging<'io, T, I, IO> +impl<'io, T, I, IO> Iterator for LoggingIterator<'io, T, I, IO> where T: Debug, I: Iterator, @@ -280,13 +282,13 @@ where } } -/// A progress logger for the CLI +/// A progress tracker for the CLI #[derive(Clone)] -pub struct CliLogger<'io, IO: Io> { +pub struct CliProgressTracker<'io, IO: Io> { drawer: Arc>>, } -impl<'io, IO: Io> CliLogger<'io, IO> { +impl<'io, IO: Io> CliProgressTracker<'io, IO> { pub fn new(io: &'io IO) -> Self { Self { drawer: Arc::new(Mutex::new(StdoutDrawer { @@ -298,7 +300,9 @@ impl<'io, IO: Io> CliLogger<'io, IO> { } } -impl<'io, IO: Io + Send + Sync> ProgressLogger for CliLogger<'io, IO> { +impl<'io, IO: Io + Send + Sync> ProgressTracker + for CliProgressTracker<'io, IO> +{ fn io(&self) -> &IO { let io = { let locked = self.drawer.lock().unwrap(); @@ -311,14 +315,14 @@ impl<'io, IO: Io + Send + Sync> ProgressLogger for CliLogger<'io, IO> { where I: Iterator, { - CliLogging::new(items, ProgressType::Fetch, self.drawer.clone()) + LoggingIterator::new(items, ProgressType::Fetch, self.drawer.clone()) } fn scan(&self, items: I) -> impl Iterator + Send where I: Iterator + Send, { - CliLogging::new(items, ProgressType::Scan, self.drawer.clone()) + LoggingIterator::new(items, ProgressType::Scan, self.drawer.clone()) } fn left_to_fetch(&self) -> usize { diff --git a/crates/sdk/src/masp/shielded_ctx.rs b/crates/sdk/src/masp/shielded_ctx.rs index 4920e57d54..3a4610ea09 100644 --- a/crates/sdk/src/masp/shielded_ctx.rs +++ 
b/crates/sdk/src/masp/shielded_ctx.rs @@ -57,9 +57,9 @@ use crate::masp::types::{ }; use crate::masp::utils::{ cloned_pair, extract_masp_tx, extract_payload, fetch_channel, - is_amount_required, to_viewing_key, DefaultLogger, + is_amount_required, to_viewing_key, DefaultTracker, ExtractShieldedActionArg, FetchQueueSender, LedgerMaspClient, MaspClient, - ProgressLogger, RetryStrategy, ShieldedUtils, TaskManager, + ProgressTracker, RetryStrategy, ShieldedUtils, TaskManager, }; use crate::masp::NETWORK; use crate::queries::Client; @@ -198,7 +198,7 @@ impl ShieldedContext { >( block_sender: FetchQueueSender, client: &'a C, - logger: &impl ProgressLogger, + progress: &impl ProgressTracker, last_indexed_tx: Option, last_query_height: BlockHeight, ) -> Result<(), Error> { @@ -208,7 +208,7 @@ impl ShieldedContext { last_indexed_tx.map_or_else(|| 1, |last| last.0); client .fetch_shielded_transfer( - logger, + progress, block_sender, first_height_to_query, last_query_height.0, @@ -1709,12 +1709,12 @@ impl ShieldedContext { 'a, C: Client + Sync, IO: Io + Send + Sync, - L: ProgressLogger + Sync, + T: ProgressTracker + Sync, M: MaspClient<'a, C> + 'a, >( &mut self, client: &'a C, - logger: &L, + progress: &T, retry: RetryStrategy, start_query_height: Option, last_query_height: Option, @@ -1768,7 +1768,7 @@ impl ShieldedContext { std::cmp::min(last_query_height, last_block_height); self.update_witness_map::<_, _, M>( client, - logger.io(), + progress.io(), last_witnessed_tx.unwrap_or_default(), last_query_height, ) @@ -1801,7 +1801,7 @@ impl ShieldedContext { let decryption_handle = s.spawn(|| { // N.B. DON'T GO PANICKING IN HERE. DON'T DO IT. SERIOUSLY. 
// YOU COULD ACCIDENTALLY FREEZE EVERYTHING - let txs = logger.scan(fetch_recv); + let txs = progress.scan(fetch_recv); txs.par_bridge().try_for_each( |(indexed_tx, (epoch, tx, stx))| { let mut scanned_data = ScannedData::default(); @@ -1859,7 +1859,7 @@ impl ShieldedContext { Self::fetch_shielded_transfers::<_, _, M>( fetch_send, client, - logger, + progress, start_height, last_query_height, ) @@ -1875,7 +1875,7 @@ impl ShieldedContext { // if fetching errored, log it. But this is recoverable. if let Err(e) = fetch_res { display_line!( - logger.io(), + progress.io(), "Error encountered while fetching: {}", e.to_string() ); @@ -1883,11 +1883,11 @@ impl ShieldedContext { // if fetching failed for before completing, we restart // the fetch process. Otherwise, we can break the loop. - if logger.left_to_fetch() == 0 { + if progress.left_to_fetch() == 0 { break; } } - if logger.left_to_fetch() != 0 { + if progress.left_to_fetch() != 0 { Err(Error::Other( "After retrying, could not fetch all MASP txs.".to_string(), )) @@ -1925,7 +1925,7 @@ impl ShieldedContext { let block_results = rpc::query_results(client).await?; self.fetch::<_, _, _, LedgerMaspClient>( client, - &DefaultLogger::new(io), + &DefaultTracker::new(io), RetryStrategy::Forever, None, None, @@ -2055,8 +2055,10 @@ mod shielded_ctx_tests { use crate::error::Error; use crate::io::StdIo; use crate::masp::fs::FsShieldedUtils; - use crate::masp::test_utils::{test_client, TestingMaspClient}; - use crate::masp::utils::{DefaultLogger, RetryStrategy}; + use crate::masp::test_utils::{ + test_client, TestUnscannedTracker, TestingMaspClient, + }; + use crate::masp::utils::{DefaultTracker, RetryStrategy}; // A viewing key derived from A_SPENDING_KEY pub const AA_VIEWING_KEY: &str = 
"zvknam1qqqqqqqqqqqqqq9v0sls5r5de7njx8ehu49pqgmqr9ygelg87l5x8y4s9r0pjlvu6x74w9gjpw856zcu826qesdre628y6tjc26uhgj6d9zqur9l5u3p99d9ggc74ald6s8y3sdtka74qmheyqvdrasqpwyv2fsmxlz57lj4grm2pthzj3sflxc0jx0edrakx3vdcngrfjmru8ywkguru8mxss2uuqxdlglaz6undx5h8w7g70t2es850g48xzdkqay5qs0yw06rtxcpjdve6"; @@ -2166,7 +2168,7 @@ mod shielded_ctx_tests { } /// Test that if fetching fails before finishing, - /// we re-establish the fetching process + /// we re-establish the fetching process. #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn test_retry_fetch() { let temp_dir = tempdir().unwrap(); @@ -2174,7 +2176,7 @@ mod shielded_ctx_tests { FsShieldedUtils::new(temp_dir.path().to_path_buf()); let (client, masp_tx_sender) = test_client(2.into()); let io = StdIo; - let logger = DefaultLogger::new(&io); + let progress = DefaultTracker::new(&io); let vk = ExtendedFullViewingKey::from( ExtendedViewingKey::from_str(AA_VIEWING_KEY).expect("Test failed"), ) @@ -2187,7 +2189,7 @@ mod shielded_ctx_tests { let result = shielded_ctx .fetch::<_, _, _, TestingMaspClient>( &client, - &logger, + &progress, RetryStrategy::Times(1), None, None, @@ -2234,7 +2236,7 @@ mod shielded_ctx_tests { shielded_ctx .fetch::<_, _, _, TestingMaspClient>( &client, - &logger, + &progress, RetryStrategy::Times(2), None, None, @@ -2275,4 +2277,212 @@ mod shielded_ctx_tests { ); assert_eq!(shielded_ctx.note_map.len(), 2); } + + /// Test that the progress tracker correctly keeps + /// track of how many blocks there are left to fetch + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] + async fn test_left_to_fetch() { + let temp_dir = tempdir().unwrap(); + let mut shielded_ctx = + FsShieldedUtils::new(temp_dir.path().to_path_buf()); + let (client, masp_tx_sender) = test_client(2.into()); + let io = StdIo; + let progress = DefaultTracker::new(&io); + let vk = ExtendedFullViewingKey::from( + ExtendedViewingKey::from_str(AA_VIEWING_KEY).expect("Test failed"), + ) + .fvk + .vk; + let (masp_tx, 
changed_keys) = arbitrary_masp_tx(); + + // first fetch no blocks + masp_tx_sender.send(None).expect("Test failed"); + shielded_ctx + .fetch::<_, _, _, TestingMaspClient>( + &client, + &progress, + RetryStrategy::Times(1), + None, + None, + 0, + &[], + &[vk], + ) + .await + .unwrap_err(); + assert_eq!(progress.left_to_fetch(), 2); + + // fetch one of the two blocks + masp_tx_sender + .send(Some(( + IndexedTx { + height: 1.into(), + index: Default::default(), + is_wrapper: false, + }, + (Default::default(), changed_keys.clone(), masp_tx.clone()), + ))) + .expect("Test failed"); + masp_tx_sender.send(None).expect("Test failed"); + shielded_ctx + .fetch::<_, _, _, TestingMaspClient>( + &client, + &progress, + RetryStrategy::Times(1), + None, + None, + 0, + &[], + &[vk], + ) + .await + .unwrap_err(); + assert_eq!(progress.left_to_fetch(), 1); + + // fetch no blocks + masp_tx_sender.send(None).expect("Test failed"); + shielded_ctx + .fetch::<_, _, _, TestingMaspClient>( + &client, + &progress, + RetryStrategy::Times(1), + None, + None, + 0, + &[], + &[vk], + ) + .await + .unwrap_err(); + assert_eq!(progress.left_to_fetch(), 1); + + // fetch no blocks, but increase the latest block height + // thus the amount left to fetch should increase + let (client, masp_tx_sender) = test_client(3.into()); + masp_tx_sender.send(None).expect("Test failed"); + shielded_ctx + .fetch::<_, _, _, TestingMaspClient>( + &client, + &progress, + RetryStrategy::Times(1), + None, + None, + 0, + &[], + &[vk], + ) + .await + .unwrap_err(); + assert_eq!(progress.left_to_fetch(), 2); + + // fetch remaining block + masp_tx_sender + .send(Some(( + IndexedTx { + height: 2.into(), + index: Default::default(), + is_wrapper: false, + }, + (Default::default(), changed_keys.clone(), masp_tx.clone()), + ))) + .expect("Test failed"); + masp_tx_sender + .send(Some(( + IndexedTx { + height: 3.into(), + index: Default::default(), + is_wrapper: false, + }, + (Default::default(), changed_keys.clone(), 
masp_tx.clone()), + ))) + .expect("Test failed"); + // this should not produce an error since we have fetched + // all expected blocks + masp_tx_sender.send(None).expect("Test failed"); + shielded_ctx + .fetch::<_, _, _, TestingMaspClient>( + &client, + &progress, + RetryStrategy::Times(1), + None, + None, + 0, + &[], + &[vk], + ) + .await + .expect("Test failed"); + assert_eq!(progress.left_to_fetch(), 0); + } + + /// Test that if we don't scan all fetched notes, they + /// are persisted in a cache + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] + async fn test_unscanned_cache() { + let (client, masp_tx_sender) = test_client(2.into()); + let temp_dir = tempdir().unwrap(); + let mut shielded_ctx = + FsShieldedUtils::new(temp_dir.path().to_path_buf()); + + let io = StdIo; + let progress = TestUnscannedTracker::new(&io); + let vk = ExtendedFullViewingKey::from( + ExtendedViewingKey::from_str(AA_VIEWING_KEY).expect("Test failed"), + ) + .fvk + .vk; + + // the fetched txs + let (masp_tx, changed_keys) = arbitrary_masp_tx(); + masp_tx_sender + .send(Some(( + IndexedTx { + height: 1.into(), + index: TxIndex(1), + is_wrapper: false, + }, + (Default::default(), changed_keys.clone(), masp_tx.clone()), + ))) + .expect("Test failed"); + masp_tx_sender + .send(Some(( + IndexedTx { + height: 1.into(), + index: TxIndex(2), + is_wrapper: false, + }, + (Default::default(), changed_keys.clone(), masp_tx.clone()), + ))) + .expect("Test failed"); + + shielded_ctx + .fetch::<_, _, _, TestingMaspClient>( + &client, + &progress, + RetryStrategy::Times(2), + None, + None, + 0, + &[], + &[vk], + ) + .await + .expect("Test failed"); + + shielded_ctx.load_confirmed().await.expect("Test failed"); + let keys = shielded_ctx + .unscanned + .txs + .lock() + .unwrap() + .keys() + .cloned() + .collect::>(); + let expected = vec![IndexedTx { + height: 1.into(), + index: TxIndex(2), + is_wrapper: false, + }]; + assert_eq!(keys, expected); + } } diff --git 
a/crates/sdk/src/masp/test_utils.rs b/crates/sdk/src/masp/test_utils.rs index fbc6a4e801..2edc7c9159 100644 --- a/crates/sdk/src/masp/test_utils.rs +++ b/crates/sdk/src/masp/test_utils.rs @@ -1,5 +1,6 @@ use std::collections::BTreeMap; use std::ops::{Deref, DerefMut}; +use std::sync::{Arc, Mutex}; use masp_primitives::merkle_tree::CommitmentTree; use masp_primitives::sapling::Node; @@ -11,8 +12,8 @@ use crate::error::Error; use crate::io::Io; use crate::masp::types::IndexedNoteEntry; use crate::masp::utils::{ - CommitmentTreeUpdates, FetchQueueSender, MaspClient, PeekableIter, - ProgressLogger, + CommitmentTreeUpdates, FetchQueueSender, IterProgress, MaspClient, + PeekableIter, ProgressTracker, }; use crate::masp::{ShieldedContext, ShieldedUtils}; use crate::queries::testing::TestClient; @@ -136,7 +137,7 @@ impl<'a> MaspClient<'a, TestingClient> for TestingMaspClient<'a> { async fn fetch_shielded_transfer( &self, - logger: &impl ProgressLogger, + logger: &impl ProgressTracker, mut tx_sender: FetchQueueSender, from: u64, to: u64, @@ -161,3 +162,79 @@ impl<'a> MaspClient<'a, TestingClient> for TestingMaspClient<'a> { Ok(()) } } + +/// An iterator that yields its first element +/// but runs forever on the second +/// `next` call. +struct YieldOnceIterator { + first: Option, +} + +impl YieldOnceIterator { + fn new(mut iter: T) -> Self + where + T: Iterator, + { + let first = iter.next(); + Self { first } + } +} + +impl Iterator for YieldOnceIterator { + type Item = IndexedNoteEntry; + + fn next(&mut self) -> Option { + self.first.take() + } +} + +/// A progress tracker that only scans the first fetched +/// block. The rest are left in the unscanned cache +/// for the purposes of testing the persistence of +/// this cache. 
+pub(super) struct TestUnscannedTracker<'io, IO> { + io: &'io IO, + progress: Arc>, +} + +impl<'io, IO: Io> TestUnscannedTracker<'io, IO> { + pub fn new(io: &'io IO) -> Self { + Self { + io, + progress: Arc::new(Mutex::new(Default::default())), + } + } +} + +impl<'io, IO: Io> ProgressTracker for TestUnscannedTracker<'io, IO> { + fn io(&self) -> &IO { + self.io + } + + fn fetch(&self, items: I) -> impl PeekableIter + where + I: Iterator, + { + { + let mut locked = self.progress.lock().unwrap(); + locked.length = items.size_hint().0; + } + crate::masp::utils::DefaultFetchIterator { + inner: items, + progress: self.progress.clone(), + peeked: None, + } + } + + fn scan(&self, items: I) -> impl Iterator + Send + where + I: Iterator + Send, + { + YieldOnceIterator::new(items) + } + + fn left_to_fetch(&self) -> usize { + let locked = self.progress.lock().unwrap(); + locked.length - locked.index + } +} diff --git a/crates/sdk/src/masp/utils.rs b/crates/sdk/src/masp/utils.rs index 42a69117b4..9ecd2526ac 100644 --- a/crates/sdk/src/masp/utils.rs +++ b/crates/sdk/src/masp/utils.rs @@ -474,7 +474,7 @@ pub trait MaspClient<'a, C: Client> { #[allow(async_fn_in_trait)] async fn fetch_shielded_transfer( &self, - logger: &impl ProgressLogger, + progress: &impl ProgressTracker, tx_sender: FetchQueueSender, from: u64, to: u64, @@ -506,10 +506,10 @@ where last_query_height: BlockHeight, ) -> Result { let (tx_sender, tx_receiver) = fetch_channel::new(Default::default()); - let logger = DefaultLogger::new(io); + let progress = DefaultTracker::new(io); let (res, updates) = tokio::join!( self.fetch_shielded_transfer( - &logger, + &progress, tx_sender, last_witnessed_tx.height.0, last_query_height.0, @@ -561,13 +561,13 @@ where async fn fetch_shielded_transfer( &self, - logger: &impl ProgressLogger, + progress: &impl ProgressTracker, mut tx_sender: FetchQueueSender, from: u64, to: u64, ) -> Result<(), Error> { // Fetch all the transactions we do not have yet - let mut fetch_iter = 
logger.fetch(from..=to); + let mut fetch_iter = progress.fetch(from..=to); while let Some(height) = fetch_iter.peek() { let height = *height; @@ -803,7 +803,10 @@ impl TaskManager { // update each key to be synced to the latest scanned // height. for (_, h) in locked.vk_heights.iter_mut() { - *h = Some(self.latest_idx); + // Due to a failure to fetch new blocks, we + // may not have made scanning progress. Hence + // the max computation. + *h = std::cmp::max(*h, Some(self.latest_idx)); } // updated the spent notes and balances locked.nullify_spent_notes(native_token)?; @@ -916,7 +919,15 @@ where } } -pub trait ProgressLogger { +/// This trait keeps track of how much progress the +/// shielded sync algorithm has made relative to the inputs. +/// +/// It should track how much has been fetched and scanned and +/// whether the fetching has been finished. +/// +/// Additionally, it has access to IO in case the struct implementing +/// this trait wishes to log this progress. +pub trait ProgressTracker { fn io(&self) -> &IO; fn fetch(&self, items: I) -> impl PeekableIter @@ -935,12 +946,12 @@ pub trait ProgressLogger { /// The default type for logging sync progress. 
#[derive(Debug, Clone)] -pub struct DefaultLogger<'io, IO: Io> { +pub struct DefaultTracker<'io, IO: Io> { io: &'io IO, progress: Arc>, } -impl<'io, IO: Io> DefaultLogger<'io, IO> { +impl<'io, IO: Io> DefaultTracker<'io, IO> { pub fn new(io: &'io IO) -> Self { Self { io, @@ -950,18 +961,18 @@ impl<'io, IO: Io> DefaultLogger<'io, IO> { } #[derive(Default, Copy, Clone, Debug)] -struct IterProgress { - index: usize, - length: usize, +pub(super) struct IterProgress { + pub index: usize, + pub length: usize, } -struct DefaultFetchIterator +pub(super) struct DefaultFetchIterator where I: Iterator, { - inner: I, - progress: Arc>, - peeked: Option, + pub inner: I, + pub progress: Arc>, + pub peeked: Option, } impl PeekableIter for DefaultFetchIterator @@ -984,7 +995,7 @@ where } } -impl<'io, IO: Io> ProgressLogger for DefaultLogger<'io, IO> { +impl<'io, IO: Io> ProgressTracker for DefaultTracker<'io, IO> { fn io(&self) -> &IO { self.io } From 358e689064823c931a21632f4c9f15ad37ad86bf Mon Sep 17 00:00:00 2001 From: satan Date: Tue, 30 Apr 2024 10:40:36 +0200 Subject: [PATCH 09/29] Cleanup and docstrings --- crates/sdk/src/masp/shielded_ctx.rs | 116 +++++++++++++++++++++++++++- crates/sdk/src/masp/test_utils.rs | 18 +++-- crates/sdk/src/masp/types.rs | 31 +++++++- crates/sdk/src/masp/utils.rs | 30 ++++--- 4 files changed, 172 insertions(+), 23 deletions(-) diff --git a/crates/sdk/src/masp/shielded_ctx.rs b/crates/sdk/src/masp/shielded_ctx.rs index 3a4610ea09..fac62a46c7 100644 --- a/crates/sdk/src/masp/shielded_ctx.rs +++ b/crates/sdk/src/masp/shielded_ctx.rs @@ -1722,7 +1722,6 @@ impl ShieldedContext { sks: &[ExtendedSpendingKey], fvks: &[ViewingKey], ) -> Result<(), Error> { - // add new viewing keys // Reload the state from file to get the last confirmed state and // discard any speculative data, we cannot fetch on top of a // speculative state @@ -1737,6 +1736,7 @@ impl ShieldedContext { }; } + // add new viewing keys for esk in sks { let vk = to_viewing_key(esk).vk; 
self.vk_heights.entry(vk).or_default(); @@ -1748,11 +1748,12 @@ impl ShieldedContext { let _ = self.save().await; let native_token = query_native_token(client).await?; - // the latest block height which has been added to the witness Merkle - // tree + // the height of the key that is least synced let Some(least_idx) = self.vk_heights.values().min().cloned() else { return Ok(()); }; + // the latest block height which has been added to the witness Merkle + // tree let last_witnessed_tx = self.tx_note_map.keys().max().cloned(); // get the bounds on the block heights to fetch let start_height = @@ -1766,6 +1767,8 @@ impl ShieldedContext { let last_query_height = last_query_height.unwrap_or(last_block_height); let last_query_height = std::cmp::min(last_query_height, last_block_height); + + // Update the commitment tree and witnesses self.update_witness_map::<_, _, M>( client, progress.io(), @@ -2485,4 +2488,111 @@ mod shielded_ctx_tests { }]; assert_eq!(keys, expected); } + + /// Test that we cache and persist trial-decryptions + /// when the scanning process does not complete successfully. + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] + async fn test_decrypted_cache() { + let temp_dir = tempdir().unwrap(); + let mut shielded_ctx = + FsShieldedUtils::new(temp_dir.path().to_path_buf()); + let (client, masp_tx_sender) = test_client(100.into()); + let io = StdIo; + let progress = DefaultTracker::new(&io); + let vk = ExtendedFullViewingKey::from( + ExtendedViewingKey::from_str(AA_VIEWING_KEY).expect("Test failed"), + ) + .fvk + .vk; + + // Fetch a large number of MASP notes + let (masp_tx, changed_keys) = arbitrary_masp_tx(); + for h in 1..20 { + masp_tx_sender + .send(Some(( + IndexedTx { + height: h.into(), + index: TxIndex(1), + is_wrapper: false, + }, + (Default::default(), changed_keys.clone(), masp_tx.clone()), + ))) + .expect("Test failed"); + } + masp_tx_sender.send(None).expect("Test failed"); + + // we expect this to fail. 
+ let result = shielded_ctx + .fetch::<_, _, _, TestingMaspClient>( + &client, + &progress, + RetryStrategy::Times(1), + None, + None, + 0, + &[], + &[vk], + ) + .await + .unwrap_err(); + match result { + Error::Other(msg) => assert_eq!( + msg.as_str(), + "After retrying, could not fetch all MASP txs." + ), + other => panic!("{:?} does not match Error::Other(_)", other), + } + + // reload the shielded context + shielded_ctx.load_confirmed().await.expect("Test failed"); + + // maliciously remove an entry from the shielded context + // so that one of the last fetched notes will fail to scan. + shielded_ctx.vk_heights.clear(); + shielded_ctx.tx_note_map.remove(&IndexedTx { + height: 18.into(), + index: TxIndex(1), + is_wrapper: false, + }); + shielded_ctx.save().await.expect("Test failed"); + + // refetch the same MASP notes + for h in 1..20 { + masp_tx_sender + .send(Some(( + IndexedTx { + height: h.into(), + index: TxIndex(1), + is_wrapper: false, + }, + (Default::default(), changed_keys.clone(), masp_tx.clone()), + ))) + .expect("Test failed"); + } + masp_tx_sender.send(None).expect("Test failed"); + + // we expect this to fail. + shielded_ctx + .fetch::<_, _, _, TestingMaspClient>( + &client, + &progress, + RetryStrategy::Times(1), + None, + None, + 0, + &[], + &[vk], + ) + .await + .unwrap_err(); + + // because of an error in scanning, there should be elements + // in the decrypted cache. 
+ shielded_ctx.load_confirmed().await.expect("Test failed"); + let result: HashMap<(IndexedTx, ViewingKey), DecryptedData> = + shielded_ctx.decrypted_note_cache.drain().collect(); + // unfortunately we cannot easily assert what will be in this + // cache as scanning is done in parallel, introducing non-determinism + assert!(!result.is_empty()); + } } diff --git a/crates/sdk/src/masp/test_utils.rs b/crates/sdk/src/masp/test_utils.rs index 2edc7c9159..8ccaa0e05b 100644 --- a/crates/sdk/src/masp/test_utils.rs +++ b/crates/sdk/src/masp/test_utils.rs @@ -72,6 +72,9 @@ impl Client for TestingClient { self.inner.perform(request).await } } + +/// Create a test client for unit testing as well +/// as a channel for communicating with it. pub fn test_client( last_height: BlockHeight, ) -> (TestingClient, flume::Sender>) { @@ -92,6 +95,9 @@ pub fn test_client( ) } +/// A client for unit tests. It "fetches" a new note +/// when a channel controlled by the unit test sends +/// it one. #[derive(Clone)] pub struct TestingMaspClient<'a> { client: &'a TestingClient, } @@ -109,7 +115,7 @@ impl<'a> MaspClient<'a, TestingClient> for TestingMaspClient<'a> { &self, _: &ShieldedContext, _: &IO, - _: IndexedTx, + last_witness_tx: IndexedTx, _: BlockHeight, ) -> Result { let mut note_map_delta: BTreeMap = Default::default(); @@ -117,8 +123,10 @@ impl<'a> MaspClient<'a, TestingClient> for TestingMaspClient<'a> { let mut note_pos = 0; for msg in self.client.next_masp_txs.drain() { if let Some((ix, _)) = msg.as_ref() { - note_map_delta.insert(*ix, note_pos); - note_pos += 1; + if *ix >= last_witness_tx { + note_map_delta.insert(*ix, note_pos); + note_pos += 1; + } } channel_temp.push(msg); } @@ -163,9 +171,7 @@ impl<'a> MaspClient<'a, TestingClient> for TestingMaspClient<'a> { } } -/// An iterator that yields its first element -/// but runs forever on the second -/// `next` call. 
+/// An iterator that yields its first element only struct YieldOnceIterator { first: Option, } diff --git a/crates/sdk/src/masp/types.rs b/crates/sdk/src/masp/types.rs index 6afc649dfb..6ea87627cd 100644 --- a/crates/sdk/src/masp/types.rs +++ b/crates/sdk/src/masp/types.rs @@ -103,6 +103,9 @@ pub struct MaspTokenRewardData { pub locked_amount_target: Uint, } +/// The MASP transaction(s) found in a Namada tx. +/// These transactions can appear in the fee payment +/// and / or the main payload. #[derive(Debug, Clone)] pub(super) struct ExtractedMaspTx { pub(crate) fee_unshielding: @@ -131,7 +134,7 @@ pub enum ContextSyncStatus { Speculative, } -/// a masp change +/// A MASP specific amount delta. #[derive(BorshSerialize, BorshDeserialize, BorshDeserializer, Debug, Clone)] pub struct MaspChange { /// the token address @@ -142,6 +145,10 @@ pub struct MaspChange { #[derive(Debug, Default)] /// Data returned by successfully scanning a tx +/// +/// This is append-only data that will be sent +/// to a [`TaskManager`] to be applied to the +/// shielded context. pub(super) struct ScannedData { pub div_map: HashMap, pub memo_map: HashMap, @@ -153,6 +160,7 @@ pub(super) struct ScannedData { } impl ScannedData { + /// Append `self` to a [`ShieldedContext`] pub(super) fn apply_to( mut self, ctx: &mut ShieldedContext, @@ -181,6 +189,7 @@ impl ScannedData { ctx.decrypted_note_cache.merge(self.decrypted_note_cache); } + /// Merge two different instances of `Self`. pub(super) fn merge(&mut self, mut other: Self) { for (k, v) in other.note_map.drain(..) { self.note_map.insert(k, v); @@ -211,6 +220,11 @@ impl ScannedData { #[derive(Debug, Clone, BorshSerialize, BorshDeserialize)] /// Data extracted from a successfully decrypted MASP note +/// +/// These will be cached until the trial-decryption phase +/// of shielded-sync has finished. Then they will be +/// re-scanned as part of nullifying spent notes (which +/// is not parallelizable). 
pub struct DecryptedData { pub tx: Transaction, pub keys: BTreeSet, @@ -227,6 +241,7 @@ pub struct DecryptedDataCache { } impl DecryptedDataCache { + /// Add an entry to the cache pub fn insert( &mut self, key: (IndexedTx, ViewingKey), @@ -235,12 +250,15 @@ impl DecryptedDataCache { self.inner.insert(key, value); } + /// Merge another cache into `self`. pub fn merge(&mut self, mut other: Self) { for (k, v) in other.inner.drain(..) { self.insert(k, v); } } + /// Check if the cache already contains an entry for a given IndexedTx and + /// viewing key. pub fn contains(&self, ix: &IndexedTx, vk: &ViewingKey) -> bool { self.inner .keys() @@ -248,6 +266,7 @@ impl DecryptedDataCache { .is_some() } + /// Return an iterator over the cache that consumes it. pub fn drain( &mut self, ) -> impl Iterator + '_ @@ -258,8 +277,9 @@ impl DecryptedDataCache { /// A cache of fetched indexed transactions. /// -/// The cache is designed so that it either contains -/// all transactions from a given height, or none. +/// An invariant that shielded-sync maintains is that +/// this cache either contains all transactions from +/// a given height, or none. #[derive(Debug, Default, Clone)] pub struct Unscanned { pub(super) txs: Arc>, @@ -283,6 +303,7 @@ impl BorshDeserialize for Unscanned { } impl Unscanned { + /// Append elements to the cache from an iterator. pub fn extend(&self, items: I) where I: IntoIterator, @@ -291,11 +312,14 @@ impl Unscanned { locked.extend(items); } + /// Add a single entry to the cache. pub fn insert(&self, (k, v): IndexedNoteEntry) { let mut locked = self.txs.lock().unwrap(); locked.insert(k, v); } + /// Check if this cache has already been populated for a given + /// block height. pub fn contains_height(&self, height: u64) -> bool { let locked = self.txs.lock().unwrap(); locked.keys().any(|k| k.height.0 == height) @@ -327,6 +351,7 @@ impl Unscanned { .unwrap_or_default() } + /// Remove the first entry from the cache and return it. 
pub fn pop_first(&self) -> Option { let mut locked = self.txs.lock().unwrap(); locked.pop_first() diff --git a/crates/sdk/src/masp/utils.rs b/crates/sdk/src/masp/utils.rs index 9ecd2526ac..bbe4d26353 100644 --- a/crates/sdk/src/masp/utils.rs +++ b/crates/sdk/src/masp/utils.rs @@ -135,9 +135,9 @@ pub(super) fn extract_payload( Ok(()) } -// Retrieves all the indexes and tx events at the specified height which refer -// to a valid MASP transaction. If an index is given, it filters only the -// transactions with an index equal or greater to the provided one. +/// Retrieves all the indexes and tx events at the specified height which refer +/// to a valid MASP transaction. If an index is given, it filters only the +/// transactions with an index equal or greater to the provided one. pub(super) async fn get_indexed_masp_events_at_height( client: &C, height: BlockHeight, @@ -288,7 +288,7 @@ pub(super) async fn extract_masp_tx<'args, C: Client + Sync>( }) } -// Extract the changed keys and Transaction hash from a MASP over ibc message +/// Extract the changed keys and Transaction hash from a MASP over ibc message pub(super) async fn extract_payload_from_shielded_action< 'args, C: Client + Sync, @@ -429,13 +429,17 @@ fn get_tx_result( }) } +/// The updates to the commitment tree and witness maps +/// fetched at the beginning of shielded-sync. pub struct CommitmentTreeUpdates { pub commitment_tree: CommitmentTree, pub witness_map: HashMap>, pub note_map_delta: BTreeMap, } -/// TODO: Used the sealed pattern? +/// This abstracts away the implementation details +/// of how shielded-sync fetches the necessary data +/// from a remote server. pub trait MaspClient<'a, C: Client> { fn new(client: &'a C) -> Self where @@ -680,9 +684,7 @@ pub(super) struct FetchQueueReceiver { } impl FetchQueueReceiver { - /// Check if the sender has hung up. If so, manually calculate the latest - /// height fetched. 
Otherwise, update the latest height fetched with the
-    /// data provided by the sender.
+    /// Check if the sender has hung up.
     fn sender_alive(&self) -> bool {
         self.last_fetched.sender_count() > 0
     }
@@ -711,10 +713,12 @@ impl Iterator for FetchQueueReceiver {
 }

 impl FetchQueueSender {
+    /// Checks if the channel is already populated for the given block height
     pub(super) fn contains_height(&self, height: u64) -> bool {
         self.cache.contains_height(height)
     }

+    /// Send a new value on the channel
     pub(super) fn send(&mut self, data: IndexedNoteEntry) {
         self.last_fetched.send(data.0.height).unwrap();
         self.cache.insert(data);
@@ -775,6 +779,8 @@ pub(super) struct TaskScheduler {
 }

 impl TaskManager {
+    /// Create a new [`TaskManager`] and a [`TaskScheduler`] which can be used
+    /// to schedule tasks to be run by the manager.
     pub(super) fn new(ctx: ShieldedContext) -> (TaskScheduler, Self) {
         let (action_send, action_recv) = tokio::sync::mpsc::channel(100);
         (
@@ -815,8 +821,10 @@ impl TaskManager {
                     return Ok(());
                 }
                 Action::Data(scanned, idx) => {
-                    // track the latest scanned height
-                    self.latest_idx = idx;
+                    // track the latest scanned height. Due to parallelism,
+                    // these won't come in ascending order, thus we should
+                    // track the maximum seen.
+                    self.latest_idx = std::cmp::max(self.latest_idx, idx);
                     // apply state changes from the scanning process
                     let mut locked = self.ctx.lock().await;
                     scanned.apply_to(&mut locked);
@@ -944,7 +952,7 @@ pub trait ProgressTracker {
     fn left_to_fetch(&self) -> usize;
 }

-/// The default type for logging sync progress.
+/// The default type for tracking the progress of shielded-sync.
#[derive(Debug, Clone)] pub struct DefaultTracker<'io, IO: Io> { io: &'io IO, From ccb0f0912233f62801fe6b258647cb88c0529f71 Mon Sep 17 00:00:00 2001 From: satan Date: Tue, 30 Apr 2024 10:49:01 +0200 Subject: [PATCH 10/29] Fixing issues from rebasing --- crates/sdk/src/masp/test_utils.rs | 1 - crates/sdk/src/rpc.rs | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/crates/sdk/src/masp/test_utils.rs b/crates/sdk/src/masp/test_utils.rs index 8ccaa0e05b..b677e5cb2b 100644 --- a/crates/sdk/src/masp/test_utils.rs +++ b/crates/sdk/src/masp/test_utils.rs @@ -82,7 +82,6 @@ pub fn test_client( let mut client = TestClient::new(RPC); client.state.in_mem_mut().last_block = Some(LastBlock { height: last_height, - hash: Default::default(), time: Default::default(), }); ( diff --git a/crates/sdk/src/rpc.rs b/crates/sdk/src/rpc.rs index a03ec02ba9..4c130066b5 100644 --- a/crates/sdk/src/rpc.rs +++ b/crates/sdk/src/rpc.rs @@ -50,7 +50,7 @@ use crate::error::{EncodingError, Error, QueryError, TxSubmitError}; use crate::events::{extend, Event}; use crate::internal_macros::echo_error; use crate::io::Io; -use crate::masp::MaspTokenRewardData; +use crate::masp::types::MaspTokenRewardData; use crate::queries::vp::pos::{ EnrichedBondsAndUnbondsDetails, ValidatorStateInfo, }; From dc1bd9ebbe543f8bf3d1edd0d38a1eb183450462 Mon Sep 17 00:00:00 2001 From: satan Date: Tue, 30 Apr 2024 15:05:38 +0200 Subject: [PATCH 11/29] Fix integration tests --- crates/sdk/src/masp/utils.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/crates/sdk/src/masp/utils.rs b/crates/sdk/src/masp/utils.rs index bbe4d26353..fe086afe36 100644 --- a/crates/sdk/src/masp/utils.rs +++ b/crates/sdk/src/masp/utils.rs @@ -515,7 +515,9 @@ where self.fetch_shielded_transfer( &progress, tx_sender, - last_witnessed_tx.height.0, + // we don't want to re-fetch the last witnessed tx. 
+                    // instead, we start fetching block one after
+                    last_witnessed_tx.height.0 + 1,
                     last_query_height.0,
                 ),
                 async {
@@ -524,8 +526,8 @@
                     witness_map: ctx.witness_map.clone(),
                     note_map_delta: Default::default(),
                 };
+                let mut note_pos = updates.commitment_tree.size();
                 for (indexed_tx, (_, _, ref shielded)) in tx_receiver {
-                    let mut note_pos = updates.commitment_tree.size();
                     updates.note_map_delta.insert(indexed_tx, note_pos);
                     for so in shielded
                         .sapling_bundle()

From 51ba26d6bec29fd1d98a513a6d527367b60dae50 Mon Sep 17 00:00:00 2001
From: satan 
Date: Tue, 30 Apr 2024 16:48:48 +0200
Subject: [PATCH 12/29] Fixed the check-crates

---
 crates/apps_lib/src/client/masp.rs | 7 ++++++-
 crates/sdk/src/masp/utils.rs       | 3 ++-
 2 files changed, 8 insertions(+), 2 deletions(-)

diff --git a/crates/apps_lib/src/client/masp.rs b/crates/apps_lib/src/client/masp.rs
index 74c0b47d26..46aff6695d 100644
--- a/crates/apps_lib/src/client/masp.rs
+++ b/crates/apps_lib/src/client/masp.rs
@@ -67,12 +67,16 @@ pub async fn syncing<
     }
 }

+/// The amount of progress a shielded sync sub-process has made
 #[derive(Default, Copy, Clone)]
 struct IterProgress {
     index: usize,
     length: usize,
 }

+/// A type that can track progress for
+/// shielded sync and draw corresponding
+/// progress bars to the provided I/O
 struct StdoutDrawer<'io, IO: Io> {
     io: &'io IO,
     fetch: IterProgress,
@@ -80,6 +84,7 @@ struct StdoutDrawer<'io, IO: Io> {
 }

 impl<'io, IO: Io> StdoutDrawer<'io, IO> {
+    /// Given the current progress, print progress bars to the provided I/O
     fn draw(&self) {
         let (fetch_percent, fetch_completed) = (self.fetch.length > 0)
             .then(|| {
@@ -184,6 +189,7 @@ impl<'io, IO: Io> Drop for StdoutDrawer<'io, IO> {
     }
 }

+/// An iterator that logs to screen the progress it tracks
 pub struct LoggingIterator<'io, T, I, IO>
 where
     T: Debug,
@@ -196,7 +202,6 @@ where
     peeked: Option,
 }

-/// An iterator that logs to screen the progress it tracks
 impl<'io, T, I, IO> LoggingIterator<'io, T, I, IO>
 where
     T: Debug,
diff --git 
a/crates/sdk/src/masp/utils.rs b/crates/sdk/src/masp/utils.rs index fe086afe36..6c529d8497 100644 --- a/crates/sdk/src/masp/utils.rs +++ b/crates/sdk/src/masp/utils.rs @@ -491,6 +491,7 @@ pub struct LedgerMaspClient<'a, C: Client> { client: &'a C, } +#[cfg(not(target_family = "wasm"))] impl<'a, C: Client + Sync> MaspClient<'a, C> for LedgerMaspClient<'a, C> where LedgerMaspClient<'a, C>: 'a, @@ -841,7 +842,7 @@ impl TaskManager { } } -impl TaskScheduler { +impl TaskScheduler { /// Signal the [`TaskManager`] that the scanning thread has completed pub(super) fn complete(&self, with_error: bool) { _ = self.action.blocking_send(Action::Complete { with_error }); From af0d6872351ca82d00348e6a98c74ebe9f8a7f0e Mon Sep 17 00:00:00 2001 From: satan Date: Thu, 2 May 2024 10:44:39 +0200 Subject: [PATCH 13/29] Removed some unwraps on mutexes --- crates/apps_lib/src/client/masp.rs | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/crates/apps_lib/src/client/masp.rs b/crates/apps_lib/src/client/masp.rs index 46aff6695d..71ceb6263a 100644 --- a/crates/apps_lib/src/client/masp.rs +++ b/crates/apps_lib/src/client/masp.rs @@ -233,8 +233,10 @@ where } } - fn advance_index(&self) { - let mut locked = self.drawer.lock().unwrap(); + fn advance_index(&self) -> Result<(), Error> { + let mut locked = self.drawer + .lock() + .map_err(|_| Error::Other("Error acquiring mutex".into()))?; match self.r#type { ProgressType::Fetch => { locked.fetch.index += 1; @@ -244,11 +246,15 @@ where locked.scan.length = self.items.size_hint().0; } } + Ok(()) } - fn draw(&self) { - let locked = self.drawer.lock().unwrap(); + fn draw(&self) -> Result<(), Error> { + let locked = self.drawer + .lock() + .map_err(|_| Error::Other("Error acquiring mutex".into()))?; locked.draw(); + Ok(()) } } @@ -268,8 +274,8 @@ where fn next(&mut self) -> Option { self.peek(); let next_item = self.peeked.take()?; - self.advance_index(); - self.draw(); + self.advance_index().ok()?; + 
self.draw().ok()?; Some(next_item) } } From 6ff36461ca4003884c2aaa4ca90b4805ffad6f51 Mon Sep 17 00:00:00 2001 From: satan Date: Thu, 2 May 2024 11:16:15 +0200 Subject: [PATCH 14/29] reverting to unwrapping locks --- crates/apps_lib/src/client/masp.rs | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/crates/apps_lib/src/client/masp.rs b/crates/apps_lib/src/client/masp.rs index 71ceb6263a..46aff6695d 100644 --- a/crates/apps_lib/src/client/masp.rs +++ b/crates/apps_lib/src/client/masp.rs @@ -233,10 +233,8 @@ where } } - fn advance_index(&self) -> Result<(), Error> { - let mut locked = self.drawer - .lock() - .map_err(|_| Error::Other("Error acquiring mutex".into()))?; + fn advance_index(&self) { + let mut locked = self.drawer.lock().unwrap(); match self.r#type { ProgressType::Fetch => { locked.fetch.index += 1; @@ -246,15 +244,11 @@ where locked.scan.length = self.items.size_hint().0; } } - Ok(()) } - fn draw(&self) -> Result<(), Error> { - let locked = self.drawer - .lock() - .map_err(|_| Error::Other("Error acquiring mutex".into()))?; + fn draw(&self) { + let locked = self.drawer.lock().unwrap(); locked.draw(); - Ok(()) } } @@ -274,8 +268,8 @@ where fn next(&mut self) -> Option { self.peek(); let next_item = self.peeked.take()?; - self.advance_index().ok()?; - self.draw().ok()?; + self.advance_index(); + self.draw(); Some(next_item) } } From 3f1f1d5cb44df8914d96d2157c2802f8be2b6c6d Mon Sep 17 00:00:00 2001 From: satan Date: Thu, 2 May 2024 11:19:14 +0200 Subject: [PATCH 15/29] Testing something in ci --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 97128c1264..eed8a2ba54 100644 --- a/Makefile +++ b/Makefile @@ -180,7 +180,7 @@ test-e2e: # Run integration tests test-integration: RUST_BACKTRACE=$(RUST_BACKTRACE) \ - $(cargo) +$(nightly) test --lib $(jobs) integration::$(TEST_FILTER) --features integration \ + $(cargo) +$(nightly) test --lib $(jobs) 
integration::masp::$(TEST_FILTER) --features integration \
 	-Z unstable-options \
 	-- \
 	--test-threads=1 \

From 1a2dc53fb7cd4eb1645a69d8d0e00fa96e8b4d95 Mon Sep 17 00:00:00 2001
From: satan 
Date: Thu, 2 May 2024 11:54:25 +0200
Subject: [PATCH 16/29] Testing something in ci

---
 Makefile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Makefile b/Makefile
index eed8a2ba54..97128c1264 100644
--- a/Makefile
+++ b/Makefile
@@ -180,7 +180,7 @@ test-e2e:
 # Run integration tests
 test-integration:
 	RUST_BACKTRACE=$(RUST_BACKTRACE) \
-	$(cargo) +$(nightly) test --lib $(jobs) integration::masp::$(TEST_FILTER) --features integration \
+	$(cargo) +$(nightly) test --lib $(jobs) integration::$(TEST_FILTER) --features integration \
 	-Z unstable-options \
 	-- \
 	--test-threads=1 \

From 115ef339dbbad2f2054da9205fd951bdf102a659 Mon Sep 17 00:00:00 2001
From: satan 
Date: Thu, 2 May 2024 13:57:48 +0200
Subject: [PATCH 17/29] changelog

---
 .../improvements/3006-refactor-shielded-sync.md | 9 +++++++++
 1 file changed, 9 insertions(+)
 create mode 100644 .changelog/unreleased/improvements/3006-refactor-shielded-sync.md

diff --git a/.changelog/unreleased/improvements/3006-refactor-shielded-sync.md b/.changelog/unreleased/improvements/3006-refactor-shielded-sync.md
new file mode 100644
index 0000000000..519a52dac6
--- /dev/null
+++ b/.changelog/unreleased/improvements/3006-refactor-shielded-sync.md
@@ -0,0 +1,9 @@
+This PR refactors shielded sync to make the following improvements
+ * Allow fetching new masp txs and trial-decrypting notes to happen asynchronously
+ * Parallelize the trial-decryptions
+ * Modularize the logic so that we can mock parts of the algorithm for tests and to enable migration over to using a special masp indexer
+ * Added test coverage
+ * Decouple nullifying notes and updating spent notes from the trial-decryption process
+ * Refactor the masp.rs module in the sdk into several smaller files and submodules
+ 
+[\#3006](https://github.com/anoma/namada/pull/3006) From 829d5144adafca09cc0ecc83cff5f1d80e5eaf7f Mon Sep 17 00:00:00 2001 From: satan Date: Mon, 13 May 2024 18:12:04 +0200 Subject: [PATCH 18/29] [chore]: rebasing --- crates/apps_lib/src/client/rpc.rs | 2 +- crates/sdk/src/masp/mod.rs | 76 ++-- crates/sdk/src/masp/shielded_ctx.rs | 643 ++++------------------------ crates/sdk/src/masp/types.rs | 36 +- crates/sdk/src/masp/utils.rs | 266 ++---------- 5 files changed, 174 insertions(+), 849 deletions(-) diff --git a/crates/apps_lib/src/client/rpc.rs b/crates/apps_lib/src/client/rpc.rs index ac8f98d3b2..11f4f8db22 100644 --- a/crates/apps_lib/src/client/rpc.rs +++ b/crates/apps_lib/src/client/rpc.rs @@ -37,7 +37,7 @@ use namada::proof_of_stake::types::{ }; use namada::{state as storage, token}; use namada_sdk::control_flow::time::{Duration, Instant}; -use namada_sdk::masp::MaspTokenRewardData; +use namada_sdk::masp::types::MaspTokenRewardData; use namada_sdk::proof_of_stake::types::ValidatorMetaData; use namada_sdk::queries::Client; use namada_sdk::rpc::{ diff --git a/crates/sdk/src/masp/mod.rs b/crates/sdk/src/masp/mod.rs index e1d7b1e35a..e7acefcfd3 100644 --- a/crates/sdk/src/masp/mod.rs +++ b/crates/sdk/src/masp/mod.rs @@ -422,7 +422,7 @@ pub mod testing { use masp_primitives::consensus::testing::arb_height; use masp_primitives::constants::SPENDING_KEY_GENERATOR; use masp_primitives::convert::AllowedConversion; - use masp_primitives::ff::{Field, PrimeField}; + use masp_primitives::ff::PrimeField; use masp_primitives::memo::MemoBytes; use masp_primitives::merkle_tree::MerklePath; use masp_primitives::sapling::note_encryption::{ @@ -434,6 +434,7 @@ pub mod testing { Diversifier, Node, Note, ProofGenerationKey, Rseed, }; use masp_primitives::transaction::builder::Builder; + use masp_primitives::transaction::components::sapling::builder::RngBuildParams; use masp_primitives::transaction::components::{U64Sum, GROTH_PROOF_SIZE}; use 
masp_primitives::transaction::fees::fixed::FeeRule; use masp_primitives::transaction::TransparentAddress; @@ -644,16 +645,11 @@ pub mod testing { value: u64, _anchor: bls12_381::Scalar, _merkle_path: MerklePath, + rcv: jubjub::Fr, ) -> Result< ([u8; GROTH_PROOF_SIZE], jubjub::ExtendedPoint, PublicKey), (), > { - // Initialize secure RNG - let mut rng = self.0.lock().unwrap(); - - // We create the randomness of the value commitment - let rcv = jubjub::Fr::random(&mut *rng); - // Accumulate the value commitment randomness in the context { let mut tmp = rcv; @@ -698,15 +694,8 @@ pub mod testing { _rcm: jubjub::Fr, asset_type: AssetType, value: u64, + rcv: jubjub::Fr, ) -> ([u8; GROTH_PROOF_SIZE], jubjub::ExtendedPoint) { - // Initialize secure RNG - let mut rng = self.0.lock().unwrap(); - - // We construct ephemeral randomness for the value commitment. This - // randomness is not given back to the caller, but the synthetic - // blinding factor `bsk` is accumulated in the context. - let rcv = jubjub::Fr::random(&mut *rng); - // Accumulate the value commitment randomness in the context { let mut tmp = rcv.neg(); // Outputs subtract from the total. 
@@ -747,14 +736,9 @@ pub mod testing { value: u64, _anchor: bls12_381::Scalar, _merkle_path: MerklePath, + rcv: jubjub::Fr, ) -> Result<([u8; GROTH_PROOF_SIZE], jubjub::ExtendedPoint), ()> { - // Initialize secure RNG - let mut rng = self.0.lock().unwrap(); - - // We create the randomness of the value commitment - let rcv = jubjub::Fr::random(&mut *rng); - // Accumulate the value commitment randomness in the context { let mut tmp = rcv; @@ -908,6 +892,7 @@ pub mod testing { address in arb_transparent_address(), expiration_height in arb_height(BranchId::MASP, &TestNetwork), mut rng in arb_rng().prop_map(TestCsprng), + bparams_rng in arb_rng().prop_map(TestCsprng), prover_rng in arb_rng().prop_map(TestCsprng), ) -> (ExtendedSpendingKey, Diversifier, Note, Node) { let mut spending_key_seed = [0; 32]; @@ -920,13 +905,12 @@ pub mod testing { .to_payment_address(div) .expect("a PaymentAddress"); - let mut builder = Builder::::new_with_rng( + let mut builder = Builder::::new( NETWORK, // NOTE: this is going to add 20 more blocks to the actual // expiration but there's no other exposed function that we could // use from the masp crate to specify the expiration better expiration_height.unwrap(), - rng, ); // Add a transparent input to support our desired shielded output builder.add_transparent_input(TxOut { asset_type, value, address }).unwrap(); @@ -936,6 +920,8 @@ pub mod testing { let (transaction, metadata) = builder.build( &MockTxProver(Mutex::new(prover_rng)), &FeeRule::non_standard(U64Sum::zero()), + &mut rng, + &mut RngBuildParams::new(bparams_rng), ).unwrap(); // Extract the shielded output from the transaction let shielded_output = &transaction @@ -1066,7 +1052,6 @@ pub mod testing { ), )( expiration_height in arb_height(BranchId::MASP, &TestNetwork), - rng in arb_rng().prop_map(TestCsprng), spend_descriptions in assets .iter() .map(|(asset, values)| arb_spend_descriptions(asset.clone(), values.clone())) @@ -1077,16 +1062,15 @@ pub mod testing { .collect::>(), 
assets in Just(assets), ) -> ( - Builder::>, + Builder::, HashMap, ) { - let mut builder = Builder::::new_with_rng( + let mut builder = Builder::::new( NETWORK, // NOTE: this is going to add 20 more blocks to the actual // expiration but there's no other exposed function that we could // use from the masp crate to specify the expiration better - expiration_height.unwrap(), - rng, + expiration_height.unwrap() ); let mut leaves = Vec::new(); // First construct a Merkle tree containing all notes to be used @@ -1135,7 +1119,6 @@ pub mod testing { ), )( expiration_height in arb_height(BranchId::MASP, &TestNetwork), - rng in arb_rng().prop_map(TestCsprng), txins in assets .iter() .map(|(asset, values)| arb_txouts(asset.clone(), values.clone(), source)) @@ -1146,16 +1129,15 @@ pub mod testing { .collect::>(), assets in Just(assets), ) -> ( - Builder::>, + Builder::, HashMap, ) { - let mut builder = Builder::::new_with_rng( + let mut builder = Builder::::new( NETWORK, // NOTE: this is going to add 20 more blocks to the actual // expiration but there's no other exposed function that we could // use from the masp crate to specify the expiration better expiration_height.unwrap(), - rng, ); for txin in txins.into_iter().flatten() { builder.add_transparent_input(txin).unwrap(); @@ -1180,7 +1162,6 @@ pub mod testing { ), )( expiration_height in arb_height(BranchId::MASP, &TestNetwork), - rng in arb_rng().prop_map(TestCsprng), spend_descriptions in assets .iter() .map(|(asset, values)| arb_spend_descriptions(asset.clone(), values.clone())) @@ -1191,16 +1172,15 @@ pub mod testing { .collect::>(), assets in Just(assets), ) -> ( - Builder::>, + Builder::, HashMap, ) { - let mut builder = Builder::::new_with_rng( + let mut builder = Builder::::new( NETWORK, // NOTE: this is going to add 20 more blocks to the actual // expiration but there's no other exposed function that we could // use from the masp crate to specify the expiration better expiration_height.unwrap(), - rng, ); let mut 
leaves = Vec::new(); // First construct a Merkle tree containing all notes to be used @@ -1226,11 +1206,15 @@ pub mod testing { )(asset_range in Just(asset_range.into()))( (builder, asset_types) in arb_shielded_builder(asset_range), epoch in arb_epoch(), - rng in arb_rng().prop_map(TestCsprng), + prover_rng in arb_rng().prop_map(TestCsprng), + mut rng in arb_rng().prop_map(TestCsprng), + bparams_rng in arb_rng().prop_map(TestCsprng), ) -> (ShieldedTransfer, HashMap) { let (masp_tx, metadata) = builder.clone().build( - &MockTxProver(Mutex::new(rng)), + &MockTxProver(Mutex::new(prover_rng)), &FeeRule::non_standard(U64Sum::zero()), + &mut rng, + &mut RngBuildParams::new(bparams_rng), ).unwrap(); (ShieldedTransfer { builder: builder.map_builder(WalletMap), @@ -1252,11 +1236,15 @@ pub mod testing { asset_range, ), epoch in arb_epoch(), - rng in arb_rng().prop_map(TestCsprng), + prover_rng in arb_rng().prop_map(TestCsprng), + mut rng in arb_rng().prop_map(TestCsprng), + bparams_rng in arb_rng().prop_map(TestCsprng), ) -> (ShieldedTransfer, HashMap) { let (masp_tx, metadata) = builder.clone().build( - &MockTxProver(Mutex::new(rng)), + &MockTxProver(Mutex::new(prover_rng)), &FeeRule::non_standard(U64Sum::zero()), + &mut rng, + &mut RngBuildParams::new(bparams_rng), ).unwrap(); (ShieldedTransfer { builder: builder.map_builder(WalletMap), @@ -1278,11 +1266,15 @@ pub mod testing { asset_range, ), epoch in arb_epoch(), - rng in arb_rng().prop_map(TestCsprng), + prover_rng in arb_rng().prop_map(TestCsprng), + mut rng in arb_rng().prop_map(TestCsprng), + bparams_rng in arb_rng().prop_map(TestCsprng), ) -> (ShieldedTransfer, HashMap) { let (masp_tx, metadata) = builder.clone().build( - &MockTxProver(Mutex::new(rng)), + &MockTxProver(Mutex::new(prover_rng)), &FeeRule::non_standard(U64Sum::zero()), + &mut rng, + &mut RngBuildParams::new(bparams_rng), ).unwrap(); (ShieldedTransfer { builder: builder.map_builder(WalletMap), diff --git a/crates/sdk/src/masp/shielded_ctx.rs 
b/crates/sdk/src/masp/shielded_ctx.rs index fac62a46c7..4153092058 100644 --- a/crates/sdk/src/masp/shielded_ctx.rs +++ b/crates/sdk/src/masp/shielded_ctx.rs @@ -4,9 +4,7 @@ use std::convert::TryInto; use borsh::{BorshDeserialize, BorshSerialize}; use borsh_ext::BorshSerializeExt; -use itertools::Either; use masp_primitives::asset_type::AssetType; -use masp_primitives::consensus::TestNetwork; use masp_primitives::convert::AllowedConversion; use masp_primitives::memo::MemoBytes; use masp_primitives::merkle_tree::{ @@ -26,46 +24,38 @@ use masp_primitives::transaction::fees::fixed::FeeRule; use masp_primitives::transaction::{ builder, Authorization, Authorized, Transaction, TransparentAddress, }; +use masp_primitives::transaction::components::sapling::builder::RngBuildParams; use masp_primitives::zip32::{ExtendedFullViewingKey, ExtendedSpendingKey}; -use namada_core::address::{Address, MASP}; +use namada_core::address::Address; use namada_core::collections::{HashMap, HashSet}; use namada_core::masp::{ - encode_asset_type, AssetData, BalanceOwner, ExtendedViewingKey, - PaymentAddress, TransferSource, TransferTarget, + encode_asset_type, AssetData, + TransferSource, TransferTarget, }; use namada_core::storage::{BlockHeight, Epoch, IndexedTx, TxIndex}; use namada_core::time::{DateTimeUtc, DurationSecs}; -use namada_core::token::Amount; use namada_token::{self as token, Denomination, MaspDigitPos}; -use namada_tx::Tx; use rand_core::OsRng; use rayon::prelude::*; use ripemd::Digest as RipemdDigest; use sha2::Digest; -use tendermint_rpc::query::Query; -use tendermint_rpc::Order; -use crate::error::{Error, PinnedBalanceError, QueryError}; -use crate::eth_bridge::token::storage_key::{ - balance_key, is_any_shielded_action_balance_key, -}; +use crate::error::{Error, QueryError}; use crate::io::Io; use crate::masp::types::{ ContextSyncStatus, Conversions, DecryptedData, DecryptedDataCache, - MaspAmount, MaspChange, ScannedData, ShieldedTransfer, TransactionDelta, - 
TransferDelta, TransferErr, Unscanned, WalletMap, + MaspAmount, ScannedData, ShieldedTransfer, TransactionDelta, TransferErr, + Unscanned, WalletMap, }; use crate::masp::utils::{ - cloned_pair, extract_masp_tx, extract_payload, fetch_channel, - is_amount_required, to_viewing_key, DefaultTracker, - ExtractShieldedActionArg, FetchQueueSender, LedgerMaspClient, MaspClient, - ProgressTracker, RetryStrategy, ShieldedUtils, TaskManager, + cloned_pair, fetch_channel, is_amount_required, + to_viewing_key, FetchQueueSender, + MaspClient, ProgressTracker, RetryStrategy, ShieldedUtils, TaskManager, }; -use crate::masp::NETWORK; +use crate::masp::{Network, NETWORK}; use crate::queries::Client; use crate::rpc::{ - query_block, query_conversion, query_denom, query_epoch_at_height, - query_native_token, + query_block, query_conversion, query_denom, }; use crate::{display_line, edisplay_line, rpc, MaybeSend, MaybeSync, Namada}; @@ -93,9 +83,6 @@ pub struct ShieldedContext { pub div_map: HashMap, /// Maps note positions to their witness (used to make merkle paths) pub witness_map: HashMap>, - /// Tracks what each transaction does to various account balances - pub delta_map: - BTreeMap, /// The set of note positions that have been spent pub spents: HashSet, /// Maps asset types to their decodings @@ -130,7 +117,6 @@ impl Default for ShieldedContext { div_map: HashMap::default(), witness_map: HashMap::default(), spents: HashSet::default(), - delta_map: BTreeMap::default(), asset_types: HashMap::default(), vk_map: HashMap::default(), unscanned: Default::default(), @@ -303,18 +289,13 @@ impl ShieldedContext { /// Parse the cache of decrypted notes: /// * nullify notes that have been spent /// * update balances of each viewing key - pub(super) fn nullify_spent_notes( - &mut self, - native_token: &Address, - ) -> Result<(), Error> { - for ((indexed_tx, _vk), decrypted_data) in + pub(super) fn nullify_spent_notes(&mut self) -> Result<(), Error> { + for ((_, _vk), decrypted_data) in 
self.decrypted_note_cache.drain() { let DecryptedData { tx: shielded, - keys: tx_changed_keys, delta: mut transaction_delta, - epoch, } = decrypted_data; // Cancel out those of our notes that have been spent @@ -344,156 +325,10 @@ impl ShieldedContext { })?; } } - - let mut transfer_delta = TransferDelta::new(); - let balance_keys: Vec<_> = tx_changed_keys - .iter() - .filter_map(is_any_shielded_action_balance_key) - .collect(); - let (source, token, amount) = match shielded.transparent_bundle() { - Some(transp_bundle) => { - // Shielding/Unshielding transfer - match (transp_bundle.vin.len(), transp_bundle.vout.len()) { - (0, 0) => { - return Err(Error::Other( - "Expected shielding/unshielding transaction" - .to_string(), - )); - } - (_, 0) => { - // Shielding, only if we are syncing. If in - // speculative context do not update - if let ContextSyncStatus::Confirmed = - self.sync_status - { - let addresses = balance_keys - .iter() - .find(|addresses| { - if addresses[1] != &MASP { - let transp_addr_commit = - TransparentAddress( - ripemd::Ripemd160::digest( - sha2::Sha256::digest( - &addresses[1] - .serialize_to_vec(), - ), - ) - .into(), - ); - // Vins contain the same address, so we - // can - // just examine the first one - transp_bundle.vin.first().is_some_and( - |vin| { - vin.address - == transp_addr_commit - }, - ) - } else { - false - } - }) - .ok_or_else(|| { - Error::Other( - "Could not find source of MASP tx" - .to_string(), - ) - })?; - - let amount = transp_bundle.vin.iter().fold( - Amount::zero(), - |acc, vin| { - acc + Amount::from_u64(vin.value) - }, - ); - - ( - addresses[1].to_owned(), - addresses[0].to_owned(), - amount, - ) - } else { - return Ok(()); - } - } - (0, _) => { - // Unshielding - let token = balance_keys - .iter() - .find(|addresses| { - if addresses[1] != &MASP { - let transp_addr_commit = - TransparentAddress( - ripemd::Ripemd160::digest( - sha2::Sha256::digest( - &addresses[1] - .serialize_to_vec(), - ), - ) - .into(), - ); - // 
Vouts contain the same address, so we - // can - // just examine the first one - transp_bundle.vout.first().is_some_and( - |vout| { - vout.address - == transp_addr_commit - }, - ) - } else { - false - } - }) - .ok_or_else(|| { - Error::Other( - "Could not find target of MASP tx" - .to_string(), - ) - })?[0]; - let amount = transp_bundle - .vout - .iter() - .fold(Amount::zero(), |acc, vout| { - acc + Amount::from_u64(vout.value) - }); - (MASP, token.to_owned(), amount) - } - (_, _) => { - return Err(Error::Other( - "MASP transaction cannot contain both \ - transparent inputs and outputs" - .to_string(), - )); - } - } - } - None => { - // Shielded transfer - (MASP, native_token.clone(), Amount::zero()) - } - }; - transfer_delta.insert( - source, - MaspChange { - asset: token, - change: -amount.change(), - }, - ); - self.delta_map - .insert(indexed_tx, (epoch, transfer_delta, transaction_delta)); } Ok(()) } - /// Summarize the effects on shielded and transparent accounts of each - /// Transfer in this context - pub fn get_tx_deltas( - &self, - ) -> &BTreeMap { - &self.delta_map - } - /// Compute the total unspent notes associated with the viewing key in the /// context. If the key is not in the context, then we do not know the /// balance and hence we return None. @@ -943,139 +778,6 @@ impl ShieldedContext { Ok((val_acc, notes, conversions)) } - /// Compute the combined value of the output notes of the transaction pinned - /// at the given payment address. This computation uses the supplied viewing - /// keys to try to decrypt the output notes. If no transaction is pinned at - /// the given payment address fails with - /// `PinnedBalanceError::NoTransactionPinned`. 
- pub async fn compute_pinned_balance( - client: &C, - owner: PaymentAddress, - viewing_key: &ViewingKey, - ) -> Result<(I128Sum, Epoch), Error> { - // Check that the supplied viewing key corresponds to given payment - // address - let counter_owner = viewing_key.to_payment_address( - *masp_primitives::sapling::PaymentAddress::diversifier( - &owner.into(), - ), - ); - match counter_owner { - Some(counter_owner) if counter_owner == owner.into() => {} - _ => { - return Err(Error::from(PinnedBalanceError::InvalidViewingKey)); - } - } - // Construct the key for where the transaction ID would be stored - let pin_key = namada_token::storage_key::masp_pin_tx_key(&owner.hash()); - // Obtain the transaction pointer at the key - // If we don't discard the error message then a test fails, - // however the error underlying this will go undetected - let indexed_tx = - rpc::query_storage_value::(client, &pin_key) - .await - .map_err(|_| PinnedBalanceError::NoTransactionPinned)?; - let tx_epoch = query_epoch_at_height(client, indexed_tx.height) - .await? - .ok_or_else(|| { - Error::from(QueryError::General( - "Queried height is greater than the last committed block \ - height" - .to_string(), - )) - })?; - - let block = client - .block(indexed_tx.height.0 as u32) - .await - .map_err(|e| Error::from(QueryError::General(e.to_string())))? - .block - .data; - - let tx = Tx::try_from(block[indexed_tx.index.0 as usize].as_ref()) - .map_err(|e| Error::Other(e.to_string()))?; - let (_, shielded) = extract_masp_tx( - &tx, - ExtractShieldedActionArg::Request::(( - client, - indexed_tx.height, - Some(indexed_tx.index), - )), - false, - ) - .await? 
- .inner_tx - .ok_or_else(|| { - Error::Other("Missing shielded inner portion of pinned tx".into()) - })?; - - // Accumulate the combined output note value into this Amount - let mut val_acc = I128Sum::zero(); - for so in shielded - .sapling_bundle() - .map_or(&vec![], |x| &x.shielded_outputs) - { - // Let's try to see if our viewing key can decrypt current note - let decres = try_sapling_note_decryption::<_, OutputDescription<<::SaplingAuth as masp_primitives::transaction::components::sapling::Authorization>::Proof>>( - &NETWORK, - 1.into(), - &PreparedIncomingViewingKey::new(&viewing_key.ivk()), - so, - ); - match decres { - // So the given viewing key does decrypt this current note... - Some((note, pa, _memo)) if pa == owner.into() => { - val_acc += I128Sum::from_nonnegative( - note.asset_type, - note.value as i128, - ) - .map_err(|()| { - Error::Other( - "found note with invalid value or asset type" - .to_string(), - ) - })?; - } - _ => {} - } - } - Ok((val_acc, tx_epoch)) - } - - /// Compute the combined value of the output notes of the pinned transaction - /// at the given payment address if there's any. The asset types may be from - /// the epoch of the transaction or even before, so exchange all these - /// amounts to the epoch of the transaction in order to get the value that - /// would have been displayed in the epoch of the transaction. - pub async fn compute_exchanged_pinned_balance( - &mut self, - context: &impl Namada, - owner: PaymentAddress, - viewing_key: &ViewingKey, - ) -> Result<(ValueSum, I128Sum, Epoch), Error> { - // Obtain the balance that will be exchanged - let (amt, ep) = - Self::compute_pinned_balance(context.client(), owner, viewing_key) - .await?; - display_line!(context.io(), "Pinned balance: {:?}", amt); - // Finally, exchange the balance to the transaction's epoch - let computed_amount = self - .compute_exchanged_amount( - context.client(), - context.io(), - amt, - ep, - BTreeMap::new(), - ) - .await? 
- .0; - display_line!(context.io(), "Exchanged amount: {:?}", computed_amount); - let (decoded, undecoded) = self - .decode_combine_sum_to_epoch(context.client(), computed_amount, ep) - .await; - Ok((decoded, undecoded, ep)) - } - /// Convert an amount whose units are AssetTypes to one whose units are /// Addresses that they decode to. All asset types not corresponding to /// the given epoch are ignored. @@ -1189,6 +891,7 @@ impl ShieldedContext { // are shielded use rand::rngs::StdRng; use rand_core::SeedableRng; + let spending_key = source.spending_key(); let payment_address = target.payment_address(); // No shielded components are needed when neither source nor @@ -1211,9 +914,10 @@ impl ShieldedContext { let memo = MemoBytes::empty(); // Try to get a seed from env var, if any. - let rng = StdRng::from_rng(OsRng).unwrap(); + #[allow(unused_mut)] + let mut rng = StdRng::from_rng(OsRng).unwrap(); #[cfg(feature = "testing")] - let rng = if let Ok(seed) = std::env::var(super::ENV_VAR_MASP_TEST_SEED) + let mut rng = if let Ok(seed) = std::env::var(super::ENV_VAR_MASP_TEST_SEED) .map_err(|e| Error::Other(e.to_string())) .and_then(|seed| { let exp_str = format!( @@ -1280,13 +984,12 @@ impl ShieldedContext { u32::MAX - 20 } }; - let mut builder = Builder::::new_with_rng( + let mut builder = Builder::::new( NETWORK, // NOTE: this is going to add 20 more blocks to the actual // expiration but there's no other exposed function that we could // use from the masp crate to specify the expiration better expiration_height.into(), - rng, ); // Convert transaction amount into MASP types @@ -1378,7 +1081,7 @@ impl ShieldedContext { } } - // Anotate the asset type in the value balance with its decoding in + // Annotate the asset type in the value balance with its decoding in // order to facilitate cross-epoch computations let value_balance = builder.value_balance().map_err(|e| { Error::Other(format!("unable to complete value balance: {}", e)) @@ -1539,14 +1242,19 @@ impl 
ShieldedContext { #[cfg(feature = "testing")] let prover = super::testing::MockTxProver(std::sync::Mutex::new(OsRng)); let (masp_tx, metadata) = - builder.build(&prover, &FeeRule::non_standard(U64Sum::zero()))?; + builder.build( + &prover, + &FeeRule::non_standard(U64Sum::zero()), + &mut rng, + &mut RngBuildParams::new(OsRng), + )?; if update_ctx { // Cache the generated transfer let mut shielded_ctx = context.shielded_mut().await; shielded_ctx .pre_cache_transaction( - context, &masp_tx, source, target, token, epoch, + context, &masp_tx, ) .await?; } @@ -1560,30 +1268,14 @@ impl ShieldedContext { } // Updates the internal state with the data of the newly generated - // transaction. More specifically invalidate the spent notes and the - // transparent balances, but do not cache the newly produced output - // descriptions and therefore the merkle tree + // transaction. More specifically invalidate the spent notes, but do not + // cache the newly produced output descriptions and therefore the merkle + // tree async fn pre_cache_transaction( &mut self, context: &impl Namada, masp_tx: &Transaction, - source: &TransferSource, - target: &TransferTarget, - token: &Address, - epoch: Epoch, ) -> Result<(), Error> { - // Need to mock the changed balance keys - let mut changed_balance_keys = BTreeSet::default(); - match (source.effective_address(), target.effective_address()) { - // Shielded transactions don't write balance keys - (MASP, MASP) => (), - (source, target) => { - changed_balance_keys.insert(balance_key(token, &source)); - changed_balance_keys.insert(balance_key(token, &target)); - } - } - - let native_token = query_native_token(context.client()).await?; let vks: Vec<_> = context .wallet() .await @@ -1602,7 +1294,7 @@ impl ShieldedContext { }, |indexed| IndexedTx { height: indexed.height, - index: indexed.index + 1, + index: TxIndex(indexed.index.0 + 1), is_wrapper: false, }, ); @@ -1623,16 +1315,14 @@ impl ShieldedContext { (indexed_tx, vk), DecryptedData { tx: 
masp_tx.clone(), - keys: changed_balance_keys.clone(), delta: tx_delta, - epoch, }, ); } let mut temp_cache = DecryptedDataCache::default(); std::mem::swap(&mut temp_cache, &mut self.decrypted_note_cache); scanned_data.apply_to(self); - self.nullify_spent_notes(&native_token)?; + self.nullify_spent_notes()?; std::mem::swap(&mut temp_cache, &mut self.decrypted_note_cache); // Save the speculative state for future usage self.save().await.map_err(|e| Error::Other(e.to_string()))?; @@ -1663,7 +1353,7 @@ impl ShieldedContext { Ok(asset_type) } - /// Convert Anoma amount and token type to MASP equivalents + /// Convert Namada amount and token type to MASP equivalents async fn convert_amount( &mut self, client: &C, @@ -1747,7 +1437,6 @@ impl ShieldedContext { // Save the context to persist newly added keys let _ = self.save().await; - let native_token = query_native_token(client).await?; // the height of the key that is least synced let Some(least_idx) = self.vk_heights.values().min().cloned() else { return Ok(()); @@ -1805,48 +1494,43 @@ impl ShieldedContext { // N.B. DON'T GO PANICKING IN HERE. DON'T DO IT. SERIOUSLY. // YOU COULD ACCIDENTALLY FREEZE EVERYTHING let txs = progress.scan(fetch_recv); - txs.par_bridge().try_for_each( - |(indexed_tx, (epoch, tx, stx))| { - let mut scanned_data = ScannedData::default(); - for (vk, _) in vk_heights - .iter() - .filter(|(_vk, h)| **h < Some(indexed_tx)) + txs.par_bridge().try_for_each(|(indexed_tx, stx)| { + let mut scanned_data = ScannedData::default(); + for (vk, _) in vk_heights + .iter() + .filter(|(_vk, h)| **h < Some(indexed_tx)) + { + // if this note is in the cache, skip it. + if scanned_data + .decrypted_note_cache + .contains(&indexed_tx, vk) { - // if this note is in the cache, skip it. 
- if scanned_data - .decrypted_note_cache - .contains(&indexed_tx, vk) - { - continue; - } - // attempt to decrypt the note and get the state - // changes - let (scanned, tx_delta) = task_scheduler - .scan_tx( - self.sync_status, - indexed_tx, - &self.tx_note_map, - &stx, - vk, - )?; - // add the new state changes to the aggregated - scanned_data.merge(scanned); - // add the note to the cache - scanned_data.decrypted_note_cache.insert( - (indexed_tx, *vk), - DecryptedData { - tx: stx.clone(), - keys: tx.clone(), - delta: tx_delta, - epoch, - }, - ); + continue; } - // save the aggregated state changes - task_scheduler.save(scanned_data, indexed_tx); - Ok::<(), Error>(()) - }, - )?; + // attempt to decrypt the note and get the state + // changes + let (scanned, tx_delta) = task_scheduler.scan_tx( + self.sync_status, + indexed_tx, + &self.tx_note_map, + &stx, + vk, + )?; + // add the new state changes to the aggregated + scanned_data.merge(scanned); + // add the note to the cache + scanned_data.decrypted_note_cache.insert( + (indexed_tx, *vk), + DecryptedData { + tx: stx.clone(), + delta: tx_delta, + }, + ); + } + // save the aggregated state changes + task_scheduler.save(scanned_data, indexed_tx); + Ok::<(), Error>(()) + })?; // signal that the process has finished without error task_scheduler.complete(false); Ok::<(), Error>(()) @@ -1858,7 +1542,7 @@ impl ShieldedContext { tokio::task::block_in_place(|| { tokio::runtime::Handle::current().block_on(async { tokio::join!( - task_manager.run(&native_token), + task_manager.run(), Self::fetch_shielded_transfers::<_, _, M>( fetch_send, client, @@ -1899,149 +1583,6 @@ impl ShieldedContext { } }) } - - /// Obtain the known effects of all accepted shielded and transparent - /// transactions. If an owner is specified, then restrict the set to only - /// transactions crediting/debiting the given owner. If token is specified, - /// then restrict set to only transactions involving the given token. 
- #[cfg(not(target_family = "wasm"))] - pub async fn query_tx_deltas( - &mut self, - client: &C, - io: &IO, - query_owner: &Either>, - query_token: &Option
, - viewing_keys: &HashMap, - ) -> Result< - BTreeMap, - Error, - > { - const TXS_PER_PAGE: u8 = 100; - let _ = self.load().await; - let vks = viewing_keys; - let fvks: Vec<_> = vks - .values() - .map(|fvk| ExtendedFullViewingKey::from(*fvk).fvk.vk) - .collect(); - // Required for filtering out rejected transactions from Tendermint - // responses - let block_results = rpc::query_results(client).await?; - self.fetch::<_, _, _, LedgerMaspClient>( - client, - &DefaultTracker::new(io), - RetryStrategy::Forever, - None, - None, - 1, - &[], - &fvks, - ) - .await?; - // Save the update state so that future fetches can be short-circuited - let _ = self.save().await; - - let mut transfers = self.get_tx_deltas().clone(); - // Construct the set of addresses relevant to user's query - let relevant_addrs = match &query_owner { - Either::Left(BalanceOwner::Address(owner)) => vec![owner.clone()], - // MASP objects are dealt with outside of tx_search - Either::Left(BalanceOwner::FullViewingKey(_viewing_key)) => vec![], - Either::Left(BalanceOwner::PaymentAddress(_owner)) => vec![], - // Unspecified owner means all known addresses are considered - // relevant - Either::Right(addrs) => addrs.clone(), - }; - // Find all transactions to or from the relevant address set - for addr in relevant_addrs { - for prop in ["transfer.source", "transfer.target"] { - // Query transactions involving the current address - let mut tx_query = Query::eq(prop, addr.encode()); - // Elaborate the query if requested by the user - if let Some(token) = &query_token { - tx_query = - tx_query.and_eq("transfer.token", token.encode()); - } - for page in 1.. { - let txs = &client - .tx_search( - tx_query.clone(), - true, - page, - TXS_PER_PAGE, - Order::Ascending, - ) - .await - .map_err(|e| { - Error::from(QueryError::General(format!( - "for transaction: {e}" - ))) - })? 
- .txs; - for response_tx in txs { - let height = BlockHeight(response_tx.height.value()); - let idx = TxIndex(response_tx.index); - // Only process yet unprocessed transactions which have - // been accepted by node VPs - // TODO: Check that wrappers shouldn't be considered - // here - let should_process = - !transfers.contains_key(&IndexedTx { - height, - index: idx, - is_wrapper: false, - }) && block_results[u64::from(height) as usize] - .is_accepted(idx.0 as usize); - if !should_process { - continue; - } - let tx = Tx::try_from(response_tx.tx.as_ref()) - .map_err(|e| Error::Other(e.to_string()))?; - let mut wrapper = None; - let mut transfer = None; - extract_payload(tx, &mut wrapper, &mut transfer)?; - // Epoch data is not needed for transparent transactions - let epoch = - wrapper.map(|x| x.epoch).unwrap_or_default(); - if let Some(transfer) = transfer { - // Skip MASP addresses as they are already handled - // by ShieldedContext - if transfer.source == MASP - || transfer.target == MASP - { - continue; - } - // Describe how a Transfer simply subtracts from one - // account and adds the same to another - - let delta = TransferDelta::from([( - transfer.source.clone(), - MaspChange { - asset: transfer.token.clone(), - change: -transfer.amount.amount().change(), - }, - )]); - - // No shielded accounts are affected by this - // Transfer - transfers.insert( - IndexedTx { - height, - index: idx, - is_wrapper: false, - }, - (epoch, delta, TransactionDelta::new()), - ); - } - } - // An incomplete page signifies no more transactions - if (txs.len() as u8) < TXS_PER_PAGE { - break; - } - } - } - } - Ok(transfers) - } } #[cfg(test)] @@ -2049,9 +1590,7 @@ mod shielded_ctx_tests { use core::str::FromStr; use masp_primitives::zip32::ExtendedFullViewingKey; - use namada_core::address::InternalAddress; use namada_core::masp::ExtendedViewingKey; - use namada_core::storage::Key; use tempfile::tempdir; use super::*; @@ -2071,19 +1610,8 @@ mod shielded_ctx_tests { /// making these 
things is a misery not worth my time. /// /// This a tx sending 1 BTC from Albert to Albert's PA - fn arbitrary_masp_tx() -> (Transaction, BTreeSet) { - const ALBERT: &str = "tnam1qxfj3sf6a0meahdu9t6znp05g8zx4dkjtgyn9gfu"; - const BTC: &str = "tnam1qy88jaykzw8tay6svmu6kkxxj5xd53w6qvqkw20u"; - let albert = Address::from_str(ALBERT).unwrap(); - let btc = Address::from_str(BTC).unwrap(); - let mut changed_keys = BTreeSet::default(); - changed_keys.insert(balance_key(&btc, &albert)); - changed_keys.insert(balance_key( - &btc, - &Address::Internal(InternalAddress::Masp), - )); - - let tx = Transaction::try_from_slice(&[ + fn arbitrary_masp_tx() -> Transaction { + Transaction::try_from_slice(&[ 2, 0, 0, 0, 10, 39, 167, 38, 166, 117, 255, 233, 0, 0, 0, 0, 255, 255, 255, 255, 1, 162, 120, 217, 193, 173, 117, 92, 126, 107, 199, 182, 72, 95, 60, 122, 52, 9, 134, 72, 4, 167, 41, 187, 171, 17, @@ -2166,8 +1694,7 @@ mod shielded_ctx_tests { 132, 19, 106, 221, 246, 176, 100, 20, 114, 26, 55, 163, 14, 173, 255, 121, 181, 58, 121, 140, 3, ]) - .expect("Test failed"); - (tx, changed_keys) + .expect("Test failed") } /// Test that if fetching fails before finishing, @@ -2212,7 +1739,7 @@ mod shielded_ctx_tests { // We now have a fetch failure followed by two successful // masp txs from the same block. 
- let (masp_tx, changed_keys) = arbitrary_masp_tx(); + let masp_tx = arbitrary_masp_tx(); masp_tx_sender.send(None).expect("Test failed"); masp_tx_sender .send(Some(( @@ -2221,7 +1748,7 @@ mod shielded_ctx_tests { index: TxIndex(1), is_wrapper: false, }, - (Default::default(), changed_keys.clone(), masp_tx.clone()), + masp_tx.clone(), ))) .expect("Test failed"); masp_tx_sender @@ -2231,7 +1758,7 @@ mod shielded_ctx_tests { index: TxIndex(2), is_wrapper: false, }, - (Default::default(), changed_keys, masp_tx.clone()), + masp_tx.clone(), ))) .expect("Test failed"); @@ -2296,7 +1823,7 @@ mod shielded_ctx_tests { ) .fvk .vk; - let (masp_tx, changed_keys) = arbitrary_masp_tx(); + let masp_tx = arbitrary_masp_tx(); // first fetch no blocks masp_tx_sender.send(None).expect("Test failed"); @@ -2323,7 +1850,7 @@ mod shielded_ctx_tests { index: Default::default(), is_wrapper: false, }, - (Default::default(), changed_keys.clone(), masp_tx.clone()), + masp_tx.clone(), ))) .expect("Test failed"); masp_tx_sender.send(None).expect("Test failed"); @@ -2386,7 +1913,7 @@ mod shielded_ctx_tests { index: Default::default(), is_wrapper: false, }, - (Default::default(), changed_keys.clone(), masp_tx.clone()), + masp_tx.clone(), ))) .expect("Test failed"); masp_tx_sender @@ -2396,7 +1923,7 @@ mod shielded_ctx_tests { index: Default::default(), is_wrapper: false, }, - (Default::default(), changed_keys.clone(), masp_tx.clone()), + masp_tx.clone(), ))) .expect("Test failed"); // this should not produce an error since we have fetched @@ -2436,7 +1963,7 @@ mod shielded_ctx_tests { .vk; // the fetched txs - let (masp_tx, changed_keys) = arbitrary_masp_tx(); + let masp_tx = arbitrary_masp_tx(); masp_tx_sender .send(Some(( IndexedTx { @@ -2444,7 +1971,7 @@ mod shielded_ctx_tests { index: TxIndex(1), is_wrapper: false, }, - (Default::default(), changed_keys.clone(), masp_tx.clone()), + masp_tx.clone(), ))) .expect("Test failed"); masp_tx_sender @@ -2454,7 +1981,7 @@ mod shielded_ctx_tests { 
index: TxIndex(2), is_wrapper: false, }, - (Default::default(), changed_keys.clone(), masp_tx.clone()), + masp_tx.clone(), ))) .expect("Test failed"); @@ -2506,7 +2033,7 @@ mod shielded_ctx_tests { .vk; // Fetch a large number of MASP notes - let (masp_tx, changed_keys) = arbitrary_masp_tx(); + let masp_tx = arbitrary_masp_tx(); for h in 1..20 { masp_tx_sender .send(Some(( @@ -2515,7 +2042,7 @@ mod shielded_ctx_tests { index: TxIndex(1), is_wrapper: false, }, - (Default::default(), changed_keys.clone(), masp_tx.clone()), + masp_tx.clone(), ))) .expect("Test failed"); } @@ -2565,7 +2092,7 @@ mod shielded_ctx_tests { index: TxIndex(1), is_wrapper: false, }, - (Default::default(), changed_keys.clone(), masp_tx.clone()), + masp_tx.clone(), ))) .expect("Test failed"); } diff --git a/crates/sdk/src/masp/types.rs b/crates/sdk/src/masp/types.rs index 6ea87627cd..749ac84b9c 100644 --- a/crates/sdk/src/masp/types.rs +++ b/crates/sdk/src/masp/types.rs @@ -35,16 +35,10 @@ use crate::error::Error; use crate::masp::{ShieldedContext, ShieldedUtils}; /// Type alias for convenience and profit -pub type IndexedNoteData = BTreeMap< - IndexedTx, - (Epoch, BTreeSet, Transaction), ->; +pub type IndexedNoteData = BTreeMap; /// Type alias for the entries of [`IndexedNoteData`] iterators -pub type IndexedNoteEntry = ( - IndexedTx, - (Epoch, BTreeSet, Transaction), -); +pub type IndexedNoteEntry = (IndexedTx, Transaction); /// Represents the amount used of different conversions pub type Conversions = @@ -83,7 +77,7 @@ impl Authorization for PartialAuthorized { #[derive(Clone, Debug, BorshSerialize, BorshDeserialize, BorshDeserializer)] pub struct ShieldedTransfer { /// Shielded transfer builder - pub builder: Builder<(), (), ExtendedFullViewingKey, ()>, + pub builder: Builder<(), ExtendedFullViewingKey, ()>, /// MASP transaction pub masp_tx: Transaction, /// Metadata @@ -108,10 +102,8 @@ pub struct MaspTokenRewardData { /// and / or the main payload. 
#[derive(Debug, Clone)] pub(super) struct ExtractedMaspTx { - pub(crate) fee_unshielding: - Option<(BTreeSet, Transaction)>, - pub(crate) inner_tx: - Option<(BTreeSet, Transaction)>, + pub(crate) fee_unshielding: Option, + pub(crate) inner_tx: Option, } /// MASP verifying keys @@ -227,9 +219,7 @@ impl ScannedData { /// is not parallelizable). pub struct DecryptedData { pub tx: Transaction, - pub keys: BTreeSet, pub delta: TransactionDelta, - pub epoch: Epoch, } /// A cache of decrypted txs that have not yet been @@ -392,19 +382,9 @@ impl } } -impl - MapBuilder< - P1, - R1, - ExtendedSpendingKey, - N1, - (), - (), - ExtendedFullViewingKey, - (), - > for WalletMap +impl + MapBuilder + for WalletMap { - fn map_rng(&self, _s: R1) {} - fn map_notifier(&self, _s: N1) {} } diff --git a/crates/sdk/src/masp/utils.rs b/crates/sdk/src/masp/utils.rs index 6c529d8497..a65737562b 100644 --- a/crates/sdk/src/masp/utils.rs +++ b/crates/sdk/src/masp/utils.rs @@ -1,5 +1,4 @@ -use core::str::FromStr; -use std::collections::{BTreeMap, BTreeSet}; +use std::collections::BTreeMap; use std::env; use std::marker::PhantomData; use std::path::PathBuf; @@ -14,12 +13,13 @@ use masp_primitives::transaction::components::I128Sum; use masp_primitives::transaction::Transaction; use masp_primitives::zip32::{ExtendedFullViewingKey, ExtendedSpendingKey}; use masp_proofs::prover::LocalTxProver; -use namada_core::address::Address; use namada_core::collections::HashMap; use namada_core::storage::{BlockHeight, IndexedTx, TxIndex}; use namada_core::token::Transfer; +use namada_events::extend::{ + ReadFromEventAttributes, ValidMaspTx as ValidMaspTxAttr, +}; use namada_ibc::IbcMessage; -use namada_tx::data::{TxResult, WrapperTx}; use namada_tx::Tx; use rand_core::{CryptoRng, RngCore}; use tokio::sync::mpsc::{Receiver, Sender}; @@ -33,7 +33,6 @@ use crate::masp::types::{ }; use crate::masp::{ENV_VAR_MASP_PARAMS_DIR, VERIFIYING_KEYS}; use crate::queries::Client; -use crate::rpc::query_epoch_at_height; use 
crate::{MaybeSend, MaybeSync}; /// Make sure the MASP params are present and load verifying keys into memory @@ -122,18 +121,6 @@ pub(super) fn cloned_pair((a, b): (&T, &U)) -> (T, U) { (a.clone(), b.clone()) } -/// Extract the payload from the given Tx object -pub(super) fn extract_payload( - tx: Tx, - wrapper: &mut Option, - transfer: &mut Option, -) -> Result<(), Error> { - *wrapper = tx.header.wrapper(); - let _ = tx.data().map(|signed| { - Transfer::try_from_slice(&signed[..]).map(|tfer| *transfer = Some(tfer)) - }); - Ok(()) -} /// Retrieves all the indexes and tx events at the specified height which refer /// to a valid MASP transaction. If an index is given, it filters only the @@ -142,7 +129,7 @@ pub(super) async fn get_indexed_masp_events_at_height( client: &C, height: BlockHeight, first_idx_to_query: Option, -) -> Result>, Error> { +) -> Result>, Error> { let first_idx_to_query = first_idx_to_query.unwrap_or_default(); Ok(client @@ -154,69 +141,26 @@ pub(super) async fn get_indexed_masp_events_at_height( events .into_iter() .filter_map(|event| { - let tx_index = - event.attributes.iter().find_map(|attribute| { - if attribute.key == "is_valid_masp_tx" { - Some(TxIndex( - u32::from_str(&attribute.value).unwrap(), - )) - } else { - None - } - }); - - match tx_index { - Some(idx) => { - if idx >= first_idx_to_query { - Some((idx, event)) - } else { - None - } - } - None => None, + let tx_index = ValidMaspTxAttr::read_from_event_attributes( + &event.attributes, + ) + .ok()?; + + if tx_index >= first_idx_to_query { + Some(tx_index) + } else { + None } }) .collect::>() })) } -pub(super) enum ExtractShieldedActionArg<'args, C: Client> { - Event(&'args crate::tendermint::abci::Event), - Request((&'args C, BlockHeight, Option)), -} - /// Extract the relevant shielded portions of a [`Tx`], if any. 
-pub(super) async fn extract_masp_tx<'args, C: Client + Sync>( +pub(super) async fn extract_masp_tx( tx: &Tx, - action_arg: ExtractShieldedActionArg<'args, C>, check_header: bool, ) -> Result { - // We use the changed keys instead of the Transfer object - // because those are what the masp validity predicate works on - let (wrapper_changed_keys, changed_keys) = - if let ExtractShieldedActionArg::Event(tx_event) = action_arg { - let tx_result_str = tx_event - .attributes - .iter() - .find_map(|attr| { - if attr.key == "inner_tx" { - Some(&attr.value) - } else { - None - } - }) - .ok_or_else(|| { - Error::Other( - "Missing required tx result in event".to_string(), - ) - })?; - let result = TxResult::from_str(tx_result_str) - .map_err(|e| Error::Other(e.to_string()))?; - (result.wrapper_changed_keys, result.changed_keys) - } else { - (Default::default(), Default::default()) - }; - let tx_header = tx.header(); // NOTE: simply looking for masp sections attached to the tx // is not safe. We don't validate the sections attached to a @@ -240,7 +184,7 @@ pub(super) async fn extract_masp_tx<'args, C: Client + Sync>( Error::Other("Missing masp transaction".to_string()) })?; - Some((wrapper_changed_keys, masp_transaction)) + Some(masp_transaction) } else { None }; @@ -250,17 +194,15 @@ pub(super) async fn extract_masp_tx<'args, C: Client + Sync>( .data() .ok_or_else(|| Error::Other("Missing data section".to_string()))?; let maybe_masp_tx = match Transfer::try_from_slice(&tx_data) { - Ok(transfer) => Some((changed_keys, transfer)), + Ok(transfer) => Some(transfer), Err(_) => { // This should be a MASP over IBC transaction, it // could be a ShieldedTransfer or an Envelope // message, need to try both - extract_payload_from_shielded_action::(&tx_data, action_arg) - .await - .ok() + extract_payload_from_shielded_action(&tx_data).await.ok() } } - .map(|(changed_keys, transfer)| { + .map(|transfer| { if let Some(hash) = transfer.shielded { let masp_tx = tx .get_section(&hash) @@ 
-274,7 +216,7 @@ pub(super) async fn extract_masp_tx<'args, C: Client + Sync>( Error::Other("Missing masp transaction".to_string()) })?; - Ok::<_, Error>(Some((changed_keys, masp_tx))) + Ok::<_, Error>(Some(masp_tx)) } else { Ok(None) } @@ -289,63 +231,33 @@ pub(super) async fn extract_masp_tx<'args, C: Client + Sync>( } /// Extract the changed keys and Transaction hash from a MASP over ibc message -pub(super) async fn extract_payload_from_shielded_action< - 'args, - C: Client + Sync, ->( +pub(super) async fn extract_payload_from_shielded_action( tx_data: &[u8], - args: ExtractShieldedActionArg<'args, C>, -) -> Result<(BTreeSet, Transfer), Error> { +) -> Result { let message = namada_ibc::decode_message(tx_data) .map_err(|e| Error::Other(e.to_string()))?; let result = match message { - IbcMessage::Transfer(msg) => { - let tx_result = get_sending_result(args)?; - - let transfer = msg.transfer.ok_or_else(|| { - Error::Other("Missing masp tx in the ibc message".to_string()) - })?; - - (tx_result.changed_keys, transfer) - } - IbcMessage::NftTransfer(msg) => { - let tx_result = get_sending_result(args)?; - - let transfer = msg.transfer.ok_or_else(|| { - Error::Other("Missing masp tx in the ibc message".to_string()) - })?; - - (tx_result.changed_keys, transfer) - } - IbcMessage::RecvPacket(msg) => { - let tx_result = get_receiving_result(args).await?; - - let transfer = msg.transfer.ok_or_else(|| { - Error::Other("Missing masp tx in the ibc message".to_string()) - })?; - - (tx_result.changed_keys, transfer) - } + IbcMessage::Transfer(msg) => msg.transfer.ok_or_else(|| { + Error::Other("Missing masp tx in the ibc message".to_string()) + })?, + IbcMessage::NftTransfer(msg) => msg.transfer.ok_or_else(|| { + Error::Other("Missing masp tx in the ibc message".to_string()) + })?, + IbcMessage::RecvPacket(msg) => msg.transfer.ok_or_else(|| { + Error::Other("Missing masp tx in the ibc message".to_string()) + })?, IbcMessage::AckPacket(msg) => { // Refund tokens by the ack message 
- let tx_result = get_receiving_result(args).await?; - - let transfer = msg.transfer.ok_or_else(|| { + msg.transfer.ok_or_else(|| { Error::Other("Missing masp tx in the ibc message".to_string()) - })?; - - (tx_result.changed_keys, transfer) + })? } IbcMessage::Timeout(msg) => { // Refund tokens by the timeout message - let tx_result = get_receiving_result(args).await?; - - let transfer = msg.transfer.ok_or_else(|| { + msg.transfer.ok_or_else(|| { Error::Other("Missing masp tx in the ibc message".to_string()) - })?; - - (tx_result.changed_keys, transfer) + })? } IbcMessage::Envelope(_) => { return Err(Error::Other( @@ -357,78 +269,6 @@ pub(super) async fn extract_payload_from_shielded_action< Ok(result) } -fn get_sending_result( - args: ExtractShieldedActionArg<'_, C>, -) -> Result { - let tx_event = match args { - ExtractShieldedActionArg::Event(event) => event, - ExtractShieldedActionArg::Request(_) => { - return Err(Error::Other( - "Unexpected event request for ShieldedTransfer".to_string(), - )); - } - }; - - get_tx_result(tx_event) -} - -async fn get_receiving_result( - args: ExtractShieldedActionArg<'_, C>, -) -> Result { - let tx_event = match args { - ExtractShieldedActionArg::Event(event) => { - std::borrow::Cow::Borrowed(event) - } - ExtractShieldedActionArg::Request((client, height, index)) => { - std::borrow::Cow::Owned( - get_indexed_masp_events_at_height(client, height, index) - .await? - .ok_or_else(|| { - Error::Other(format!( - "Missing required ibc event at block height {}", - height - )) - })? - .first() - .ok_or_else(|| { - Error::Other(format!( - "Missing required ibc event at block height {}", - height - )) - })? 
- .1 - .to_owned(), - ) - } - }; - - get_tx_result(&tx_event) -} - -fn get_tx_result( - tx_event: &crate::tendermint::abci::Event, -) -> Result { - tx_event - .attributes - .iter() - .find_map(|attribute| { - if attribute.key == "inner_tx" { - let tx_result = TxResult::from_str(&attribute.value) - .expect("The event value should be parsable"); - Some(tx_result) - } else { - None - } - }) - .ok_or_else(|| { - Error::Other( - "Couldn't find changed keys in the event for the provided \ - transaction" - .to_string(), - ) - }) -} - /// The updates to the commitment tree and witness maps /// fetched at the beginning of shielded-sync. pub struct CommitmentTreeUpdates { @@ -528,7 +368,7 @@ where note_map_delta: Default::default(), }; let mut note_pos = updates.commitment_tree.size(); - for (indexed_tx, (_, _, ref shielded)) in tx_receiver { + for (indexed_tx, ref shielded) in tx_receiver { updates.note_map_delta.insert(indexed_tx, note_pos); for so in shielded .sapling_bundle() @@ -582,16 +422,7 @@ where fetch_iter.next(); continue; } - // Get the valid masp transactions at the specified height - let epoch = query_epoch_at_height(self.client, height.into()) - .await? 
- .ok_or_else(|| { - Error::from(QueryError::General( - "Queried height is greater than the last committed \ - block height" - .to_string(), - )) - })?; + let txs_results = match get_indexed_masp_events_at_height::( self.client, height.into(), @@ -619,37 +450,32 @@ where .block .data; - for (idx, tx_event) in txs_results { + for idx in txs_results { let tx = Tx::try_from(block[idx.0 as usize].as_ref()) .map_err(|e| Error::Other(e.to_string()))?; let ExtractedMaspTx { fee_unshielding, inner_tx, - } = extract_masp_tx::( - &tx, - ExtractShieldedActionArg::Event(&tx_event), - true, - ) - .await?; - if let Some((changed_keys, masp_transaction)) = fee_unshielding - { + } = extract_masp_tx(&tx, true).await?; + // Collect the current transaction(s) + if let Some(masp_transaction) = fee_unshielding { tx_sender.send(( IndexedTx { height: height.into(), index: idx, is_wrapper: true, }, - (epoch, changed_keys, masp_transaction), + masp_transaction, )); } - if let Some((changed_keys, masp_transaction)) = inner_tx { + if let Some(masp_transaction) = inner_tx { tx_sender.send(( IndexedTx { height: height.into(), index: idx, is_wrapper: false, }, - (epoch, changed_keys, masp_transaction), + masp_transaction, )); } } @@ -801,7 +627,7 @@ impl TaskManager { /// Run all actions scheduled by the scanning thread until /// that process indicates it has finished. 
- pub async fn run(&mut self, native_token: &Address) -> Result<(), Error> { + pub async fn run(&mut self) -> Result<(), Error> { while let Some(action) = self.action.recv().await { match action { // On completion, update the height to which all keys have been @@ -818,7 +644,7 @@ impl TaskManager { *h = std::cmp::max(*h, Some(self.latest_idx)); } // updated the spent notes and balances - locked.nullify_spent_notes(native_token)?; + locked.nullify_spent_notes()?; _ = locked.save().await; } return Ok(()); From 890c486cb7d17c7e25ad3850b5a645a79eb58d4c Mon Sep 17 00:00:00 2001 From: satan Date: Mon, 27 May 2024 12:26:33 +0200 Subject: [PATCH 19/29] Fixing conflicts from rebasing --- crates/apps_lib/src/client/masp.rs | 2 +- crates/benches/native_vps.rs | 4 +- crates/node/src/bench_utils.rs | 11 +- crates/sdk/src/masp/mod.rs | 110 +++++++++-------- crates/sdk/src/masp/shielded_ctx.rs | 184 ++++++++++++---------------- crates/sdk/src/masp/test_utils.rs | 5 +- crates/sdk/src/masp/types.rs | 14 +-- crates/sdk/src/masp/utils.rs | 178 +++++++++++++-------------- crates/sdk/src/tx.rs | 2 +- crates/tx/src/types.rs | 2 + 10 files changed, 244 insertions(+), 268 deletions(-) diff --git a/crates/apps_lib/src/client/masp.rs b/crates/apps_lib/src/client/masp.rs index 46aff6695d..f5bc676ecd 100644 --- a/crates/apps_lib/src/client/masp.rs +++ b/crates/apps_lib/src/client/masp.rs @@ -40,7 +40,7 @@ pub async fn syncing< let logger = CliProgressTracker::new(io); let sync = async move { shielded - .fetch::<_, _, _, LedgerMaspClient>( + .fetch::<_, _, _, LedgerMaspClient<'_, C>>( client, &logger, RetryStrategy::Forever, diff --git a/crates/benches/native_vps.rs b/crates/benches/native_vps.rs index 5f2b4455e4..251c865b72 100644 --- a/crates/benches/native_vps.rs +++ b/crates/benches/native_vps.rs @@ -1531,7 +1531,7 @@ fn parameters(c: &mut Criterion) { let mut tx = Tx::from_type(namada::tx::data::TxType::Raw); tx.set_data(namada::tx::Data::new(borsh::to_vec(&0).unwrap())); let 
verifiers_from_tx = BTreeSet::default(); - let cmt = tx.first_commitments().unwrap().clone(); + let cmt = *tx.first_commitments().unwrap(); let batched_tx = tx.batch_tx(cmt); (verifiers_from_tx, batched_tx) } @@ -1605,7 +1605,7 @@ fn pos(c: &mut Criterion) { let mut tx = Tx::from_type(namada::tx::data::TxType::Raw); tx.set_data(namada::tx::Data::new(borsh::to_vec(&0).unwrap())); let verifiers_from_tx = BTreeSet::default(); - let cmt = tx.first_commitments().unwrap().clone(); + let cmt = *tx.first_commitments().unwrap(); let batched_tx = tx.batch_tx(cmt); (verifiers_from_tx, batched_tx) } diff --git a/crates/node/src/bench_utils.rs b/crates/node/src/bench_utils.rs index e49c1d184b..8fd9cef1f1 100644 --- a/crates/node/src/bench_utils.rs +++ b/crates/node/src/bench_utils.rs @@ -90,9 +90,8 @@ use namada_apps_lib::cli; use namada_apps_lib::cli::context::FromContext; use namada_apps_lib::cli::Context; use namada_apps_lib::wallet::{defaults, CliWalletUtils}; -use namada_sdk::masp::{ - self, ContextSyncStatus, ShieldedContext, ShieldedTransfer, ShieldedUtils, -}; +use namada_sdk::masp::types::{ContextSyncStatus, ShieldedTransfer}; +use namada_sdk::masp::{self, ShieldedContext, ShieldedUtils}; pub use namada_sdk::tx::{ TX_BECOME_VALIDATOR_WASM, TX_BOND_WASM, TX_BRIDGE_POOL_WASM, TX_CHANGE_COMMISSION_WASM as TX_CHANGE_VALIDATOR_COMMISSION_WASM, @@ -337,7 +336,7 @@ impl BenchShell { ))); } - let cmt = tx.first_commitments().unwrap().clone(); + let cmt = *tx.first_commitments().unwrap(); tx.batch_tx(cmt) } @@ -358,7 +357,7 @@ impl BenchShell { tx.set_data(Data::new(data)); // NOTE: the Ibc VP doesn't actually check the signature - let cmt = tx.first_commitments().unwrap().clone(); + let cmt = *tx.first_commitments().unwrap(); tx.batch_tx(cmt) } @@ -641,7 +640,7 @@ pub fn generate_foreign_key_tx(signer: &SecretKey) -> BatchedTx { None, ))); - let cmt = tx.first_commitments().unwrap().clone(); + let cmt = *tx.first_commitments().unwrap(); tx.batch_tx(cmt) } diff --git 
a/crates/sdk/src/masp/mod.rs b/crates/sdk/src/masp/mod.rs index e7acefcfd3..03be8aaf6e 100644 --- a/crates/sdk/src/masp/mod.rs +++ b/crates/sdk/src/masp/mod.rs @@ -14,9 +14,9 @@ use std::path::PathBuf; use borsh::{BorshDeserialize, BorshSerialize}; use lazy_static::lazy_static; #[cfg(feature = "mainnet")] -use masp_primitives::consensus::MainNetwork; +use masp_primitives::consensus::MainNetwork as Network; #[cfg(not(feature = "mainnet"))] -use masp_primitives::consensus::TestNetwork; +use masp_primitives::consensus::TestNetwork as Network; use masp_primitives::group::GroupEncoding; use masp_primitives::sapling::redjubjub::PublicKey; use masp_primitives::transaction::components::transparent::builder::TransparentBuilder; @@ -57,10 +57,7 @@ pub const ENV_VAR_MASP_PARAMS_DIR: &str = "NAMADA_MASP_PARAMS_DIR"; pub const ENV_VAR_MASP_TEST_SEED: &str = "NAMADA_MASP_TEST_SEED"; /// The network to use for MASP -#[cfg(feature = "mainnet")] -const NETWORK: MainNetwork = MainNetwork; -#[cfg(not(feature = "mainnet"))] -const NETWORK: TestNetwork = TestNetwork; +const NETWORK: Network = Network; // TODO these could be exported from masp_proof crate /// Spend circuit name @@ -451,7 +448,6 @@ pub mod testing { use super::*; use crate::address::testing::arb_address; use crate::masp::types::{ShieldedTransfer, WalletMap}; - use crate::masp::utils::find_valid_diversifier; use crate::masp_primitives::consensus::BranchId; use crate::masp_primitives::constants::VALUE_COMMITMENT_RANDOMNESS_GENERATOR; use crate::masp_primitives::merkle_tree::FrozenCommitmentTree; @@ -568,8 +564,13 @@ pub mod testing { sighash_value, binding_sig, |bvk, msg, binding_sig| { + // Compute the signature's message for bvk/binding_sig + let mut data_to_be_signed = [0u8; 64]; + data_to_be_signed[0..32].copy_from_slice(&bvk.0.to_bytes()); + data_to_be_signed[32..64].copy_from_slice(msg); + bvk.verify_with_zip216( - &msg, + &data_to_be_signed, &binding_sig, VALUE_COMMITMENT_RANDOMNESS_GENERATOR, self.zip216_enabled, 
@@ -579,8 +580,8 @@ pub mod testing { } } - // This function computes `value` in the exponent of the value commitment - // base + /// This function computes `value` in the exponent of the value commitment + /// base fn masp_compute_value_balance( asset_type: AssetType, value: i128, @@ -610,8 +611,8 @@ pub mod testing { Some(value_balance.into()) } - // A context object for creating the Sapling components of a Zcash - // transaction. + /// A context object for creating the Sapling components of a Zcash + /// transaction. pub struct SaplingProvingContext { bsk: jubjub::Fr, // (sum of the Spend value commitments) - (sum of the Output value @@ -619,9 +620,9 @@ pub mod testing { cv_sum: jubjub::ExtendedPoint, } - // An implementation of TxProver that does everything except generating - // valid zero-knowledge proofs. Uses the supplied source of randomness to - // carry out its operations. + /// An implementation of TxProver that does everything except generating + /// valid zero-knowledge proofs. Uses the supplied source of randomness to + /// carry out its operations. pub struct MockTxProver(pub Mutex); impl TxProver for MockTxProver { @@ -828,7 +829,7 @@ pub mod testing { } #[derive(Debug, Clone)] - // Adapts a CSPRNG from a PRNG for proptesting + /// Adapts a CSPRNG from a PRNG for proptesting pub struct TestCsprng(R); impl CryptoRng for TestCsprng {} @@ -855,14 +856,14 @@ pub mod testing { } prop_compose! { - // Expose a random number generator + /// Expose a random number generator pub fn arb_rng()(rng in Just(()).prop_perturb(|(), rng| rng)) -> TestRng { rng } } prop_compose! { - // Generate an arbitrary output description with the given value + /// Generate an arbitrary output description with the given value pub fn arb_output_description( asset_type: AssetType, value: u64, @@ -884,13 +885,13 @@ pub mod testing { } prop_compose! 
{ - // Generate an arbitrary spend description with the given value + /// Generate an arbitrary spend description with the given value pub fn arb_spend_description( asset_type: AssetType, value: u64, )( address in arb_transparent_address(), - expiration_height in arb_height(BranchId::MASP, &TestNetwork), + expiration_height in arb_height(BranchId::MASP, &Network), mut rng in arb_rng().prop_map(TestCsprng), bparams_rng in arb_rng().prop_map(TestCsprng), prover_rng in arb_rng().prop_map(TestCsprng), @@ -905,7 +906,7 @@ pub mod testing { .to_payment_address(div) .expect("a PaymentAddress"); - let mut builder = Builder::::new( + let mut builder = Builder::::new( NETWORK, // NOTE: this is going to add 20 more blocks to the actual // expiration but there's no other exposed function that we could @@ -944,7 +945,7 @@ pub mod testing { } prop_compose! { - // Generate an arbitrary MASP denomination + /// Generate an arbitrary MASP denomination pub fn arb_masp_digit_pos()(denom in 0..4u8) -> MaspDigitPos { MaspDigitPos::from(denom) } @@ -956,8 +957,8 @@ pub mod testing { const MAX_SPLITS: usize = 3; prop_compose! { - // Arbitrarily partition the given vector of integers into sets and sum - // them + /// Arbitrarily partition the given vector of integers into sets and sum + /// them pub fn arb_partition(values: Vec)(buckets in ((!values.is_empty()) as usize)..=values.len())( values in Just(values.clone()), assigns in collection::vec(0..buckets, values.len()), @@ -972,8 +973,8 @@ pub mod testing { } prop_compose! { - // Generate arbitrary spend descriptions with the given asset type - // partitioning the given values + /// Generate arbitrary spend descriptions with the given asset type + /// partitioning the given values pub fn arb_spend_descriptions( asset: AssetData, values: Vec, @@ -995,8 +996,8 @@ pub mod testing { } prop_compose! 
{ - // Generate arbitrary output descriptions with the given asset type - // partitioning the given values + /// Generate arbitrary output descriptions with the given asset type + /// partitioning the given values pub fn arb_output_descriptions( asset: AssetData, values: Vec, @@ -1018,8 +1019,8 @@ pub mod testing { } prop_compose! { - // Generate arbitrary spend descriptions with the given asset type - // partitioning the given values + /// Generate arbitrary spend descriptions with the given asset type + /// partitioning the given values pub fn arb_txouts( asset: AssetData, values: Vec, @@ -1043,7 +1044,7 @@ pub mod testing { } prop_compose! { - // Generate an arbitrary shielded MASP transaction builder + /// Generate an arbitrary shielded MASP transaction builder pub fn arb_shielded_builder(asset_range: impl Into)( assets in collection::hash_map( arb_pre_asset_type(), @@ -1051,7 +1052,7 @@ pub mod testing { asset_range, ), )( - expiration_height in arb_height(BranchId::MASP, &TestNetwork), + expiration_height in arb_height(BranchId::MASP, &Network), spend_descriptions in assets .iter() .map(|(asset, values)| arb_spend_descriptions(asset.clone(), values.clone())) @@ -1062,15 +1063,15 @@ pub mod testing { .collect::>(), assets in Just(assets), ) -> ( - Builder::, + Builder::, HashMap, ) { - let mut builder = Builder::::new( + let mut builder = Builder::::new( NETWORK, // NOTE: this is going to add 20 more blocks to the actual // expiration but there's no other exposed function that we could // use from the masp crate to specify the expiration better - expiration_height.unwrap() + expiration_height.unwrap(), ); let mut leaves = Vec::new(); // First construct a Merkle tree containing all notes to be used @@ -1090,7 +1091,7 @@ pub mod testing { } prop_compose! 
{ - // Generate an arbitrary pre-asset type + /// Generate an arbitrary pre-asset type pub fn arb_pre_asset_type()( token in arb_address(), denom in arb_denomination(), @@ -1107,7 +1108,7 @@ pub mod testing { } prop_compose! { - // Generate an arbitrary shielding MASP transaction builder + /// Generate an arbitrary shielding MASP transaction builder pub fn arb_shielding_builder( source: TransparentAddress, asset_range: impl Into, @@ -1118,7 +1119,7 @@ pub mod testing { asset_range, ), )( - expiration_height in arb_height(BranchId::MASP, &TestNetwork), + expiration_height in arb_height(BranchId::MASP, &Network), txins in assets .iter() .map(|(asset, values)| arb_txouts(asset.clone(), values.clone(), source)) @@ -1129,10 +1130,10 @@ pub mod testing { .collect::>(), assets in Just(assets), ) -> ( - Builder::, + Builder::, HashMap, ) { - let mut builder = Builder::::new( + let mut builder = Builder::::new( NETWORK, // NOTE: this is going to add 20 more blocks to the actual // expiration but there's no other exposed function that we could @@ -1150,7 +1151,7 @@ pub mod testing { } prop_compose! 
{ - // Generate an arbitrary deshielding MASP transaction builder + /// Generate an arbitrary deshielding MASP transaction builder pub fn arb_deshielding_builder( target: TransparentAddress, asset_range: impl Into, @@ -1161,7 +1162,7 @@ pub mod testing { asset_range, ), )( - expiration_height in arb_height(BranchId::MASP, &TestNetwork), + expiration_height in arb_height(BranchId::MASP, &Network), spend_descriptions in assets .iter() .map(|(asset, values)| arb_spend_descriptions(asset.clone(), values.clone())) @@ -1172,10 +1173,10 @@ pub mod testing { .collect::>(), assets in Just(assets), ) -> ( - Builder::, + Builder::, HashMap, ) { - let mut builder = Builder::::new( + let mut builder = Builder::::new( NETWORK, // NOTE: this is going to add 20 more blocks to the actual // expiration but there's no other exposed function that we could @@ -1200,7 +1201,7 @@ pub mod testing { } prop_compose! { - // Generate an arbitrary MASP shielded transfer + /// Generate an arbitrary MASP shielded transfer pub fn arb_shielded_transfer( asset_range: impl Into, )(asset_range in Just(asset_range.into()))( @@ -1226,7 +1227,7 @@ pub mod testing { } prop_compose! { - // Generate an arbitrary MASP shielded transfer + /// Generate an arbitrary MASP shielded transfer pub fn arb_shielding_transfer( source: TransparentAddress, asset_range: impl Into, @@ -1256,7 +1257,7 @@ pub mod testing { } prop_compose! 
{ - // Generate an arbitrary MASP shielded transfer + /// Generate an arbitrary MASP shielded transfer pub fn arb_deshielding_transfer( target: TransparentAddress, asset_range: impl Into, @@ -1324,12 +1325,19 @@ pub mod fs { && convert_path.exists() && output_path.exists()) { - println!("MASP parameters not present, downloading..."); + #[allow(clippy::print_stdout)] + { + println!("MASP parameters not present, downloading..."); + } masp_proofs::download_masp_parameters(None) .expect("MASP parameters not present or downloadable"); - println!( - "MASP parameter download complete, resuming execution..." - ); + #[allow(clippy::print_stdout)] + { + println!( + "MASP parameter download complete, resuming \ + execution..." + ); + } } // Finally initialize a shielded context with the supplied directory diff --git a/crates/sdk/src/masp/shielded_ctx.rs b/crates/sdk/src/masp/shielded_ctx.rs index 4153092058..4c36e70c3d 100644 --- a/crates/sdk/src/masp/shielded_ctx.rs +++ b/crates/sdk/src/masp/shielded_ctx.rs @@ -1,3 +1,5 @@ +//! The main implementation of the shielded context. This acts +//! 
as wallet and client for the MASP use std::cmp::Ordering; use std::collections::{btree_map, BTreeMap, BTreeSet}; use std::convert::TryInto; @@ -17,6 +19,7 @@ use masp_primitives::sapling::{ Diversifier, Node, Note, Nullifier, ViewingKey, }; use masp_primitives::transaction::builder::Builder; +use masp_primitives::transaction::components::sapling::builder::RngBuildParams; use masp_primitives::transaction::components::{ I128Sum, OutputDescription, TxOut, U64Sum, ValueSum, }; @@ -24,17 +27,16 @@ use masp_primitives::transaction::fees::fixed::FeeRule; use masp_primitives::transaction::{ builder, Authorization, Authorized, Transaction, TransparentAddress, }; -use masp_primitives::transaction::components::sapling::builder::RngBuildParams; use masp_primitives::zip32::{ExtendedFullViewingKey, ExtendedSpendingKey}; use namada_core::address::Address; use namada_core::collections::{HashMap, HashSet}; use namada_core::masp::{ - encode_asset_type, AssetData, - TransferSource, TransferTarget, + encode_asset_type, AssetData, TransferSource, TransferTarget, }; -use namada_core::storage::{BlockHeight, Epoch, IndexedTx, TxIndex}; +use namada_core::storage::{BlockHeight, Epoch}; use namada_core::time::{DateTimeUtc, DurationSecs}; use namada_token::{self as token, Denomination, MaspDigitPos}; +use namada_tx::{IndexedTx, TxCommitments}; use rand_core::OsRng; use rayon::prelude::*; use ripemd::Digest as RipemdDigest; @@ -48,15 +50,13 @@ use crate::masp::types::{ Unscanned, WalletMap, }; use crate::masp::utils::{ - cloned_pair, fetch_channel, is_amount_required, - to_viewing_key, FetchQueueSender, - MaspClient, ProgressTracker, RetryStrategy, ShieldedUtils, TaskManager, + cloned_pair, fetch_channel, is_amount_required, to_viewing_key, + FetchQueueSender, MaspClient, ProgressTracker, RetryStrategy, + ShieldedUtils, TaskManager, }; use crate::masp::{Network, NETWORK}; use crate::queries::Client; -use crate::rpc::{ - query_block, query_conversion, query_denom, -}; +use 
crate::rpc::{query_block, query_conversion, query_denom}; use crate::{display_line, edisplay_line, rpc, MaybeSend, MaybeSync, Namada}; /// Represents the current state of the shielded pool from the perspective of @@ -290,9 +290,7 @@ impl ShieldedContext { /// * nullify notes that have been spent /// * update balances of each viewing key pub(super) fn nullify_spent_notes(&mut self) -> Result<(), Error> { - for ((_, _vk), decrypted_data) in - self.decrypted_note_cache.drain() - { + for ((_, _vk), decrypted_data) in self.decrypted_note_cache.drain() { let DecryptedData { tx: shielded, delta: mut transaction_delta, @@ -536,11 +534,9 @@ impl ShieldedContext { let required = value / threshold; // Forget about the trace amount left over because we cannot // realize its value - let trace = I128Sum::from_pair(asset_type, value % threshold) - .expect("the trace should be a valid i128"); + let trace = I128Sum::from_pair(asset_type, value % threshold); let normed_trace = - I128Sum::from_pair(normed_asset_type, value % threshold) - .expect("the trace should be a valid i128"); + I128Sum::from_pair(normed_asset_type, value % threshold); // Record how much more of the given conversion has been used *usage += required; // Apply the conversions to input and move the trace amount to output @@ -720,13 +716,7 @@ impl ShieldedContext { // The amount contributed by this note before conversion let pre_contr = - I128Sum::from_pair(note.asset_type, note.value as i128) - .map_err(|()| { - Error::Other( - "received note has invalid value or asset type" - .to_string(), - ) - })?; + I128Sum::from_pair(note.asset_type, note.value as i128); let (contr, normed_contr, proposed_convs) = self .compute_exchanged_amount( context.client(), @@ -807,12 +797,10 @@ impl ShieldedContext { res += ValueSum::from_pair( pre_asset_type.token, decoded_change, - ) - .expect("expected this to fit"); + ); } None => { - undecoded += ValueSum::from_pair(*asset_type, *val) - .expect("expected this to fit"); + undecoded 
+= ValueSum::from_pair(*asset_type, *val); } _ => {} } @@ -842,11 +830,9 @@ impl ShieldedContext { res += MaspAmount::from_pair( (decoded.epoch, decoded.token), decoded_change, - ) - .expect("unable to construct decoded amount"); + ); } else { - undecoded += ValueSum::from_pair(*asset_type, *val) - .expect("expected this to fit"); + undecoded += ValueSum::from_pair(*asset_type, *val); } } (res, undecoded) @@ -865,8 +851,7 @@ impl ShieldedContext { if let Some(decoded) = self.decode_asset_type(client, *asset_type).await { - res += ValueSum::from_pair((*asset_type, decoded), *val) - .expect("unable to construct decoded amount"); + res += ValueSum::from_pair((*asset_type, decoded), *val); } } res @@ -917,17 +902,19 @@ impl ShieldedContext { #[allow(unused_mut)] let mut rng = StdRng::from_rng(OsRng).unwrap(); #[cfg(feature = "testing")] - let mut rng = if let Ok(seed) = std::env::var(super::ENV_VAR_MASP_TEST_SEED) - .map_err(|e| Error::Other(e.to_string())) - .and_then(|seed| { - let exp_str = format!( - "Env var {} must be a u64.", - super::ENV_VAR_MASP_TEST_SEED - ); - let parsed_seed: u64 = std::str::FromStr::from_str(&seed) - .map_err(|_| Error::Other(exp_str))?; - Ok(parsed_seed) - }) { + let mut rng = if let Ok(seed) = + std::env::var(super::ENV_VAR_MASP_TEST_SEED) + .map_err(|e| Error::Other(e.to_string())) + .and_then(|seed| { + let exp_str = format!( + "Env var {} must be a u64.", + super::ENV_VAR_MASP_TEST_SEED + ); + let parsed_seed: u64 = + std::str::FromStr::from_str(&seed) + .map_err(|_| Error::Other(exp_str))?; + Ok(parsed_seed) + }) { tracing::warn!( "UNSAFE: Using a seed from {} env var to build proofs.", super::ENV_VAR_MASP_TEST_SEED, @@ -1083,9 +1070,7 @@ impl ShieldedContext { // Annotate the asset type in the value balance with its decoding in // order to facilitate cross-epoch computations - let value_balance = builder.value_balance().map_err(|e| { - Error::Other(format!("unable to complete value balance: {}", e)) - })?; + let value_balance = 
builder.value_balance(); let value_balance = context .shielded_mut() .await @@ -1175,8 +1160,7 @@ impl ShieldedContext { // Convert the shortfall into a I128Sum let mut shortfall = I128Sum::zero(); for (asset_type, val) in asset_types.iter().zip(rem_amount) { - shortfall += I128Sum::from_pair(*asset_type, val.into()) - .expect("unable to construct value sum"); + shortfall += I128Sum::from_pair(*asset_type, val.into()); } // Return an insufficient funds error return Result::Err(TransferErr::from( @@ -1188,16 +1172,7 @@ impl ShieldedContext { if let Some(sk) = spending_key { // Represents the amount of inputs we are short by let mut additional = I128Sum::zero(); - for (asset_type, amt) in builder - .value_balance() - .map_err(|e| { - Error::Other(format!( - "unable to complete value balance: {}", - e - )) - })? - .components() - { + for (asset_type, amt) in builder.value_balance().components() { match amt.cmp(&0) { Ordering::Greater => { // Send the change in this asset type back to the sender @@ -1241,21 +1216,18 @@ impl ShieldedContext { let prover = context.shielded().await.utils.local_tx_prover(); #[cfg(feature = "testing")] let prover = super::testing::MockTxProver(std::sync::Mutex::new(OsRng)); - let (masp_tx, metadata) = - builder.build( - &prover, - &FeeRule::non_standard(U64Sum::zero()), - &mut rng, - &mut RngBuildParams::new(OsRng), - )?; + let (masp_tx, metadata) = builder.build( + &prover, + &FeeRule::non_standard(U64Sum::zero()), + &mut rng, + &mut RngBuildParams::new(OsRng), + )?; if update_ctx { // Cache the generated transfer let mut shielded_ctx = context.shielded_mut().await; shielded_ctx - .pre_cache_transaction( - context, &masp_tx, - ) + .pre_cache_transaction(context, &masp_tx) .await?; } @@ -1286,18 +1258,17 @@ impl ShieldedContext { let last_witnessed_tx = self.tx_note_map.keys().max(); // This data will be discarded at the next fetch so we don't need to // populate it accurately - let indexed_tx = last_witnessed_tx.map_or_else( - || IndexedTx 
{ - height: BlockHeight::first(), - index: TxIndex(0), - is_wrapper: false, - }, - |indexed| IndexedTx { - height: indexed.height, - index: TxIndex(indexed.index.0 + 1), - is_wrapper: false, - }, - ); + let indexed_tx = + last_witnessed_tx.map_or_else(IndexedTx::default, |indexed| { + IndexedTx { + height: indexed.height, + index: indexed + .index + .checked_add(1) + .expect("Tx index shouldn't overflow"), + inner_tx: TxCommitments::default(), + } + }); self.sync_status = ContextSyncStatus::Speculative; let mut scanned_data = ScannedData::default(); for vk in vks { @@ -1591,6 +1562,7 @@ mod shielded_ctx_tests { use masp_primitives::zip32::ExtendedFullViewingKey; use namada_core::masp::ExtendedViewingKey; + use namada_core::storage::TxIndex; use tempfile::tempdir; use super::*; @@ -1717,7 +1689,7 @@ mod shielded_ctx_tests { // we first test that with no retries, a fetching failure // stops process let result = shielded_ctx - .fetch::<_, _, _, TestingMaspClient>( + .fetch::<_, _, _, TestingMaspClient<'_>>( &client, &progress, RetryStrategy::Times(1), @@ -1746,7 +1718,7 @@ mod shielded_ctx_tests { IndexedTx { height: 1.into(), index: TxIndex(1), - is_wrapper: false, + inner_tx: Default::default(), }, masp_tx.clone(), ))) @@ -1756,7 +1728,7 @@ mod shielded_ctx_tests { IndexedTx { height: 1.into(), index: TxIndex(2), - is_wrapper: false, + inner_tx: Default::default(), }, masp_tx.clone(), ))) @@ -1764,7 +1736,7 @@ mod shielded_ctx_tests { // This should complete successfully shielded_ctx - .fetch::<_, _, _, TestingMaspClient>( + .fetch::<_, _, _, TestingMaspClient<'_>>( &client, &progress, RetryStrategy::Times(2), @@ -1787,12 +1759,12 @@ mod shielded_ctx_tests { IndexedTx { height: 1.into(), index: TxIndex(1), - is_wrapper: false, + inner_tx: Default::default(), }, IndexedTx { height: 1.into(), index: TxIndex(2), - is_wrapper: false, + inner_tx: Default::default(), }, ]); @@ -1802,7 +1774,7 @@ mod shielded_ctx_tests { IndexedTx { height: 1.into(), index: TxIndex(2), - 
is_wrapper: false, + inner_tx: Default::default(), } ); assert_eq!(shielded_ctx.note_map.len(), 2); @@ -1828,7 +1800,7 @@ mod shielded_ctx_tests { // first fetch no blocks masp_tx_sender.send(None).expect("Test failed"); shielded_ctx - .fetch::<_, _, _, TestingMaspClient>( + .fetch::<_, _, _, TestingMaspClient<'_>>( &client, &progress, RetryStrategy::Times(1), @@ -1848,14 +1820,14 @@ mod shielded_ctx_tests { IndexedTx { height: 1.into(), index: Default::default(), - is_wrapper: false, + inner_tx: Default::default(), }, masp_tx.clone(), ))) .expect("Test failed"); masp_tx_sender.send(None).expect("Test failed"); shielded_ctx - .fetch::<_, _, _, TestingMaspClient>( + .fetch::<_, _, _, TestingMaspClient<'_>>( &client, &progress, RetryStrategy::Times(1), @@ -1872,7 +1844,7 @@ mod shielded_ctx_tests { // fetch no blocks masp_tx_sender.send(None).expect("Test failed"); shielded_ctx - .fetch::<_, _, _, TestingMaspClient>( + .fetch::<_, _, _, TestingMaspClient<'_>>( &client, &progress, RetryStrategy::Times(1), @@ -1891,7 +1863,7 @@ mod shielded_ctx_tests { let (client, masp_tx_sender) = test_client(3.into()); masp_tx_sender.send(None).expect("Test failed"); shielded_ctx - .fetch::<_, _, _, TestingMaspClient>( + .fetch::<_, _, _, TestingMaspClient<'_>>( &client, &progress, RetryStrategy::Times(1), @@ -1911,7 +1883,7 @@ mod shielded_ctx_tests { IndexedTx { height: 2.into(), index: Default::default(), - is_wrapper: false, + inner_tx: Default::default(), }, masp_tx.clone(), ))) @@ -1921,7 +1893,7 @@ mod shielded_ctx_tests { IndexedTx { height: 3.into(), index: Default::default(), - is_wrapper: false, + inner_tx: Default::default(), }, masp_tx.clone(), ))) @@ -1930,7 +1902,7 @@ mod shielded_ctx_tests { // all expected blocks masp_tx_sender.send(None).expect("Test failed"); shielded_ctx - .fetch::<_, _, _, TestingMaspClient>( + .fetch::<_, _, _, TestingMaspClient<'_>>( &client, &progress, RetryStrategy::Times(1), @@ -1969,7 +1941,7 @@ mod shielded_ctx_tests { IndexedTx { height: 
1.into(), index: TxIndex(1), - is_wrapper: false, + inner_tx: Default::default(), }, masp_tx.clone(), ))) @@ -1979,14 +1951,14 @@ mod shielded_ctx_tests { IndexedTx { height: 1.into(), index: TxIndex(2), - is_wrapper: false, + inner_tx: Default::default(), }, masp_tx.clone(), ))) .expect("Test failed"); shielded_ctx - .fetch::<_, _, _, TestingMaspClient>( + .fetch::<_, _, _, TestingMaspClient<'_>>( &client, &progress, RetryStrategy::Times(2), @@ -2011,7 +1983,7 @@ mod shielded_ctx_tests { let expected = vec![IndexedTx { height: 1.into(), index: TxIndex(2), - is_wrapper: false, + inner_tx: Default::default(), }]; assert_eq!(keys, expected); } @@ -2040,7 +2012,7 @@ mod shielded_ctx_tests { IndexedTx { height: h.into(), index: TxIndex(1), - is_wrapper: false, + inner_tx: Default::default(), }, masp_tx.clone(), ))) @@ -2050,7 +2022,7 @@ mod shielded_ctx_tests { // we expect this to fail. let result = shielded_ctx - .fetch::<_, _, _, TestingMaspClient>( + .fetch::<_, _, _, TestingMaspClient<'_>>( &client, &progress, RetryStrategy::Times(1), @@ -2079,7 +2051,7 @@ mod shielded_ctx_tests { shielded_ctx.tx_note_map.remove(&IndexedTx { height: 18.into(), index: TxIndex(1), - is_wrapper: false, + inner_tx: Default::default(), }); shielded_ctx.save().await.expect("Test failed"); @@ -2090,7 +2062,7 @@ mod shielded_ctx_tests { IndexedTx { height: h.into(), index: TxIndex(1), - is_wrapper: false, + inner_tx: Default::default(), }, masp_tx.clone(), ))) @@ -2100,7 +2072,7 @@ mod shielded_ctx_tests { // we expect this to fail. 
shielded_ctx - .fetch::<_, _, _, TestingMaspClient>( + .fetch::<_, _, _, TestingMaspClient<'_>>( &client, &progress, RetryStrategy::Times(1), diff --git a/crates/sdk/src/masp/test_utils.rs b/crates/sdk/src/masp/test_utils.rs index b677e5cb2b..1658bb6ac6 100644 --- a/crates/sdk/src/masp/test_utils.rs +++ b/crates/sdk/src/masp/test_utils.rs @@ -4,8 +4,9 @@ use std::sync::{Arc, Mutex}; use masp_primitives::merkle_tree::CommitmentTree; use masp_primitives::sapling::Node; -use namada_core::storage::{BlockHeight, IndexedTx}; +use namada_core::storage::BlockHeight; use namada_state::LastBlock; +use namada_tx::IndexedTx; use tendermint_rpc::SimpleRequest; use crate::error::Error; @@ -110,7 +111,7 @@ impl<'a> MaspClient<'a, TestingClient> for TestingMaspClient<'a> { Self { client } } - async fn witness_map_updates( + async fn fetch_witness_map_updates( &self, _: &ShieldedContext, _: &IO, diff --git a/crates/sdk/src/masp/types.rs b/crates/sdk/src/masp/types.rs index 749ac84b9c..89ee5bda32 100644 --- a/crates/sdk/src/masp/types.rs +++ b/crates/sdk/src/masp/types.rs @@ -1,3 +1,4 @@ +//! 
The public types for using the MASP tooling use std::collections::{BTreeMap, BTreeSet}; use std::io::{Read, Write}; use std::sync::{Arc, Mutex}; @@ -23,12 +24,13 @@ use namada_core::address::Address; use namada_core::borsh::{BorshDeserialize, BorshSerialize}; use namada_core::collections::HashMap; use namada_core::dec::Dec; -use namada_core::storage::{BlockHeight, Epoch, IndexedTx}; +use namada_core::storage::{BlockHeight, Epoch}; use namada_core::uint::Uint; use namada_macros::BorshDeserializer; #[cfg(feature = "migrations")] use namada_migrations::*; use namada_token as token; +use namada_tx::{IndexedTx, TxCommitments}; use thiserror::Error; use crate::error::Error; @@ -87,6 +89,7 @@ pub struct ShieldedTransfer { } /// Shielded pool data for a token +#[allow(missing_docs)] #[derive(Debug, BorshSerialize, BorshDeserialize, BorshDeserializer)] pub struct MaspTokenRewardData { pub name: String, @@ -98,13 +101,8 @@ pub struct MaspTokenRewardData { } /// The MASP transaction(s) found in a Namada tx. -/// These transactions can appear in the fee payment -/// and / or the main payload. #[derive(Debug, Clone)] -pub(super) struct ExtractedMaspTx { - pub(crate) fee_unshielding: Option, - pub(crate) inner_tx: Option, -} +pub(crate) struct ExtractedMaspTxs(pub Vec<(TxCommitments, Transaction)>); /// MASP verifying keys pub struct PVKs { @@ -218,7 +216,9 @@ impl ScannedData { /// re-scanned as part of nullifying spent notes (which /// is not parallelizable). pub struct DecryptedData { + /// The actual transaction pub tx: Transaction, + /// balance changes from the tx pub delta: TransactionDelta, } diff --git a/crates/sdk/src/masp/utils.rs b/crates/sdk/src/masp/utils.rs index a65737562b..ff8f0847c8 100644 --- a/crates/sdk/src/masp/utils.rs +++ b/crates/sdk/src/masp/utils.rs @@ -1,3 +1,4 @@ +//! 
Helper functions use std::collections::BTreeMap; use std::env; use std::marker::PhantomData; @@ -14,13 +15,13 @@ use masp_primitives::transaction::Transaction; use masp_primitives::zip32::{ExtendedFullViewingKey, ExtendedSpendingKey}; use masp_proofs::prover::LocalTxProver; use namada_core::collections::HashMap; -use namada_core::storage::{BlockHeight, IndexedTx, TxIndex}; +use namada_core::storage::{BlockHeight, TxIndex}; use namada_core::token::Transfer; use namada_events::extend::{ - ReadFromEventAttributes, ValidMaspTx as ValidMaspTxAttr, + MaspTxBlockIndex as MaspTxBlockIndexAttr, ReadFromEventAttributes, }; use namada_ibc::IbcMessage; -use namada_tx::Tx; +use namada_tx::{IndexedTx, Tx}; use rand_core::{CryptoRng, RngCore}; use tokio::sync::mpsc::{Receiver, Sender}; @@ -28,7 +29,7 @@ use crate::error::{Error, QueryError}; use crate::io::Io; use crate::masp::shielded_ctx::ShieldedContext; use crate::masp::types::{ - ContextSyncStatus, ExtractedMaspTx, IndexedNoteEntry, PVKs, ScannedData, + ContextSyncStatus, ExtractedMaspTxs, IndexedNoteEntry, PVKs, ScannedData, TransactionDelta, Unscanned, }; use crate::masp::{ENV_VAR_MASP_PARAMS_DIR, VERIFIYING_KEYS}; @@ -48,7 +49,10 @@ pub(super) fn load_pvks() -> &'static PVKs { /// use the default. pub fn get_params_dir() -> PathBuf { if let Ok(params_dir) = env::var(ENV_VAR_MASP_PARAMS_DIR) { - println!("Using {} as masp parameter folder.", params_dir); + #[allow(clippy::print_stdout)] + { + println!("Using {} as masp parameter folder.", params_dir); + } PathBuf::from(params_dir) } else { masp_proofs::default_params_folder().unwrap() @@ -121,7 +125,6 @@ pub(super) fn cloned_pair((a, b): (&T, &U)) -> (T, U) { (a.clone(), b.clone()) } - /// Retrieves all the indexes and tx events at the specified height which refer /// to a valid MASP transaction. If an index is given, it filters only the /// transactions with an index equal or greater to the provided one. 
@@ -141,10 +144,11 @@ pub(super) async fn get_indexed_masp_events_at_height( events .into_iter() .filter_map(|event| { - let tx_index = ValidMaspTxAttr::read_from_event_attributes( - &event.attributes, - ) - .ok()?; + let tx_index = + MaspTxBlockIndexAttr::read_from_event_attributes( + &event.attributes, + ) + .ok()?; if tx_index >= first_idx_to_query { Some(tx_index) @@ -159,75 +163,57 @@ pub(super) async fn get_indexed_masp_events_at_height( /// Extract the relevant shielded portions of a [`Tx`], if any. pub(super) async fn extract_masp_tx( tx: &Tx, - check_header: bool, -) -> Result { - let tx_header = tx.header(); +) -> Result { // NOTE: simply looking for masp sections attached to the tx // is not safe. We don't validate the sections attached to a // transaction se we could end up with transactions carrying // an unnecessary masp section. We must instead look for the // required masp sections in the signed commitments (hashes) - // of the transactions' headers/data sections - let wrapper_header = tx_header - .wrapper() - .expect("All transactions must have a wrapper"); - let maybe_fee_unshield = if let (Some(hash), true) = - (wrapper_header.unshield_section_hash, check_header) - { - let masp_transaction = tx - .get_section(&hash) - .ok_or_else(|| { - Error::Other("Missing expected masp section".to_string()) - })? 
- .masp_tx() - .ok_or_else(|| { - Error::Other("Missing masp transaction".to_string()) - })?; - - Some(masp_transaction) - } else { - None - }; + // of the transactions' data sections + let mut txs = vec![]; // Expect transaction - let tx_data = tx - .data() - .ok_or_else(|| Error::Other("Missing data section".to_string()))?; - let maybe_masp_tx = match Transfer::try_from_slice(&tx_data) { - Ok(transfer) => Some(transfer), - Err(_) => { - // This should be a MASP over IBC transaction, it - // could be a ShieldedTransfer or an Envelope - // message, need to try both - extract_payload_from_shielded_action(&tx_data).await.ok() + for cmt in tx.commitments() { + let tx_data = tx + .data(cmt) + .ok_or_else(|| Error::Other("Missing data section".to_string()))?; + let maybe_masp_tx = match Transfer::try_from_slice(&tx_data) { + Ok(transfer) => Some(transfer), + Err(_) => { + // This should be a MASP over IBC transaction, it + // could be a ShieldedTransfer or an Envelope + // message, need to try both + extract_payload_from_shielded_action(&tx_data).await.ok() + } } - } - .map(|transfer| { - if let Some(hash) = transfer.shielded { - let masp_tx = tx - .get_section(&hash) - .ok_or_else(|| { - Error::Other( - "Missing masp section in transaction".to_string(), - ) - })? - .masp_tx() - .ok_or_else(|| { - Error::Other("Missing masp transaction".to_string()) - })?; - - Ok::<_, Error>(Some(masp_tx)) - } else { - Ok(None) + .map(|transfer| { + if let Some(hash) = transfer.shielded { + let masp_tx = tx + .get_section(&hash) + .ok_or_else(|| { + Error::Other( + "Missing masp section in transaction".to_string(), + ) + })? + .masp_tx() + .ok_or_else(|| { + Error::Other("Missing masp transaction".to_string()) + })?; + + Ok::<_, Error>(Some(masp_tx)) + } else { + Ok(None) + } + }) + .transpose()? + .flatten(); + + if let Some(transaction) = maybe_masp_tx { + txs.push((cmt.to_owned(), transaction)); } - }) - .transpose()? 
- .flatten(); + } - Ok(ExtractedMaspTx { - fee_unshielding: maybe_fee_unshield, - inner_tx: maybe_masp_tx, - }) + Ok(ExtractedMaspTxs(txs)) } /// Extract the changed keys and Transaction hash from a MASP over ibc message @@ -271,6 +257,7 @@ pub(super) async fn extract_payload_from_shielded_action( /// The updates to the commitment tree and witness maps /// fetched at the beginning of shielded-sync. +#[allow(missing_docs)] pub struct CommitmentTreeUpdates { pub commitment_tree: CommitmentTree, pub witness_map: HashMap>, @@ -281,12 +268,14 @@ pub struct CommitmentTreeUpdates { /// of how shielded-sync fetches the necessary data /// from a remote server. pub trait MaspClient<'a, C: Client> { + /// Create a new [`MaspClient`] given an rpc client. fn new(client: &'a C) -> Self where Self: 'a; + /// Fetch data relevant to the commitment tree #[allow(async_fn_in_trait)] - async fn witness_map_updates( + async fn fetch_witness_map_updates( &self, ctx: &ShieldedContext, io: &IO, @@ -294,6 +283,7 @@ pub trait MaspClient<'a, C: Client> { last_query_height: BlockHeight, ) -> Result; + /// Apply updates to the commitment tree #[allow(async_fn_in_trait)] async fn update_commitment_tree( &self, @@ -307,7 +297,12 @@ pub trait MaspClient<'a, C: Client> { witness_map, mut note_map_delta, } = self - .witness_map_updates(ctx, io, last_witnessed_tx, last_query_height) + .fetch_witness_map_updates( + ctx, + io, + last_witnessed_tx, + last_query_height, + ) .await?; ctx.tree = commitment_tree; ctx.witness_map = witness_map; @@ -315,6 +310,7 @@ pub trait MaspClient<'a, C: Client> { Ok(()) } + /// Fetches shielded transfers #[allow(async_fn_in_trait)] async fn fetch_shielded_transfer( &self, @@ -343,7 +339,7 @@ where Self { client } } - async fn witness_map_updates( + async fn fetch_witness_map_updates( &self, ctx: &ShieldedContext, io: &IO, @@ -368,7 +364,7 @@ where note_map_delta: Default::default(), }; let mut note_pos = updates.commitment_tree.size(); - for (indexed_tx, ref shielded) 
in tx_receiver { + for (indexed_tx, ref shielded) in tx_receiver { updates.note_map_delta.insert(indexed_tx, note_pos); for so in shielded .sapling_bundle() @@ -453,30 +449,16 @@ where for idx in txs_results { let tx = Tx::try_from(block[idx.0 as usize].as_ref()) .map_err(|e| Error::Other(e.to_string()))?; - let ExtractedMaspTx { - fee_unshielding, - inner_tx, - } = extract_masp_tx(&tx, true).await?; - // Collect the current transaction(s) - if let Some(masp_transaction) = fee_unshielding { + let extracted_masp_txs = extract_masp_tx(&tx).await?; + for (inner_tx, transaction) in extracted_masp_txs.0 { tx_sender.send(( IndexedTx { height: height.into(), index: idx, - is_wrapper: true, + inner_tx, }, - masp_transaction, - )); - } - if let Some(masp_transaction) = inner_tx { - tx_sender.send(( - IndexedTx { - height: height.into(), - index: idx, - is_wrapper: false, - }, - masp_transaction, - )); + transaction, + )) } } fetch_iter.next(); @@ -708,7 +690,9 @@ impl TaskScheduler { /// loop, this dictates the strategy for /// how many attempts should be made. pub enum RetryStrategy { + /// Always retry Forever, + /// Limit number of retries to a fixed number Times(u64), } @@ -732,14 +716,19 @@ impl Iterator for RetryStrategy { /// An enum to indicate how to log sync progress depending on /// whether sync is currently fetch or scanning blocks. +#[allow(missing_docs)] #[derive(Debug, Copy, Clone)] pub enum ProgressType { Fetch, Scan, } +/// A peekable iterator interface pub trait PeekableIter { + /// Peek at next element fn peek(&mut self) -> Option<&I>; + + /// get next element fn next(&mut self) -> Option; } @@ -765,12 +754,15 @@ where /// Additionally, it has access to IO in case the struct implementing /// this trait wishes to log this progress. 
pub trait ProgressTracker { + /// Get an IO handle fn io(&self) -> &IO; + /// Return an iterator to fetched shielded transfers fn fetch(&self, items: I) -> impl PeekableIter where I: Iterator; + /// Return an interator over MASP transactions to be scanned fn scan( &self, items: I, @@ -778,6 +770,7 @@ pub trait ProgressTracker { where I: Iterator + Send; + /// The number of blocks that need to be fetched fn left_to_fetch(&self) -> usize; } @@ -789,6 +782,7 @@ pub struct DefaultTracker<'io, IO: Io> { } impl<'io, IO: Io> DefaultTracker<'io, IO> { + /// New [`DefaultTracker`] pub fn new(io: &'io IO) -> Self { Self { io, diff --git a/crates/sdk/src/tx.rs b/crates/sdk/src/tx.rs index 0c69c87c55..16d7539a2e 100644 --- a/crates/sdk/src/tx.rs +++ b/crates/sdk/src/tx.rs @@ -2806,7 +2806,7 @@ pub fn build_batch( } let cmt = tx.first_commitments().unwrap().to_owned(); - if !batched_tx.add_inner_tx(tx, cmt.clone()) { + if !batched_tx.add_inner_tx(tx, cmt) { return Err(Error::Other(format!( "The transaction batch already contains inner tx: {}", cmt.get_hash() diff --git a/crates/tx/src/types.rs b/crates/tx/src/types.rs index 3156b584d5..3f1d16308f 100644 --- a/crates/tx/src/types.rs +++ b/crates/tx/src/types.rs @@ -892,6 +892,7 @@ impl Section { /// An inner transaction of the batch, represented by its commitments to the /// [`Code`], [`Data`] and [`Memo`] sections #[derive( + Copy, Clone, Debug, Default, @@ -1733,6 +1734,7 @@ impl<'tx> Tx { /// Represents the pointers to a indexed tx, which are the block height and the /// index inside that block #[derive( + Copy, Debug, Clone, BorshSerialize, From babfe493cd2884921db33419aaf0afed9c3518c5 Mon Sep 17 00:00:00 2001 From: Tiago Carvalho Date: Mon, 27 May 2024 15:19:55 +0100 Subject: [PATCH 20/29] Update Cargo.lock --- Cargo.lock | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index 0c189121f3..8c0d52d47a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -376,7 +376,11 @@ dependencies = [ 
"percent-encoding", "pin-project-lite", "rustversion", +<<<<<<< HEAD "serde", +======= + "serde 1.0.193", +>>>>>>> 7229d4d13 (Update Cargo.lock) "sync_wrapper 0.1.2", "tower", "tower-layer", @@ -5586,6 +5590,15 @@ dependencies = [ "getrandom 0.2.15", ] +[[package]] +name = "nanorand" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a51313c5820b0b02bd422f4b44776fbf47961755c74ce64afc73bfad10226c3" +dependencies = [ + "getrandom 0.2.11", +] + [[package]] name = "native-tls" version = "0.2.11" From d7d3e1d10ac8d35b3472627b8b35f3dac7b42f55 Mon Sep 17 00:00:00 2001 From: Tiago Carvalho Date: Tue, 28 May 2024 11:49:25 +0100 Subject: [PATCH 21/29] Fix pruning of unscanned txs --- crates/sdk/src/masp/utils.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/sdk/src/masp/utils.rs b/crates/sdk/src/masp/utils.rs index ff8f0847c8..3eda20d59b 100644 --- a/crates/sdk/src/masp/utils.rs +++ b/crates/sdk/src/masp/utils.rs @@ -617,6 +617,8 @@ impl TaskManager { Action::Complete { with_error } => { if !with_error { let mut locked = self.ctx.lock().await; + // possibly remove unneeded elements from the cache. + locked.unscanned.scanned(&self.latest_idx); // update each key to be synced to the latest scanned // height. for (_, h) in locked.vk_heights.iter_mut() { @@ -639,8 +641,6 @@ impl TaskManager { // apply state changes from the scanning process let mut locked = self.ctx.lock().await; scanned.apply_to(&mut locked); - // possibly remove unneeded elements from the cache. 
- locked.unscanned.scanned(&idx); // persist the changes _ = locked.save().await; } From 24351a3e6a33d46db5af156bcc2191055887b1f1 Mon Sep 17 00:00:00 2001 From: Tiago Carvalho Date: Tue, 28 May 2024 12:16:38 +0100 Subject: [PATCH 22/29] Fix subtract underflow in stdout drawer --- crates/apps_lib/src/client/masp.rs | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/crates/apps_lib/src/client/masp.rs b/crates/apps_lib/src/client/masp.rs index f5bc676ecd..e2dfc0d375 100644 --- a/crates/apps_lib/src/client/masp.rs +++ b/crates/apps_lib/src/client/masp.rs @@ -88,8 +88,10 @@ impl<'io, IO: Io> StdoutDrawer<'io, IO> { fn draw(&self) { let (fetch_percent, fetch_completed) = (self.fetch.length > 0) .then(|| { - let fetch_percent = - (100 * self.fetch.index) / self.fetch.length; + let fetch_percent = std::cmp::min( + 100, + (100 * self.fetch.index) / self.fetch.length, + ); let fetch_completed: String = vec!['#'; fetch_percent].iter().collect(); (fetch_percent, fetch_completed) @@ -101,7 +103,10 @@ impl<'io, IO: Io> StdoutDrawer<'io, IO> { let (scan_percent, scan_completed) = (self.scan.length > 0) .then(|| { - let scan_percent = (100 * self.scan.index) / self.scan.length; + let scan_percent = std::cmp::min( + 100, + (100 * self.scan.index) / self.scan.length, + ); let scan_completed: String = vec!['#'; scan_percent].iter().collect(); (scan_percent, scan_completed) From a32de5508866f6637d040974f8a8f99ad53a395e Mon Sep 17 00:00:00 2001 From: Tiago Carvalho Date: Tue, 28 May 2024 12:17:35 +0100 Subject: [PATCH 23/29] Fix percent sign --- crates/apps_lib/src/client/masp.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/apps_lib/src/client/masp.rs b/crates/apps_lib/src/client/masp.rs index e2dfc0d375..ed4f9e5892 100644 --- a/crates/apps_lib/src/client/masp.rs +++ b/crates/apps_lib/src/client/masp.rs @@ -158,7 +158,7 @@ impl<'io, IO: Io> StdoutDrawer<'io, IO> { ); display!( self.io, - "[{}{}] ~~ {} \n\n%", + "[{}{}] ~~ {} 
%\n\n", fetch_completed.unwrap(), fetch_incomplete.unwrap(), fp @@ -176,7 +176,7 @@ impl<'io, IO: Io> StdoutDrawer<'io, IO> { ); display!( self.io, - "[{}{}] ~~ {} \n\n%", + "[{}{}] ~~ {} %\n\n", scan_completed.unwrap(), scan_incomplete.unwrap(), sp From baf950741f996e8ce330f8a5916e5b56e51a32c2 Mon Sep 17 00:00:00 2001 From: Tiago Carvalho Date: Wed, 29 May 2024 09:49:26 +0100 Subject: [PATCH 24/29] Refactor shielded utils --- crates/node/src/bench_utils.rs | 32 ++++++++--------------- crates/sdk/src/masp/mod.rs | 39 +++++++++++------------------ crates/sdk/src/masp/shielded_ctx.rs | 4 +-- crates/sdk/src/masp/utils.rs | 24 ++++++++++++------ 4 files changed, 43 insertions(+), 56 deletions(-) diff --git a/crates/node/src/bench_utils.rs b/crates/node/src/bench_utils.rs index 8fd9cef1f1..cc5d79693d 100644 --- a/crates/node/src/bench_utils.rs +++ b/crates/node/src/bench_utils.rs @@ -5,8 +5,8 @@ use std::cell::RefCell; use std::collections::BTreeSet; -use std::fs::{File, OpenOptions}; -use std::io::{Read, Write}; +use std::fs::OpenOptions; +use std::io::Write; use std::ops::{Deref, DerefMut}; use std::path::PathBuf; use std::str::FromStr; @@ -688,40 +688,30 @@ impl ShieldedUtils for BenchShieldedUtils { } } - /// Try to load the last saved shielded context from the given context - /// directory. If this fails, then leave the current context unchanged. 
- async fn load( + async fn load( &self, - ctx: &mut ShieldedContext, + sync_status: ContextSyncStatus, force_confirmed: bool, - ) -> std::io::Result<()> { + ) -> std::io::Result> { // Try to load shielded context from file let file_name = if force_confirmed { FILE_NAME } else { - match ctx.sync_status { + match sync_status { ContextSyncStatus::Confirmed => FILE_NAME, ContextSyncStatus::Speculative => SPECULATIVE_FILE_NAME, } }; - let mut ctx_file = File::open( + let bytes = std::fs::read( self.context_dir.0.path().to_path_buf().join(file_name), )?; - let mut bytes = Vec::new(); - ctx_file.read_to_end(&mut bytes)?; - // Fill the supplied context with the deserialized object - *ctx = ShieldedContext { - utils: ctx.utils.clone(), + Ok(ShieldedContext { + utils: self.clone(), ..ShieldedContext::deserialize(&mut &bytes[..])? - }; - Ok(()) + }) } - /// Save this shielded context into its associated context directory - async fn save( - &self, - ctx: &ShieldedContext, - ) -> std::io::Result<()> { + async fn save(&self, ctx: &ShieldedContext) -> std::io::Result<()> { let (tmp_file_name, file_name) = match ctx.sync_status { ContextSyncStatus::Confirmed => (TMP_FILE_NAME, FILE_NAME), ContextSyncStatus::Speculative => { diff --git a/crates/sdk/src/masp/mod.rs b/crates/sdk/src/masp/mod.rs index 03be8aaf6e..dd67e537d8 100644 --- a/crates/sdk/src/masp/mod.rs +++ b/crates/sdk/src/masp/mod.rs @@ -46,7 +46,6 @@ pub use utils::{ use crate::masp::types::PartialAuthorized; use crate::masp::utils::{get_params_dir, load_pvks}; -use crate::{MaybeSend, MaybeSync}; /// Env var to point to a dir with MASP parameters. When not specified, /// the default OS specific path is used. 
@@ -1290,8 +1289,8 @@ pub mod testing { #[cfg(feature = "std")] /// Implementation of MASP functionality depending on a standard filesystem pub mod fs { - use std::fs::{File, OpenOptions}; - use std::io::{Read, Write}; + use std::fs::{self, OpenOptions}; + use std::io::Write; use super::*; use crate::masp::shielded_ctx::ShieldedContext; @@ -1342,9 +1341,7 @@ pub mod fs { // Finally initialize a shielded context with the supplied directory let sync_status = - if std::fs::read(context_dir.join(SPECULATIVE_FILE_NAME)) - .is_ok() - { + if fs::read(context_dir.join(SPECULATIVE_FILE_NAME)).is_ok() { // Load speculative state ContextSyncStatus::Speculative } else { @@ -1384,38 +1381,30 @@ pub mod fs { } } - /// Try to load the last saved shielded context from the given context - /// directory. If this fails, then leave the current context unchanged. - async fn load( + async fn load( &self, - ctx: &mut ShieldedContext, + sync_status: ContextSyncStatus, force_confirmed: bool, - ) -> std::io::Result<()> { + ) -> std::io::Result> { // Try to load shielded context from file let file_name = if force_confirmed { FILE_NAME } else { - match ctx.sync_status { + match sync_status { ContextSyncStatus::Confirmed => FILE_NAME, ContextSyncStatus::Speculative => SPECULATIVE_FILE_NAME, } }; - let mut ctx_file = File::open(self.context_dir.join(file_name))?; - let mut bytes = Vec::new(); - ctx_file.read_to_end(&mut bytes)?; - // Fill the supplied context with the deserialized object - *ctx = ShieldedContext { - utils: ctx.utils.clone(), - ..ShieldedContext::::deserialize(&mut &bytes[..])? - }; - Ok(()) + let bytes = fs::read(self.context_dir.join(file_name))?; + Ok(ShieldedContext { + utils: self.clone(), + ..ShieldedContext::::deserialize(&mut &bytes[..])? + }) } - /// Save this confirmed shielded context into its associated context - /// directory. 
At the same time, delete the speculative file if present - async fn save( + async fn save( &self, - ctx: &ShieldedContext, + ctx: &ShieldedContext, ) -> std::io::Result<()> { // TODO: use mktemp crate? let (tmp_file_name, file_name) = match ctx.sync_status { diff --git a/crates/sdk/src/masp/shielded_ctx.rs b/crates/sdk/src/masp/shielded_ctx.rs index 4c36e70c3d..68e9caf25a 100644 --- a/crates/sdk/src/masp/shielded_ctx.rs +++ b/crates/sdk/src/masp/shielded_ctx.rs @@ -130,14 +130,14 @@ impl ShieldedContext { /// Try to load the last saved shielded context from the given context /// directory. If this fails, then leave the current context unchanged. pub async fn load(&mut self) -> std::io::Result<()> { - self.utils.clone().load(self, false).await + self.utils.clone().load_and_update(self, false).await } /// Try to load the last saved confirmed shielded context from the given /// context directory. If this fails, then leave the current context /// unchanged. pub async fn load_confirmed(&mut self) -> std::io::Result<()> { - self.utils.clone().load(self, true).await?; + self.utils.clone().load_and_update(self, true).await?; Ok(()) } diff --git a/crates/sdk/src/masp/utils.rs b/crates/sdk/src/masp/utils.rs index 3eda20d59b..70416ff679 100644 --- a/crates/sdk/src/masp/utils.rs +++ b/crates/sdk/src/masp/utils.rs @@ -64,23 +64,31 @@ pub fn get_params_dir() -> PathBuf { #[cfg_attr(feature = "async-send", async_trait::async_trait)] #[cfg_attr(not(feature = "async-send"), async_trait::async_trait(?Send))] pub trait ShieldedUtils: - Sized + BorshDeserialize + BorshSerialize + Default + Clone + Sized + BorshDeserialize + BorshSerialize + Default + Clone + MaybeSend { /// Get a MASP transaction prover fn local_tx_prover(&self) -> LocalTxProver; + /// Load up the currently saved ShieldedContext and + /// update `ctx` with the loaded contents + async fn load_and_update( + &self, + ctx: &mut ShieldedContext, + force_confirmed: bool, + ) -> std::io::Result<()> { + *ctx = 
self.load(ctx.sync_status, force_confirmed).await?; + Ok(()) + } + /// Load up the currently saved ShieldedContext - async fn load( + async fn load( &self, - ctx: &mut ShieldedContext, + sync_status: ContextSyncStatus, force_confirmed: bool, - ) -> std::io::Result<()>; + ) -> std::io::Result>; /// Save the given ShieldedContext for future loads - async fn save( - &self, - ctx: &ShieldedContext, - ) -> std::io::Result<()>; + async fn save(&self, ctx: &ShieldedContext) -> std::io::Result<()>; } /// Make a ViewingKey that can view notes encrypted by given ExtendedSpendingKey From 8183fbb424789e9b5e5a654b182c8464f3dc4704 Mon Sep 17 00:00:00 2001 From: Tiago Carvalho Date: Wed, 29 May 2024 11:30:20 +0100 Subject: [PATCH 25/29] Cache scanned masp txs more effectively --- crates/apps_lib/src/cli/client.rs | 2 +- crates/apps_lib/src/client/masp.rs | 42 +- crates/core/src/hash.rs | 11 +- crates/node/src/bench_utils.rs | 19 +- crates/sdk/src/masp/shielded_ctx.rs | 654 ++++++++++++++-------------- crates/sdk/src/masp/types.rs | 82 ++-- 6 files changed, 436 insertions(+), 374 deletions(-) diff --git a/crates/apps_lib/src/cli/client.rs b/crates/apps_lib/src/cli/client.rs index a31e71e0a9..eecd0bd55c 100644 --- a/crates/apps_lib/src/cli/client.rs +++ b/crates/apps_lib/src/cli/client.rs @@ -323,7 +323,7 @@ impl CliApi { .map(|sk| sk.into()) .collect::>(); crate::client::masp::syncing( - chain_ctx.shielded, + &chain_ctx.shielded.utils, &client, &io, args.batch_size, diff --git a/crates/apps_lib/src/client/masp.rs b/crates/apps_lib/src/client/masp.rs index ed4f9e5892..b3a56187e1 100644 --- a/crates/apps_lib/src/client/masp.rs +++ b/crates/apps_lib/src/client/masp.rs @@ -5,12 +5,13 @@ use masp_primitives::sapling::ViewingKey; use masp_primitives::zip32::ExtendedSpendingKey; use namada_sdk::error::Error; use namada_sdk::io::Io; +use namada_sdk::masp::shielded_ctx::fetch_shielded_ctx; use namada_sdk::masp::types::IndexedNoteEntry; use namada_sdk::masp::utils::{ LedgerMaspClient, 
PeekableIter, ProgressTracker, ProgressType, RetryStrategy, }; -use namada_sdk::masp::{ShieldedContext, ShieldedUtils}; +use namada_sdk::masp::ShieldedUtils; use namada_sdk::queries::Client; use namada_sdk::storage::BlockHeight; use namada_sdk::{display, display_line}; @@ -21,7 +22,7 @@ pub async fn syncing< C: Client + Sync, IO: Io + Sync + Send, >( - mut shielded: ShieldedContext, + shielded_utils: &U, client: &C, io: &IO, batch_size: u64, @@ -29,7 +30,7 @@ pub async fn syncing< last_query_height: Option, sks: &[ExtendedSpendingKey], fvks: &[ViewingKey], -) -> Result, Error> { +) -> Result<(), Error> { let shutdown_signal = async { let (tx, rx) = tokio::sync::oneshot::channel(); namada_sdk::control_flow::shutdown_send(tx).await; @@ -38,31 +39,30 @@ pub async fn syncing< display_line!(io, "\n\n"); let logger = CliProgressTracker::new(io); - let sync = async move { - shielded - .fetch::<_, _, _, LedgerMaspClient<'_, C>>( - client, - &logger, - RetryStrategy::Forever, - start_query_height, - last_query_height, - batch_size, - sks, - fvks, - ) - .await - .map(|_| shielded) + let sync_result_fut = async move { + fetch_shielded_ctx::<_, _, _, LedgerMaspClient<'_, C>, _>( + shielded_utils, + client, + &logger, + RetryStrategy::Forever, + start_query_height, + last_query_height, + batch_size, + sks, + fvks, + ) + .await }; tokio::select! 
{ - sync = sync => { - let shielded = sync?; + sync_result = sync_result_fut => { + sync_result?; display!(io, "\nSyncing finished\n"); - Ok(shielded) + Ok(()) }, sig = shutdown_signal => { sig.map_err(|e| Error::Other(e.to_string()))?; display!(io, "\n"); - Ok(ShieldedContext::default()) + Ok(()) }, } } diff --git a/crates/core/src/hash.rs b/crates/core/src/hash.rs index 99905167c7..2070ab6fab 100644 --- a/crates/core/src/hash.rs +++ b/crates/core/src/hash.rs @@ -5,7 +5,6 @@ use std::str::FromStr; use arse_merkle_tree::traits::Hasher; use arse_merkle_tree::H256; -use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; use data_encoding::HEXUPPER; use namada_macros::BorshDeserializer; #[cfg(feature = "migrations")] @@ -14,6 +13,10 @@ use serde::{Deserialize, Serialize}; use sha2::{Digest, Sha256}; use thiserror::Error; +use crate::borsh::{ + BorshDeserialize, BorshSchema, BorshSerialize, BorshSerializeExt, +}; + /// The length of the transaction hash string pub const HASH_LENGTH: usize = 32; @@ -130,6 +133,12 @@ impl Hash { Self(*digest.as_ref()) } + /// Compute sha256 of some borsh encodable data + #[inline] + pub fn sha256_borsh(value: &T) -> Self { + Self::sha256(value.serialize_to_vec()) + } + /// Return zeros pub fn zero() -> Self { Self([0u8; HASH_LENGTH]) diff --git a/crates/node/src/bench_utils.rs b/crates/node/src/bench_utils.rs index cc5d79693d..ab7b10c97a 100644 --- a/crates/node/src/bench_utils.rs +++ b/crates/node/src/bench_utils.rs @@ -1041,18 +1041,27 @@ impl BenchShieldedCtx { .wallet .find_spending_key(ALBERT_SPENDING_KEY, None) .unwrap(); - self.shielded = async_runtime - .block_on(namada_apps_lib::client::masp::syncing( - self.shielded, - &self.shell, + let shielded_utils_ref = &self.shielded.utils; + let shell_ref = &self.shell; + self.shielded = async_runtime.block_on(async move { + namada_apps_lib::client::masp::syncing( + shielded_utils_ref, + shell_ref, &StdIo, 1, None, None, &[spending_key.into()], &[], - )) + ) + .await .unwrap(); + + 
shielded_utils_ref + .load(ContextSyncStatus::Confirmed, true) + .await + .unwrap() + }); let native_token = self.shell.state.in_mem().native_token.clone(); let namada = NamadaImpl::native_new( self.shell, diff --git a/crates/sdk/src/masp/shielded_ctx.rs b/crates/sdk/src/masp/shielded_ctx.rs index 68e9caf25a..04ceebff0d 100644 --- a/crates/sdk/src/masp/shielded_ctx.rs +++ b/crates/sdk/src/masp/shielded_ctx.rs @@ -290,7 +290,7 @@ impl ShieldedContext { /// * nullify notes that have been spent /// * update balances of each viewing key pub(super) fn nullify_spent_notes(&mut self) -> Result<(), Error> { - for ((_, _vk), decrypted_data) in self.decrypted_note_cache.drain() { + for (_, _, decrypted_data) in self.decrypted_note_cache.drain() { let DecryptedData { tx: shielded, delta: mut transaction_delta, @@ -1283,7 +1283,8 @@ impl ShieldedContext { )?; scanned_data.merge(scanned); scanned_data.decrypted_note_cache.insert( - (indexed_tx, vk), + indexed_tx, + vk, DecryptedData { tx: masp_tx.clone(), delta: tx_delta, @@ -1361,199 +1362,209 @@ impl ShieldedContext { } } -impl ShieldedContext { - /// Fetch the current state of the multi-asset shielded pool into a - /// ShieldedContext - #[allow(clippy::too_many_arguments)] - #[cfg(not(target_family = "wasm"))] - pub async fn fetch< - 'a, - C: Client + Sync, - IO: Io + Send + Sync, - T: ProgressTracker + Sync, - M: MaspClient<'a, C> + 'a, - >( - &mut self, - client: &'a C, - progress: &T, - retry: RetryStrategy, - start_query_height: Option, - last_query_height: Option, - _batch_size: u64, - sks: &[ExtendedSpendingKey], - fvks: &[ViewingKey], - ) -> Result<(), Error> { - // Reload the state from file to get the last confirmed state and - // discard any speculative data, we cannot fetch on top of a - // speculative state - // Always reload the confirmed context or initialize a new one if not - // found - if self.load_confirmed().await.is_err() { - // Initialize a default context if we couldn't load a valid one - // from 
storage - *self = Self { - utils: std::mem::take(&mut self.utils), - ..Default::default() - }; +/// Fetch the current state of the multi-asset shielded pool into a +/// [`ShieldedContext`]. +#[allow(clippy::too_many_arguments)] +#[cfg(not(target_family = "wasm"))] +pub async fn fetch_shielded_ctx<'client, C, IO, T, M, U>( + utils: &U, + client: &'client C, + progress: &T, + retry: RetryStrategy, + start_query_height: Option, + last_query_height: Option, + _batch_size: u64, + sks: &[ExtendedSpendingKey], + fvks: &[ViewingKey], +) -> Result<(), Error> +where + C: Client + Sync, + IO: Io + Send + Sync, + T: ProgressTracker + Sync, + M: MaspClient<'client, C> + 'client, + U: ShieldedUtils + Send + Sync, +{ + // Reload the state from file to get the last confirmed state and + // discard any speculative data, we cannot fetch on top of a + // speculative state + // Always reload the confirmed context or initialize a new one if not + // found + let mut ctx = if let Ok(ctx) = load_shielded_ctx(utils).await { + ctx + } else { + // Initialize a default context if we couldn't load a valid one + // from storage + ShieldedContext { + utils: utils.clone(), + ..Default::default() } + }; - // add new viewing keys - for esk in sks { - let vk = to_viewing_key(esk).vk; - self.vk_heights.entry(vk).or_default(); - } - for vk in fvks { - self.vk_heights.entry(*vk).or_default(); - } - // Save the context to persist newly added keys - let _ = self.save().await; + // add new viewing keys + for esk in sks { + let vk = to_viewing_key(esk).vk; + ctx.vk_heights.entry(vk).or_default(); + } + for vk in fvks { + ctx.vk_heights.entry(*vk).or_default(); + } + // Save the context to persist newly added keys + let _ = ctx.save().await; - // the height of the key that is least synced - let Some(least_idx) = self.vk_heights.values().min().cloned() else { - return Ok(()); - }; - // the latest block height which has been added to the witness Merkle - // tree - let last_witnessed_tx = 
self.tx_note_map.keys().max().cloned(); - // get the bounds on the block heights to fetch - let start_height = - std::cmp::min(last_witnessed_tx, least_idx).map(|idx| idx.height); - let start_height = start_query_height.or(start_height); - // Query for the last produced block height - let last_block_height = query_block(client) - .await? - .map(|b| b.height) - .unwrap_or_else(BlockHeight::first); - let last_query_height = last_query_height.unwrap_or(last_block_height); - let last_query_height = - std::cmp::min(last_query_height, last_block_height); - - // Update the commitment tree and witnesses - self.update_witness_map::<_, _, M>( - client, - progress.io(), - last_witnessed_tx.unwrap_or_default(), - last_query_height, - ) - .await?; - let vk_heights = self.vk_heights.clone(); - - // the task scheduler allows the thread performing trial decryptions to - // communicate errors and actions (such as saving and updating state). - // The task manager runs on the main thread and performs the tasks - // scheduled by the scheduler. - let (task_scheduler, mut task_manager) = - TaskManager::::new(self.clone()); - - // The main loop that performs - // * fetching and caching MASP txs in sequence - // * trial decryption of each note to determine if it is owned by a - // viewing key in this context and caching the result. - // * Nullifying spent notes and updating balances for each viewing key - // * Regular saving of the context to disk in case of process interrupts - std::thread::scope(|s| { - for _ in retry { - // a stateful channel that communicates notes fetched to the - // trial decryption process - let (fetch_send, fetch_recv) = - fetch_channel::new(self.unscanned.clone()); - - // we trial-decrypt all notes fetched in parallel and schedule - // the state changes to be applied to the shielded context - // back on the main thread - let decryption_handle = s.spawn(|| { - // N.B. DON'T GO PANICKING IN HERE. DON'T DO IT. SERIOUSLY. 
- // YOU COULD ACCIDENTALLY FREEZE EVERYTHING - let txs = progress.scan(fetch_recv); - txs.par_bridge().try_for_each(|(indexed_tx, stx)| { - let mut scanned_data = ScannedData::default(); - for (vk, _) in vk_heights - .iter() - .filter(|(_vk, h)| **h < Some(indexed_tx)) - { - // if this note is in the cache, skip it. - if scanned_data - .decrypted_note_cache - .contains(&indexed_tx, vk) - { - continue; - } - // attempt to decrypt the note and get the state - // changes - let (scanned, tx_delta) = task_scheduler.scan_tx( - self.sync_status, - indexed_tx, - &self.tx_note_map, - &stx, - vk, - )?; - // add the new state changes to the aggregated - scanned_data.merge(scanned); - // add the note to the cache - scanned_data.decrypted_note_cache.insert( - (indexed_tx, *vk), - DecryptedData { - tx: stx.clone(), - delta: tx_delta, - }, - ); + // the height of the key that is least synced + let Some(least_idx) = ctx.vk_heights.values().min().cloned() else { + return Ok(()); + }; + // the latest block height which has been added to the witness Merkle + // tree + let last_witnessed_tx = ctx.tx_note_map.keys().max().cloned(); + // get the bounds on the block heights to fetch + let start_height = + std::cmp::min(last_witnessed_tx, least_idx).map(|idx| idx.height); + let start_height = start_query_height.or(start_height); + // Query for the last produced block height + let last_block_height = query_block(client) + .await? 
+ .map(|b| b.height) + .unwrap_or_else(BlockHeight::first); + let last_query_height = last_query_height.unwrap_or(last_block_height); + let last_query_height = std::cmp::min(last_query_height, last_block_height); + + // Update the commitment tree and witnesses + ctx.update_witness_map::<_, _, M>( + client, + progress.io(), + last_witnessed_tx.unwrap_or_default(), + last_query_height, + ) + .await?; + let vk_heights = ctx.vk_heights.clone(); + + // the task scheduler allows the thread performing trial decryptions to + // communicate errors and actions (such as saving and updating state). + // The task manager runs on the main thread and performs the tasks + // scheduled by the scheduler. + let (task_scheduler, mut task_manager) = TaskManager::::new(ctx.clone()); + + // The main loop that performs + // * fetching and caching MASP txs in sequence + // * trial decryption of each note to determine if it is owned by a viewing + // key in this context and caching the result. + // * Nullifying spent notes and updating balances for each viewing key + // * Regular saving of the context to disk in case of process interrupts + std::thread::scope(|s| { + for _ in retry { + // a stateful channel that communicates notes fetched to the + // trial decryption process + let (fetch_send, fetch_recv) = + fetch_channel::new(ctx.unscanned.clone()); + + // we trial-decrypt all notes fetched in parallel and schedule + // the state changes to be applied to the shielded context + // back on the main thread + let decryption_handle = s.spawn(|| { + // N.B. DON'T GO PANICKING IN HERE. DON'T DO IT. SERIOUSLY. 
+ // YOU COULD ACCIDENTALLY FREEZE EVERYTHING + let txs = progress.scan(fetch_recv); + txs.par_bridge().try_for_each(|(indexed_tx, stx)| { + let mut new_scanned_data = ScannedData { + decrypted_note_cache: ctx.decrypted_note_cache.clone(), + ..ScannedData::default() + }; + for (vk, _) in vk_heights + .iter() + .filter(|(_vk, h)| **h < Some(indexed_tx)) + { + // if this note is in the cache, skip it. + if ctx.decrypted_note_cache.contains(&indexed_tx, vk) { + continue; } - // save the aggregated state changes - task_scheduler.save(scanned_data, indexed_tx); - Ok::<(), Error>(()) - })?; - // signal that the process has finished without error - task_scheduler.complete(false); + // attempt to decrypt the note and get the state + // changes + let (scanned, tx_delta) = task_scheduler.scan_tx( + ctx.sync_status, + indexed_tx, + &ctx.tx_note_map, + &stx, + vk, + )?; + // add the new state changes to the aggregated + new_scanned_data.merge(scanned); + // add the note to the cache + new_scanned_data.decrypted_note_cache.insert( + indexed_tx, + *vk, + DecryptedData { + tx: stx.clone(), + delta: tx_delta, + }, + ); + } + // save the aggregated state changes + task_scheduler.save(new_scanned_data, indexed_tx); Ok::<(), Error>(()) - }); - - // fetch MASP txs and coordinate the state changes from - // scanning fetched txs asynchronously. - let (decrypt_res, fetch_res) = - tokio::task::block_in_place(|| { - tokio::runtime::Handle::current().block_on(async { - tokio::join!( - task_manager.run(), - Self::fetch_shielded_transfers::<_, _, M>( - fetch_send, - client, - progress, - start_height, - last_query_height, - ) - ) - }) - }); - // shut down the scanning thread. - decryption_handle.join().unwrap()?; - // if the scanning process errored, return that error here and - // exit. - decrypt_res?; - - // if fetching errored, log it. But this is recoverable. 
- if let Err(e) = fetch_res { - display_line!( - progress.io(), - "Error encountered while fetching: {}", - e.to_string() - ); - } + })?; + // signal that the process has finished without error + task_scheduler.complete(false); + Ok::<(), Error>(()) + }); - // if fetching failed for before completing, we restart - // the fetch process. Otherwise, we can break the loop. - if progress.left_to_fetch() == 0 { - break; - } + // fetch MASP txs and coordinate the state changes from + // scanning fetched txs asynchronously. + let (decrypt_res, fetch_res) = tokio::task::block_in_place(|| { + tokio::runtime::Handle::current().block_on(async { + tokio::join!( + task_manager.run(), + >::fetch_shielded_transfers::<_, _, M>( + fetch_send, + client, + progress, + start_height, + last_query_height, + ) + ) + }) + }); + // shut down the scanning thread. + decryption_handle.join().unwrap()?; + // if the scanning process errored, return that error here and + // exit. + decrypt_res?; + + // if fetching errored, log it. But this is recoverable. + if let Err(e) = fetch_res { + display_line!( + progress.io(), + "Error encountered while fetching: {}", + e.to_string() + ); } - if progress.left_to_fetch() != 0 { - Err(Error::Other( - "After retrying, could not fetch all MASP txs.".to_string(), - )) - } else { - Ok(()) + + // if fetching failed for before completing, we restart + // the fetch process. Otherwise, we can break the loop. 
+ if progress.left_to_fetch() == 0 { + break; } + } + if progress.left_to_fetch() != 0 { + Err(Error::Other( + "After retrying, could not fetch all MASP txs.".to_string(), + )) + } else { + Ok(()) + } + }) +} + +async fn load_shielded_ctx( + utils: &U, +) -> Result, Error> { + utils + .load(ContextSyncStatus::Confirmed, true) + .await + .map_err(|err| { + Error::Other(format!("Could not load shielded context: {err}")) }) - } } #[cfg(test)] @@ -1688,19 +1699,19 @@ mod shielded_ctx_tests { // we first test that with no retries, a fetching failure // stops process - let result = shielded_ctx - .fetch::<_, _, _, TestingMaspClient<'_>>( - &client, - &progress, - RetryStrategy::Times(1), - None, - None, - 0, - &[], - &[vk], - ) - .await - .unwrap_err(); + let result = fetch_shielded_ctx::<_, _, _, TestingMaspClient<'_>, _>( + &shielded_ctx.utils, + &client, + &progress, + RetryStrategy::Times(1), + None, + None, + 0, + &[], + &[vk], + ) + .await + .unwrap_err(); match result { Error::Other(msg) => assert_eq!( msg.as_str(), @@ -1735,19 +1746,19 @@ mod shielded_ctx_tests { .expect("Test failed"); // This should complete successfully - shielded_ctx - .fetch::<_, _, _, TestingMaspClient<'_>>( - &client, - &progress, - RetryStrategy::Times(2), - None, - None, - 0, - &[], - &[vk], - ) - .await - .expect("Test failed"); + fetch_shielded_ctx::<_, _, _, TestingMaspClient<'_>, _>( + &shielded_ctx.utils, + &client, + &progress, + RetryStrategy::Times(2), + None, + None, + 0, + &[], + &[vk], + ) + .await + .expect("Test failed"); shielded_ctx.load_confirmed().await.expect("Test failed"); let keys = shielded_ctx @@ -1785,8 +1796,7 @@ mod shielded_ctx_tests { #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn test_left_to_fetch() { let temp_dir = tempdir().unwrap(); - let mut shielded_ctx = - FsShieldedUtils::new(temp_dir.path().to_path_buf()); + let shielded_ctx = FsShieldedUtils::new(temp_dir.path().to_path_buf()); let (client, masp_tx_sender) = 
test_client(2.into()); let io = StdIo; let progress = DefaultTracker::new(&io); @@ -1799,19 +1809,19 @@ mod shielded_ctx_tests { // first fetch no blocks masp_tx_sender.send(None).expect("Test failed"); - shielded_ctx - .fetch::<_, _, _, TestingMaspClient<'_>>( - &client, - &progress, - RetryStrategy::Times(1), - None, - None, - 0, - &[], - &[vk], - ) - .await - .unwrap_err(); + fetch_shielded_ctx::<_, _, _, TestingMaspClient<'_>, _>( + &shielded_ctx.utils, + &client, + &progress, + RetryStrategy::Times(1), + None, + None, + 0, + &[], + &[vk], + ) + .await + .unwrap_err(); assert_eq!(progress.left_to_fetch(), 2); // fetch one of the two blocks @@ -1826,55 +1836,55 @@ mod shielded_ctx_tests { ))) .expect("Test failed"); masp_tx_sender.send(None).expect("Test failed"); - shielded_ctx - .fetch::<_, _, _, TestingMaspClient<'_>>( - &client, - &progress, - RetryStrategy::Times(1), - None, - None, - 0, - &[], - &[vk], - ) - .await - .unwrap_err(); + fetch_shielded_ctx::<_, _, _, TestingMaspClient<'_>, _>( + &shielded_ctx.utils, + &client, + &progress, + RetryStrategy::Times(1), + None, + None, + 0, + &[], + &[vk], + ) + .await + .unwrap_err(); assert_eq!(progress.left_to_fetch(), 1); // fetch no blocks masp_tx_sender.send(None).expect("Test failed"); - shielded_ctx - .fetch::<_, _, _, TestingMaspClient<'_>>( - &client, - &progress, - RetryStrategy::Times(1), - None, - None, - 0, - &[], - &[vk], - ) - .await - .unwrap_err(); + fetch_shielded_ctx::<_, _, _, TestingMaspClient<'_>, _>( + &shielded_ctx.utils, + &client, + &progress, + RetryStrategy::Times(1), + None, + None, + 0, + &[], + &[vk], + ) + .await + .unwrap_err(); assert_eq!(progress.left_to_fetch(), 1); // fetch no blocks, but increase the latest block height // thus the amount left to fetch should increase let (client, masp_tx_sender) = test_client(3.into()); masp_tx_sender.send(None).expect("Test failed"); - shielded_ctx - .fetch::<_, _, _, TestingMaspClient<'_>>( - &client, - &progress, - 
RetryStrategy::Times(1), - None, - None, - 0, - &[], - &[vk], - ) - .await - .unwrap_err(); + fetch_shielded_ctx::<_, _, _, TestingMaspClient<'_>, _>( + &shielded_ctx.utils, + &client, + &progress, + RetryStrategy::Times(1), + None, + None, + 0, + &[], + &[vk], + ) + .await + .unwrap_err(); assert_eq!(progress.left_to_fetch(), 2); // fetch remaining block @@ -1901,19 +1911,19 @@ mod shielded_ctx_tests { // this should not produce an error since we have fetched // all expected blocks masp_tx_sender.send(None).expect("Test failed"); - shielded_ctx - .fetch::<_, _, _, TestingMaspClient<'_>>( - &client, - &progress, - RetryStrategy::Times(1), - None, - None, - 0, - &[], - &[vk], - ) - .await - .expect("Test failed"); + fetch_shielded_ctx::<_, _, _, TestingMaspClient<'_>, _>( + &shielded_ctx.utils, + &client, + &progress, + RetryStrategy::Times(1), + None, + None, + 0, + &[], + &[vk], + ) + .await + .expect("Test failed"); assert_eq!(progress.left_to_fetch(), 0); } @@ -1957,19 +1967,19 @@ mod shielded_ctx_tests { ))) .expect("Test failed"); - shielded_ctx - .fetch::<_, _, _, TestingMaspClient<'_>>( - &client, - &progress, - RetryStrategy::Times(2), - None, - None, - 0, - &[], - &[vk], - ) - .await - .expect("Test failed"); + fetch_shielded_ctx::<_, _, _, TestingMaspClient<'_>, _>( + &shielded_ctx.utils, + &client, + &progress, + RetryStrategy::Times(2), + None, + None, + 0, + &[], + &[vk], + ) + .await + .expect("Test failed"); shielded_ctx.load_confirmed().await.expect("Test failed"); let keys = shielded_ctx @@ -2021,19 +2031,19 @@ mod shielded_ctx_tests { masp_tx_sender.send(None).expect("Test failed"); // we expect this to fail. 
- let result = shielded_ctx - .fetch::<_, _, _, TestingMaspClient<'_>>( - &client, - &progress, - RetryStrategy::Times(1), - None, - None, - 0, - &[], - &[vk], - ) - .await - .unwrap_err(); + let result = fetch_shielded_ctx::<_, _, _, TestingMaspClient<'_>, _>( + &shielded_ctx.utils, + &client, + &progress, + RetryStrategy::Times(1), + None, + None, + 0, + &[], + &[vk], + ) + .await + .unwrap_err(); match result { Error::Other(msg) => assert_eq!( msg.as_str(), @@ -2071,25 +2081,31 @@ mod shielded_ctx_tests { masp_tx_sender.send(None).expect("Test failed"); // we expect this to fail. - shielded_ctx - .fetch::<_, _, _, TestingMaspClient<'_>>( - &client, - &progress, - RetryStrategy::Times(1), - None, - None, - 0, - &[], - &[vk], - ) - .await - .unwrap_err(); + fetch_shielded_ctx::<_, _, _, TestingMaspClient<'_>, _>( + &shielded_ctx.utils, + &client, + &progress, + RetryStrategy::Times(1), + None, + None, + 0, + &[], + &[vk], + ) + .await + .unwrap_err(); // because of an error in scanning, there should be elements // in the decrypted cache. shielded_ctx.load_confirmed().await.expect("Test failed"); let result: HashMap<(IndexedTx, ViewingKey), DecryptedData> = - shielded_ctx.decrypted_note_cache.drain().collect(); + shielded_ctx + .decrypted_note_cache + .drain() + .map(|(indexed_tx, viewing_key, decrypted_data)| { + ((indexed_tx, viewing_key), decrypted_data) + }) + .collect(); // unfortunately we cannot easily assert what will be in this // cache as scanning is done in parallel, introducing non-determinism assert!(!result.is_empty()); diff --git a/crates/sdk/src/masp/types.rs b/crates/sdk/src/masp/types.rs index 89ee5bda32..45f3fa432a 100644 --- a/crates/sdk/src/masp/types.rs +++ b/crates/sdk/src/masp/types.rs @@ -1,7 +1,7 @@ //! 
The public types for using the MASP tooling use std::collections::{BTreeMap, BTreeSet}; use std::io::{Read, Write}; -use std::sync::{Arc, Mutex}; +use std::sync::{Arc, Mutex, RwLock}; use borsh_ext::BorshSerializeExt; use masp_primitives::asset_type::AssetType; @@ -24,6 +24,7 @@ use namada_core::address::Address; use namada_core::borsh::{BorshDeserialize, BorshSerialize}; use namada_core::collections::HashMap; use namada_core::dec::Dec; +use namada_core::hash::Hash; use namada_core::storage::{BlockHeight, Epoch}; use namada_core::uint::Uint; use namada_macros::BorshDeserializer; @@ -176,7 +177,13 @@ impl ScannedData { for (k, v) in self.memo_map.drain(..) { ctx.memo_map.insert(k, v); } - ctx.decrypted_note_cache.merge(self.decrypted_note_cache); + // NB: the `decrypted_note_cache` is not carried over + // from `self` because it is assumed they are pointing + // to the same underlying `Arc` + debug_assert_eq!( + Arc::as_ptr(&ctx.decrypted_note_cache.inner), + Arc::as_ptr(&self.decrypted_note_cache.inner), + ); } /// Merge to different instances of `Self`. @@ -202,9 +209,13 @@ impl ScannedData { for (k, v) in other.memo_map.drain(..) { self.memo_map.insert(k, v); } - for (k, v) in other.decrypted_note_cache.inner { - self.decrypted_note_cache.insert(k, v); - } + // NB: the `decrypted_note_cache` is not carried over + // from `other` because it is assumed they are pointing + // to the same underlying `Arc` + debug_assert_eq!( + Arc::as_ptr(&other.decrypted_note_cache.inner), + Arc::as_ptr(&self.decrypted_note_cache.inner), + ); } } @@ -225,43 +236,60 @@ pub struct DecryptedData { /// A cache of decrypted txs that have not yet been /// updated to the shielded ctx. Necessary in case /// scanning gets interrupted. 
-#[derive(Debug, Clone, Default, BorshSerialize, BorshDeserialize)] +#[derive(Debug, Clone, Default)] +#[allow(clippy::type_complexity)] pub struct DecryptedDataCache { - inner: HashMap<(IndexedTx, ViewingKey), DecryptedData>, + inner: Arc>>, +} + +impl BorshSerialize for DecryptedDataCache { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + let locked = self.inner.read().unwrap(); + locked.serialize(writer) + } +} + +impl BorshDeserialize for DecryptedDataCache { + fn deserialize_reader(reader: &mut R) -> std::io::Result { + let inner = BorshDeserialize::deserialize_reader(reader)?; + Ok(Self { + inner: Arc::new(RwLock::new(inner)), + }) + } } impl DecryptedDataCache { /// Add an entry to the cache pub fn insert( - &mut self, - key: (IndexedTx, ViewingKey), - value: DecryptedData, + &self, + indexed_tx: IndexedTx, + viewing_key: ViewingKey, + decrypted_data: DecryptedData, ) { - self.inner.insert(key, value); - } - - /// Merge another cache into `self`. - pub fn merge(&mut self, mut other: Self) { - for (k, v) in other.inner.drain(..) { - self.insert(k, v); - } + let mut locked = self.inner.write().unwrap(); + let key = Hash::sha256_borsh(&(&indexed_tx, &viewing_key)); + let value = (indexed_tx, viewing_key, decrypted_data); + locked.insert(key, value); } /// Check if the cache already contains an entry for a given IndexedTx and /// viewing key. - pub fn contains(&self, ix: &IndexedTx, vk: &ViewingKey) -> bool { - self.inner - .keys() - .find_map(|(i, v)| (i == ix && v == vk).then_some(())) - .is_some() + pub fn contains( + &self, + indexed_tx: &IndexedTx, + viewing_key: &ViewingKey, + ) -> bool { + let key = Hash::sha256_borsh(&(&indexed_tx, &viewing_key)); + let locked = self.inner.read().unwrap(); + locked.contains_key(&key) } /// Return an iterator over the cache that consumes it. pub fn drain( - &mut self, - ) -> impl Iterator + '_ - { - self.inner.drain(..) 
+ &self, + ) -> impl Iterator { + let mut locked = self.inner.write().unwrap(); + std::mem::take(&mut *locked).into_values() } } From 1d5e74057e01c56907f8a1d709178f0dff69070c Mon Sep 17 00:00:00 2001 From: satan Date: Sat, 1 Jun 2024 13:46:10 +0200 Subject: [PATCH 26/29] [chore]: Added tests to ensure that trial decryption caching is actually used --- crates/apps_lib/src/client/masp.rs | 2 +- crates/sdk/src/masp/shielded_ctx.rs | 187 +++++++++++++++++++++++----- crates/sdk/src/masp/test_utils.rs | 13 +- crates/sdk/src/masp/types.rs | 28 ++--- crates/sdk/src/masp/utils.rs | 89 ++++++++----- 5 files changed, 238 insertions(+), 81 deletions(-) diff --git a/crates/apps_lib/src/client/masp.rs b/crates/apps_lib/src/client/masp.rs index b3a56187e1..d715b2ddae 100644 --- a/crates/apps_lib/src/client/masp.rs +++ b/crates/apps_lib/src/client/masp.rs @@ -40,7 +40,7 @@ pub async fn syncing< display_line!(io, "\n\n"); let logger = CliProgressTracker::new(io); let sync_result_fut = async move { - fetch_shielded_ctx::<_, _, _, LedgerMaspClient<'_, C>, _>( + fetch_shielded_ctx::<_, _, _, _, LedgerMaspClient<'_, C>>( shielded_utils, client, &logger, diff --git a/crates/sdk/src/masp/shielded_ctx.rs b/crates/sdk/src/masp/shielded_ctx.rs index 04ceebff0d..06f09f8a38 100644 --- a/crates/sdk/src/masp/shielded_ctx.rs +++ b/crates/sdk/src/masp/shielded_ctx.rs @@ -50,7 +50,7 @@ use crate::masp::types::{ Unscanned, WalletMap, }; use crate::masp::utils::{ - cloned_pair, fetch_channel, is_amount_required, to_viewing_key, + cloned_pair, fetch_channel, is_amount_required, to_viewing_key, Action, FetchQueueSender, MaspClient, ProgressTracker, RetryStrategy, ShieldedUtils, TaskManager, }; @@ -1282,7 +1282,7 @@ impl ShieldedContext { &vk, )?; scanned_data.merge(scanned); - scanned_data.decrypted_note_cache.insert( + self.decrypted_note_cache.insert( indexed_tx, vk, DecryptedData { @@ -1291,11 +1291,9 @@ impl ShieldedContext { }, ); } - let mut temp_cache = DecryptedDataCache::default(); - 
std::mem::swap(&mut temp_cache, &mut self.decrypted_note_cache); + scanned_data.apply_to(self); self.nullify_spent_notes()?; - std::mem::swap(&mut temp_cache, &mut self.decrypted_note_cache); // Save the speculative state for future usage self.save().await.map_err(|e| Error::Other(e.to_string()))?; @@ -1366,22 +1364,58 @@ impl ShieldedContext { /// [`ShieldedContext`]. #[allow(clippy::too_many_arguments)] #[cfg(not(target_family = "wasm"))] -pub async fn fetch_shielded_ctx<'client, C, IO, T, M, U>( +pub async fn fetch_shielded_ctx<'client, C, IO, U, TRACKER, MC>( + utils: &U, + client: &'client C, + progress: &TRACKER, + retry: RetryStrategy, + start_query_height: Option, + last_query_height: Option, + _batch_size: u64, + sks: &[ExtendedSpendingKey], + fvks: &[ViewingKey], +) -> Result<(), Error> +where + C: Client + Sync, + IO: Io + Send + Sync, + TRACKER: ProgressTracker + Sync, + MC: MaspClient<'client, C> + 'client, + U: ShieldedUtils + Send + Sync, +{ + fetch_shielded_aux::<_, _, _, _, MC>( + utils, + client, + progress, + retry, + start_query_height, + last_query_height, + _batch_size, + sks, + fvks, + None, + ) + .await +} + +#[allow(clippy::too_many_arguments)] +#[cfg(not(target_family = "wasm"))] +async fn fetch_shielded_aux<'client, C, IO, U, TRACKER, MC>( utils: &U, client: &'client C, - progress: &T, + progress: &TRACKER, retry: RetryStrategy, start_query_height: Option, last_query_height: Option, _batch_size: u64, sks: &[ExtendedSpendingKey], fvks: &[ViewingKey], + callback: Option, ) -> Result<(), Error> where C: Client + Sync, IO: Io + Send + Sync, - T: ProgressTracker + Sync, - M: MaspClient<'client, C> + 'client, + TRACKER: ProgressTracker + Sync, + MC: MaspClient<'client, C> + 'client, U: ShieldedUtils + Send + Sync, { // Reload the state from file to get the last confirmed state and @@ -1431,7 +1465,7 @@ where let last_query_height = std::cmp::min(last_query_height, last_block_height); // Update the commitment tree and witnesses - 
ctx.update_witness_map::<_, _, M>( + ctx.update_witness_map::<_, _, MC>( client, progress.io(), last_witnessed_tx.unwrap_or_default(), @@ -1444,7 +1478,8 @@ where // communicate errors and actions (such as saving and updating state). // The task manager runs on the main thread and performs the tasks // scheduled by the scheduler. - let (task_scheduler, mut task_manager) = TaskManager::::new(ctx.clone()); + let (task_scheduler, mut task_manager) = + TaskManager::::new(ctx.clone(), callback); // The main loop that performs // * fetching and caching MASP txs in sequence @@ -1467,10 +1502,9 @@ where // YOU COULD ACCIDENTALLY FREEZE EVERYTHING let txs = progress.scan(fetch_recv); txs.par_bridge().try_for_each(|(indexed_tx, stx)| { - let mut new_scanned_data = ScannedData { - decrypted_note_cache: ctx.decrypted_note_cache.clone(), - ..ScannedData::default() - }; + let decrypted_note_cache = ctx.decrypted_note_cache.clone(); + let mut new_scanned_data = ScannedData::default(); + for (vk, _) in vk_heights .iter() .filter(|(_vk, h)| **h < Some(indexed_tx)) @@ -1491,7 +1525,7 @@ where // add the new state changes to the aggregated new_scanned_data.merge(scanned); // add the note to the cache - new_scanned_data.decrypted_note_cache.insert( + decrypted_note_cache.insert( indexed_tx, *vk, DecryptedData { @@ -1515,7 +1549,11 @@ where tokio::runtime::Handle::current().block_on(async { tokio::join!( task_manager.run(), - >::fetch_shielded_transfers::<_, _, M>( + >::fetch_shielded_transfers::< + _, + _, + MC, + >( fetch_send, client, progress, @@ -1571,6 +1609,7 @@ async fn load_shielded_ctx( mod shielded_ctx_tests { use core::str::FromStr; + use assert_matches::assert_matches; use masp_primitives::zip32::ExtendedFullViewingKey; use namada_core::masp::ExtendedViewingKey; use namada_core::storage::TxIndex; @@ -1581,7 +1620,8 @@ mod shielded_ctx_tests { use crate::io::StdIo; use crate::masp::fs::FsShieldedUtils; use crate::masp::test_utils::{ - test_client, TestUnscannedTracker, 
TestingMaspClient, + publish_message, test_client, TestUnscannedTracker, TestingMaspClient, + RECEIVED, }; use crate::masp::utils::{DefaultTracker, RetryStrategy}; @@ -1699,7 +1739,7 @@ mod shielded_ctx_tests { // we first test that with no retries, a fetching failure // stops process - let result = fetch_shielded_ctx::<_, _, _, TestingMaspClient<'_>, _>( + let result = fetch_shielded_ctx::<_, _, _, _, TestingMaspClient<'_>>( &shielded_ctx.utils, &client, &progress, @@ -1746,7 +1786,7 @@ mod shielded_ctx_tests { .expect("Test failed"); // This should complete successfully - fetch_shielded_ctx::<_, _, _, TestingMaspClient<'_>, _>( + fetch_shielded_ctx::<_, _, _, _, TestingMaspClient<'_>>( &shielded_ctx.utils, &client, &progress, @@ -1809,7 +1849,7 @@ mod shielded_ctx_tests { // first fetch no blocks masp_tx_sender.send(None).expect("Test failed"); - fetch_shielded_ctx::<_, _, _, TestingMaspClient<'_>, _>( + fetch_shielded_ctx::<_, _, _, _, TestingMaspClient<'_>>( &shielded_ctx.utils, &client, &progress, @@ -1836,7 +1876,7 @@ mod shielded_ctx_tests { ))) .expect("Test failed"); masp_tx_sender.send(None).expect("Test failed"); - fetch_shielded_ctx::<_, _, _, TestingMaspClient<'_>, _>( + fetch_shielded_ctx::<_, _, _, _, TestingMaspClient<'_>>( &shielded_ctx.utils, &client, &progress, @@ -1853,7 +1893,7 @@ mod shielded_ctx_tests { // fetch no blocks masp_tx_sender.send(None).expect("Test failed"); - fetch_shielded_ctx::<_, _, _, TestingMaspClient<'_>, _>( + fetch_shielded_ctx::<_, _, _, _, TestingMaspClient<'_>>( &shielded_ctx.utils, &client, &progress, @@ -1872,7 +1912,7 @@ mod shielded_ctx_tests { // thus the amount left to fetch should increase let (client, masp_tx_sender) = test_client(3.into()); masp_tx_sender.send(None).expect("Test failed"); - fetch_shielded_ctx::<_, _, _, TestingMaspClient<'_>, _>( + fetch_shielded_ctx::<_, _, _, _, TestingMaspClient<'_>>( &shielded_ctx.utils, &client, &progress, @@ -1911,7 +1951,7 @@ mod shielded_ctx_tests { // this should not 
produce an error since we have fetched // all expected blocks masp_tx_sender.send(None).expect("Test failed"); - fetch_shielded_ctx::<_, _, _, TestingMaspClient<'_>, _>( + fetch_shielded_ctx::<_, _, _, _, TestingMaspClient<'_>>( &shielded_ctx.utils, &client, &progress, @@ -1967,7 +2007,7 @@ mod shielded_ctx_tests { ))) .expect("Test failed"); - fetch_shielded_ctx::<_, _, _, TestingMaspClient<'_>, _>( + fetch_shielded_ctx::<_, _, _, _, TestingMaspClient<'_>>( &shielded_ctx.utils, &client, &progress, @@ -2001,7 +2041,7 @@ mod shielded_ctx_tests { /// Test that we cache and persist trial-decryptions /// when the scanning process does not complete successfully. #[tokio::test(flavor = "multi_thread", worker_threads = 2)] - async fn test_decrypted_cache() { + async fn test_decrypted_cache_persisted() { let temp_dir = tempdir().unwrap(); let mut shielded_ctx = FsShieldedUtils::new(temp_dir.path().to_path_buf()); @@ -2031,7 +2071,7 @@ mod shielded_ctx_tests { masp_tx_sender.send(None).expect("Test failed"); // we expect this to fail. - let result = fetch_shielded_ctx::<_, _, _, TestingMaspClient<'_>, _>( + let result = fetch_shielded_ctx::<_, _, _, _, TestingMaspClient<'_>>( &shielded_ctx.utils, &client, &progress, @@ -2081,7 +2121,7 @@ mod shielded_ctx_tests { masp_tx_sender.send(None).expect("Test failed"); // we expect this to fail. - fetch_shielded_ctx::<_, _, _, TestingMaspClient<'_>, _>( + fetch_shielded_ctx::<_, _, _, _, TestingMaspClient<'_>>( &shielded_ctx.utils, &client, &progress, @@ -2110,4 +2150,93 @@ mod shielded_ctx_tests { // cache as scanning is done in parallel, introducing non-determinism assert!(!result.is_empty()); } + + /// Test that we cache trial-decryptions so that we don't try to + /// trial decrypt the same note twice. + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] + async fn test_decrypted_cache() { + // make sure that no unintended messages are contained. 
+ RECEIVED.lock().unwrap().clear(); + + let temp_dir = tempdir().unwrap(); + let mut shielded_ctx = + FsShieldedUtils::new(temp_dir.path().to_path_buf()); + let (client, masp_tx_sender) = test_client(1.into()); + let io = StdIo; + let progress = DefaultTracker::new(&io); + let vk = ExtendedFullViewingKey::from( + ExtendedViewingKey::from_str(AA_VIEWING_KEY).expect("Test failed"), + ) + .fvk + .vk; + + // Put a note into the decrypted cache + let masp_tx = arbitrary_masp_tx(); + shielded_ctx.decrypted_note_cache.insert( + IndexedTx { + height: 1.into(), + index: TxIndex(1), + inner_tx: Default::default(), + }, + vk, + DecryptedData { + tx: masp_tx.clone(), + delta: Default::default(), + }, + ); + shielded_ctx.save().await.expect("Test failed"); + shielded_ctx.load_confirmed().await.expect("Test failed"); + assert!(shielded_ctx.decrypted_note_cache.contains( + &IndexedTx { + height: 1.into(), + index: TxIndex(1), + inner_tx: Default::default(), + }, + &vk + )); + masp_tx_sender + .send(Some(( + IndexedTx { + height: 1.into(), + index: TxIndex(1), + inner_tx: Default::default(), + }, + masp_tx.clone(), + ))) + .expect("Test failed"); + + // we expect this to succeed. 
+ fetch_shielded_aux::<_, _, _, _, TestingMaspClient<'_>>( + &shielded_ctx.utils, + &client, + &progress, + RetryStrategy::Times(1), + None, + None, + 0, + &[], + &[vk], + Some(publish_message), + ) + .await + .expect("Test failed"); + + // reload the shielded context + shielded_ctx.load_confirmed().await.expect("Test failed"); + assert!(shielded_ctx.decrypted_note_cache.is_empty()); + + let received = std::mem::take(&mut *RECEIVED.lock().unwrap()); + let _action_data = Action::Data( + ScannedData::default(), + IndexedTx { + height: 1.into(), + index: TxIndex(1), + inner_tx: Default::default(), + }, + ); + assert_matches!( + received.as_slice(), + [_action_data, Action::Complete { with_error: false }] + ) + } } diff --git a/crates/sdk/src/masp/test_utils.rs b/crates/sdk/src/masp/test_utils.rs index 1658bb6ac6..6dd5e0bb66 100644 --- a/crates/sdk/src/masp/test_utils.rs +++ b/crates/sdk/src/masp/test_utils.rs @@ -2,6 +2,7 @@ use std::collections::BTreeMap; use std::ops::{Deref, DerefMut}; use std::sync::{Arc, Mutex}; +use lazy_static::lazy_static; use masp_primitives::merkle_tree::CommitmentTree; use masp_primitives::sapling::Node; use namada_core::storage::BlockHeight; @@ -13,13 +14,18 @@ use crate::error::Error; use crate::io::Io; use crate::masp::types::IndexedNoteEntry; use crate::masp::utils::{ - CommitmentTreeUpdates, FetchQueueSender, IterProgress, MaspClient, + Action, CommitmentTreeUpdates, FetchQueueSender, IterProgress, MaspClient, PeekableIter, ProgressTracker, }; use crate::masp::{ShieldedContext, ShieldedUtils}; use crate::queries::testing::TestClient; use crate::queries::{Client, EncodedResponseQuery, Rpc, RPC}; +lazy_static! { + /// N.B. 
Don't share me among tests running in parallel + pub(super) static ref RECEIVED: Arc>> = Arc::new(Mutex::new(vec![])); +} + /// A client for testing the shielded-sync functionality pub struct TestingClient { /// An actual mocked client for querying @@ -171,6 +177,11 @@ impl<'a> MaspClient<'a, TestingClient> for TestingMaspClient<'a> { } } +/// Publish a message to the task manager into a globally readable buffer +pub(super) fn publish_message(action: &Action) { + RECEIVED.lock().unwrap().push(action.clone()); +} + /// An iterator that yields its first element only struct YieldOnceIterator { first: Option, diff --git a/crates/sdk/src/masp/types.rs b/crates/sdk/src/masp/types.rs index 45f3fa432a..50d3219ffc 100644 --- a/crates/sdk/src/masp/types.rs +++ b/crates/sdk/src/masp/types.rs @@ -134,12 +134,12 @@ pub struct MaspChange { pub change: token::Change, } -#[derive(Debug, Default)] /// Data returned by successfully scanning a tx /// /// This is append-only data that will be sent /// to a [`TaskManager`] to be applied to the /// shielded context. +#[derive(Debug, Clone, Default)] pub(super) struct ScannedData { pub div_map: HashMap, pub memo_map: HashMap, @@ -147,7 +147,6 @@ pub(super) struct ScannedData { pub nf_map: HashMap, pub pos_map: HashMap>, pub vk_map: HashMap, - pub decrypted_note_cache: DecryptedDataCache, } impl ScannedData { @@ -177,16 +176,10 @@ impl ScannedData { for (k, v) in self.memo_map.drain(..) { ctx.memo_map.insert(k, v); } - // NB: the `decrypted_note_cache` is not carried over - // from `self` because it is assumed they are pointing - // to the same underlying `Arc` - debug_assert_eq!( - Arc::as_ptr(&ctx.decrypted_note_cache.inner), - Arc::as_ptr(&self.decrypted_note_cache.inner), - ); } /// Merge to different instances of `Self`. + /// This keeps the pointer pub(super) fn merge(&mut self, mut other: Self) { for (k, v) in other.note_map.drain(..) 
{ self.note_map.insert(k, v); @@ -209,13 +202,6 @@ impl ScannedData { for (k, v) in other.memo_map.drain(..) { self.memo_map.insert(k, v); } - // NB: the `decrypted_note_cache` is not carried over - // from `other` because it is assumed they are pointing - // to the same underlying `Arc` - debug_assert_eq!( - Arc::as_ptr(&other.decrypted_note_cache.inner), - Arc::as_ptr(&self.decrypted_note_cache.inner), - ); } } @@ -291,6 +277,16 @@ impl DecryptedDataCache { let mut locked = self.inner.write().unwrap(); std::mem::take(&mut *locked).into_values() } + + /// Get the size of the current cache + pub fn len(&self) -> usize { + self.inner.read().unwrap().len() + } + + /// Check if cache is empty + pub fn is_empty(&self) -> bool { + self.len() == 0 + } } /// A cache of fetched indexed transactions. diff --git a/crates/sdk/src/masp/utils.rs b/crates/sdk/src/masp/utils.rs index 70416ff679..1de7b73e39 100644 --- a/crates/sdk/src/masp/utils.rs +++ b/crates/sdk/src/masp/utils.rs @@ -568,7 +568,8 @@ pub mod fetch_channel { /// The actions that the scanning process can /// schedule to be run on the main thread. #[allow(clippy::large_enum_variant)] -enum Action { +#[derive(Debug, Clone)] +pub(super) enum Action { /// Signal that the scanning process has ended and if it did so with /// an error Complete { with_error: bool }, @@ -583,8 +584,10 @@ enum Action { /// schedules. pub struct TaskManager { action: Receiver, - pub(super) latest_idx: IndexedTx, + latest_idx: IndexedTx, ctx: Arc>>, + // for testing purposes + callback: Option, } #[derive(Clone)] @@ -600,7 +603,10 @@ pub(super) struct TaskScheduler { impl TaskManager { /// Create a new [`TaskManage`] and a [`TaskScheduler`] which can be used /// to schedule tasks to be run by the manager. 
- pub(super) fn new(ctx: ShieldedContext) -> (TaskScheduler, Self) { + pub(super) fn new( + ctx: ShieldedContext, + callback: Option, + ) -> (TaskScheduler, Self) { let (action_send, action_recv) = tokio::sync::mpsc::channel(100); ( TaskScheduler { @@ -611,6 +617,7 @@ impl TaskManager { action: action_recv, latest_idx: Default::default(), ctx: Arc::new(futures_locks::Mutex::new(ctx)), + callback, }, ) } @@ -619,42 +626,56 @@ impl TaskManager { /// that process indicates it has finished. pub async fn run(&mut self) -> Result<(), Error> { while let Some(action) = self.action.recv().await { - match action { - // On completion, update the height to which all keys have been - // synced and then save. - Action::Complete { with_error } => { - if !with_error { - let mut locked = self.ctx.lock().await; - // possibly remove unneeded elements from the cache. - locked.unscanned.scanned(&self.latest_idx); - // update each key to be synced to the latest scanned - // height. - for (_, h) in locked.vk_heights.iter_mut() { - // Due to a failure to fetch new blocks, we - // may not have made scanning progress. Hence - // the max computation. - *h = std::cmp::max(*h, Some(self.latest_idx)); - } - // updated the spent notes and balances - locked.nullify_spent_notes()?; - _ = locked.save().await; - } - return Ok(()); - } - Action::Data(scanned, idx) => { - // track the latest scanned height. Due to parallelism, - // these won't come in ascending order, thus we should - // track the maximum seen. - self.latest_idx = std::cmp::max(self.latest_idx, idx); - // apply state changes from the scanning process + if let Some(f) = self.callback { + f(&action) + } + let finished = self.dispatch_action(action).await?; + if finished { + return Ok(()); + } + } + Ok(()) + } + + pub(super) async fn dispatch_action( + &mut self, + action: Action, + ) -> Result { + match action { + // On completion, update the height to which all keys have been + // synced and then save. 
+ Action::Complete { with_error } => { + if !with_error { let mut locked = self.ctx.lock().await; - scanned.apply_to(&mut locked); - // persist the changes + // possibly remove unneeded elements from the cache. + locked.unscanned.scanned(&self.latest_idx); + // update each key to be synced to the latest scanned + // height. + for (_, h) in locked.vk_heights.iter_mut() { + // Due to a failure to fetch new blocks, we + // may not have made scanning progress. Hence + // the max computation. + *h = std::cmp::max(*h, Some(self.latest_idx)); + } + // updated the spent notes and balances + locked.nullify_spent_notes()?; _ = locked.save().await; } + Ok(true) + } + Action::Data(scanned, idx) => { + // track the latest scanned height. Due to parallelism, + // these won't come in ascending order, thus we should + // track the maximum seen. + self.latest_idx = std::cmp::max(self.latest_idx, idx); + // apply state changes from the scanning process + let mut locked = self.ctx.lock().await; + scanned.apply_to(&mut locked); + // persist the changes + _ = locked.save().await; + Ok(false) } } - Ok(()) } } From 1539e09667dcebd6cc667e6b1922676bdaf2dd14 Mon Sep 17 00:00:00 2001 From: satan Date: Mon, 3 Jun 2024 18:31:08 +0200 Subject: [PATCH 27/29] Fixed a clone implementation on tempdir in benchmarks --- crates/node/src/bench_utils.rs | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/crates/node/src/bench_utils.rs b/crates/node/src/bench_utils.rs index ab7b10c97a..2e5efe365b 100644 --- a/crates/node/src/bench_utils.rs +++ b/crates/node/src/bench_utils.rs @@ -10,7 +10,7 @@ use std::io::Write; use std::ops::{Deref, DerefMut}; use std::path::PathBuf; use std::str::FromStr; -use std::sync::Once; +use std::sync::{Arc, Once}; use borsh::{BorshDeserialize, BorshSerialize}; use borsh_ext::BorshSerializeExt; @@ -650,20 +650,14 @@ pub struct BenchShieldedCtx { pub wallet: Wallet, } -#[derive(Debug)] -struct WrapperTempDir(TempDir); +#[derive(Debug, Clone)] 
+struct WrapperTempDir(Arc); // Mock the required traits for ShieldedUtils impl Default for WrapperTempDir { fn default() -> Self { - Self(TempDir::new().unwrap()) - } -} - -impl Clone for WrapperTempDir { - fn clone(&self) -> Self { - Self(TempDir::new().unwrap()) + Self(Arc::new(TempDir::new().unwrap())) } } From 5bc4268796315defe427cba52478e37a6369711a Mon Sep 17 00:00:00 2001 From: satan Date: Mon, 3 Jun 2024 21:09:18 +0200 Subject: [PATCH 28/29] rebasing --- Cargo.lock | 13 - crates/sdk/src/masp/mod.rs | 407 +++++++++++----------------- crates/sdk/src/masp/shielded_ctx.rs | 205 +++++++------- crates/sdk/src/masp/types.rs | 20 +- crates/sdk/src/masp/utils.rs | 214 ++++++--------- wasm/Cargo.lock | 37 ++- wasm_for_tests/Cargo.lock | 37 ++- 7 files changed, 420 insertions(+), 513 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8c0d52d47a..0c189121f3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -376,11 +376,7 @@ dependencies = [ "percent-encoding", "pin-project-lite", "rustversion", -<<<<<<< HEAD "serde", -======= - "serde 1.0.193", ->>>>>>> 7229d4d13 (Update Cargo.lock) "sync_wrapper 0.1.2", "tower", "tower-layer", @@ -5590,15 +5586,6 @@ dependencies = [ "getrandom 0.2.15", ] -[[package]] -name = "nanorand" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a51313c5820b0b02bd422f4b44776fbf47961755c74ce64afc73bfad10226c3" -dependencies = [ - "getrandom 0.2.11", -] - [[package]] name = "native-tls" version = "0.2.11" diff --git a/crates/sdk/src/masp/mod.rs b/crates/sdk/src/masp/mod.rs index dd67e537d8..1dd497dc4b 100644 --- a/crates/sdk/src/masp/mod.rs +++ b/crates/sdk/src/masp/mod.rs @@ -17,27 +17,23 @@ use lazy_static::lazy_static; use masp_primitives::consensus::MainNetwork as Network; #[cfg(not(feature = "mainnet"))] use masp_primitives::consensus::TestNetwork as Network; -use masp_primitives::group::GroupEncoding; -use masp_primitives::sapling::redjubjub::PublicKey; -use 
masp_primitives::transaction::components::transparent::builder::TransparentBuilder; -use masp_primitives::transaction::components::{ - ConvertDescription, I128Sum, OutputDescription, SpendDescription, TxOut, +use masp_primitives::transaction::components::sapling::{ + Authorized as SaplingAuthorized, Bundle as SaplingBundle, }; +use masp_primitives::transaction::components::transparent::builder::TransparentBuilder; +use masp_primitives::transaction::components::TxOut; use masp_primitives::transaction::sighash::{signature_hash, SignableInput}; use masp_primitives::transaction::txid::TxIdDigester; -use masp_primitives::transaction::{ - Authorization, Authorized, Transaction, TransactionData, -}; -use masp_proofs::bellman::groth16::PreparedVerifyingKey; -use masp_proofs::bls12_381::Bls12; +use masp_primitives::transaction::{Authorized, Transaction, TransactionData}; use masp_proofs::prover::LocalTxProver; -#[cfg(not(feature = "testing"))] -use masp_proofs::sapling::SaplingVerificationContext; +use masp_proofs::sapling::BatchValidator; +use namada_core::arith::checked; pub use namada_core::masp::{ encode_asset_type, AssetData, BalanceOwner, ExtendedViewingKey, PaymentAddress, TransferSource, TransferTarget, }; use namada_state::StorageError; +use rand_core::OsRng; pub use shielded_ctx::ShieldedContext; pub use types::PVKs; pub use utils::{ @@ -97,83 +93,13 @@ lazy_static! 
{ convert_path.as_path(), ); PVKs { - spend_vk: params.spend_vk, - convert_vk: params.convert_vk, - output_vk: params.output_vk + spend_vk: params.spend_params.vk, + convert_vk: params.convert_params.vk, + output_vk: params.output_params.vk, } }; } -/// check_spend wrapper -pub fn check_spend( - spend: &SpendDescription<::SaplingAuth>, - sighash: &[u8; 32], - #[cfg(not(feature = "testing"))] ctx: &mut SaplingVerificationContext, - #[cfg(feature = "testing")] - ctx: &mut testing::MockSaplingVerificationContext, - parameters: &PreparedVerifyingKey, -) -> bool { - let zkproof = - masp_proofs::bellman::groth16::Proof::read(spend.zkproof.as_slice()); - let zkproof = match zkproof { - Ok(zkproof) => zkproof, - _ => return false, - }; - - ctx.check_spend( - spend.cv, - spend.anchor, - &spend.nullifier.0, - PublicKey(spend.rk.0), - sighash, - spend.spend_auth_sig, - zkproof, - parameters, - ) -} - -/// check_output wrapper -pub fn check_output( - output: &OutputDescription<<::SaplingAuth as masp_primitives::transaction::components::sapling::Authorization>::Proof>, - #[cfg(not(feature = "testing"))] ctx: &mut SaplingVerificationContext, - #[cfg(feature = "testing")] - ctx: &mut testing::MockSaplingVerificationContext, - parameters: &PreparedVerifyingKey, -) -> bool { - let zkproof = - masp_proofs::bellman::groth16::Proof::read(output.zkproof.as_slice()); - let zkproof = match zkproof { - Ok(zkproof) => zkproof, - _ => return false, - }; - let epk = - masp_proofs::jubjub::ExtendedPoint::from_bytes(&output.ephemeral_key.0); - let epk = match epk.into() { - Some(p) => p, - None => return false, - }; - - ctx.check_output(output.cv, output.cmu, epk, zkproof, parameters) -} - -/// check convert wrapper -pub fn check_convert( - convert: &ConvertDescription<<::SaplingAuth as masp_primitives::transaction::components::sapling::Authorization>::Proof>, - #[cfg(not(feature = "testing"))] ctx: &mut SaplingVerificationContext, - #[cfg(feature = "testing")] - ctx: &mut 
testing::MockSaplingVerificationContext, - parameters: &PreparedVerifyingKey, -) -> bool { - let zkproof = - masp_proofs::bellman::groth16::Proof::read(convert.zkproof.as_slice()); - let zkproof = match zkproof { - Ok(zkproof) => zkproof, - _ => return false, - }; - - ctx.check_convert(convert.cv, convert.anchor, zkproof, parameters) -} - /// Partially deauthorize the transparent bundle pub fn partial_deauthorize( tx_data: &TransactionData, @@ -210,10 +136,10 @@ pub fn partial_deauthorize( /// Verify a shielded transaction. pub fn verify_shielded_tx( transaction: &Transaction, - mut consume_verify_gas: F, + consume_verify_gas: F, ) -> Result<(), StorageError> where - F: FnMut(u64) -> std::result::Result<(), StorageError>, + F: Fn(u64) -> std::result::Result<(), StorageError>, { tracing::info!("entered verify_shielded_tx()"); @@ -250,49 +176,98 @@ where } = load_pvks(); #[cfg(not(feature = "testing"))] - let mut ctx = SaplingVerificationContext::new(true); + let mut ctx = BatchValidator::new(); #[cfg(feature = "testing")] - let mut ctx = testing::MockSaplingVerificationContext::new(true); - for spend in &sapling_bundle.shielded_spends { - consume_verify_gas(namada_gas::MASP_VERIFY_SPEND_GAS)?; - if !check_spend(spend, sighash.as_ref(), &mut ctx, spend_vk) { - return Err(StorageError::SimpleMessage("Invalid shielded spend")); - } - } - for convert in &sapling_bundle.shielded_converts { - consume_verify_gas(namada_gas::MASP_VERIFY_CONVERT_GAS)?; - if !check_convert(convert, &mut ctx, convert_vk) { - return Err(StorageError::SimpleMessage( - "Invalid shielded conversion", - )); - } + let mut ctx = testing::MockBatchValidator::default(); + + // Charge gas before check bundle + charge_masp_check_bundle_gas(sapling_bundle, &consume_verify_gas)?; + + if !ctx.check_bundle(sapling_bundle.to_owned(), sighash.as_ref().to_owned()) + { + tracing::debug!("failed check bundle"); + return Err(StorageError::SimpleMessage("Invalid sapling bundle")); } - for output in 
&sapling_bundle.shielded_outputs { - consume_verify_gas(namada_gas::MASP_VERIFY_OUTPUT_GAS)?; - if !check_output(output, &mut ctx, output_vk) { - return Err(StorageError::SimpleMessage("Invalid shielded output")); - } + tracing::debug!("passed check bundle"); + + // Charge gas before final validation + charge_masp_validate_gas(sapling_bundle, consume_verify_gas)?; + if !ctx.validate(spend_vk, convert_vk, output_vk, OsRng) { + return Err(StorageError::SimpleMessage( + "Invalid proofs or signatures", + )); } + Ok(()) +} - tracing::info!("passed spend/output verification"); +fn charge_masp_check_bundle_gas( + sapling_bundle: &SaplingBundle, + consume_verify_gas: F, +) -> Result<(), namada_state::StorageError> +where + F: Fn(u64) -> std::result::Result<(), namada_state::StorageError>, +{ + consume_verify_gas(checked!( + (sapling_bundle.shielded_spends.len() as u64) + * namada_gas::MASP_SPEND_CHECK_GAS + )?)?; + + consume_verify_gas(checked!( + (sapling_bundle.shielded_converts.len() as u64) + * namada_gas::MASP_CONVERT_CHECK_GAS + )?)?; + + consume_verify_gas(checked!( + (sapling_bundle.shielded_outputs.len() as u64) + * namada_gas::MASP_OUTPUT_CHECK_GAS + )?) +} - let assets_and_values: I128Sum = sapling_bundle.value_balance.clone(); +fn charge_masp_validate_gas( + sapling_bundle: &SaplingBundle, + consume_verify_gas: F, +) -> Result<(), namada_state::StorageError> +where + F: Fn(u64) -> std::result::Result<(), namada_state::StorageError>, +{ + consume_verify_gas(checked!( + ((sapling_bundle.shielded_spends.len() as u64) + 1) + * namada_gas::MASP_VERIFY_SIG_GAS + )?)?; + + // If at least one note is present charge the fixed costs. 
Then charge the + // variable cost for every other note, amortized on the fixed expected + // number of cores + if let Some(remaining_notes) = + sapling_bundle.shielded_spends.len().checked_sub(1) + { + consume_verify_gas(namada_gas::MASP_FIXED_SPEND_GAS)?; + consume_verify_gas(checked!( + namada_gas::MASP_VARIABLE_SPEND_GAS * remaining_notes as u64 + / namada_gas::MASP_PARALLEL_GAS_DIVIDER + )?)?; + } - tracing::info!( - "accumulated {} assets/values", - assets_and_values.components().len() - ); + if let Some(remaining_notes) = + sapling_bundle.shielded_converts.len().checked_sub(1) + { + consume_verify_gas(namada_gas::MASP_FIXED_CONVERT_GAS)?; + consume_verify_gas(checked!( + namada_gas::MASP_VARIABLE_CONVERT_GAS * remaining_notes as u64 + / namada_gas::MASP_PARALLEL_GAS_DIVIDER + )?)?; + } - consume_verify_gas(namada_gas::MASP_VERIFY_FINAL_GAS)?; - let result = ctx.final_check( - assets_and_values, - sighash.as_ref(), - sapling_bundle.authorization.binding_sig, - ); - tracing::info!("final check result {result}"); - if !result { - return Err(StorageError::SimpleMessage("MASP final check failed")); + if let Some(remaining_notes) = + sapling_bundle.shielded_spends.len().checked_sub(1) + { + consume_verify_gas(namada_gas::MASP_FIXED_OUTPUT_GAS)?; + consume_verify_gas(checked!( + namada_gas::MASP_VARIABLE_OUTPUT_GAS * remaining_notes as u64 + / namada_gas::MASP_PARALLEL_GAS_DIVIDER + )?)?; } + Ok(()) } @@ -413,28 +388,35 @@ pub mod testing { use std::ops::AddAssign; use std::sync::Mutex; - use bls12_381::{G1Affine, G2Affine}; + use bls12_381::{Bls12, G1Affine, G2Affine}; use masp_primitives::asset_type::AssetType; use masp_primitives::consensus::testing::arb_height; use masp_primitives::constants::SPENDING_KEY_GENERATOR; use masp_primitives::convert::AllowedConversion; use masp_primitives::ff::PrimeField; + use masp_primitives::group::GroupEncoding; use masp_primitives::memo::MemoBytes; use masp_primitives::merkle_tree::MerklePath; use 
masp_primitives::sapling::note_encryption::{ try_sapling_note_decryption, PreparedIncomingViewingKey, }; use masp_primitives::sapling::prover::TxProver; - use masp_primitives::sapling::redjubjub::Signature; + use masp_primitives::sapling::redjubjub::{PublicKey, Signature}; use masp_primitives::sapling::{ Diversifier, Node, Note, ProofGenerationKey, Rseed, }; use masp_primitives::transaction::builder::Builder; - use masp_primitives::transaction::components::sapling::builder::RngBuildParams; - use masp_primitives::transaction::components::{U64Sum, GROTH_PROOF_SIZE}; + use masp_primitives::transaction::components::sapling::builder::{ + RngBuildParams, StoredBuildParams, + }; + use masp_primitives::transaction::components::sapling::Bundle; + use masp_primitives::transaction::components::{ + I128Sum, OutputDescription, U64Sum, GROTH_PROOF_SIZE, + }; use masp_primitives::transaction::fees::fixed::FeeRule; - use masp_primitives::transaction::TransparentAddress; + use masp_primitives::transaction::{Authorization, TransparentAddress}; use masp_primitives::zip32::{ExtendedFullViewingKey, ExtendedSpendingKey}; + use masp_proofs::bellman::groth16; use masp_proofs::bellman::groth16::Proof; use namada_core::collections::HashMap; use namada_core::token::MaspDigitPos; @@ -453,129 +435,59 @@ pub mod testing { use crate::masp_primitives::sapling::keys::OutgoingViewingKey; use crate::masp_primitives::sapling::redjubjub::PrivateKey; use crate::masp_primitives::transaction::components::transparent::testing::arb_transparent_address; - use crate::masp_proofs::sapling::SaplingVerificationContextInner; use crate::storage::testing::arb_epoch; use crate::token::testing::arb_denomination; - /// A context object for verifying the Sapling components of a single Zcash - /// transaction. Same as SaplingVerificationContext, but always assumes the - /// proofs to be valid. 
- pub struct MockSaplingVerificationContext { - inner: SaplingVerificationContextInner, - zip216_enabled: bool, + /// A context object for verifying the Sapling components of MASP + /// transactions. Same as BatchValidator, but always assumes the + /// proofs and signatures to be valid. + pub struct MockBatchValidator { + inner: BatchValidator, } - impl MockSaplingVerificationContext { - /// Construct a new context to be used with a single transaction. - pub fn new(zip216_enabled: bool) -> Self { - MockSaplingVerificationContext { - inner: SaplingVerificationContextInner::new(), - zip216_enabled, + impl Default for MockBatchValidator { + fn default() -> Self { + MockBatchValidator { + inner: BatchValidator::new(), } } + } - /// Perform consensus checks on a Sapling SpendDescription, while - /// accumulating its value commitment inside the context for later use. - #[allow(clippy::too_many_arguments)] - pub fn check_spend( - &mut self, - cv: jubjub::ExtendedPoint, - anchor: bls12_381::Scalar, - nullifier: &[u8; 32], - rk: PublicKey, - sighash_value: &[u8; 32], - spend_auth_sig: Signature, - zkproof: Proof, - _verifying_key: &PreparedVerifyingKey, - ) -> bool { - let zip216_enabled = true; - self.inner.check_spend( - cv, - anchor, - nullifier, - rk, - sighash_value, - spend_auth_sig, - zkproof, - &mut (), - |_, rk, msg, spend_auth_sig| { - rk.verify_with_zip216( - &msg, - &spend_auth_sig, - SPENDING_KEY_GENERATOR, - zip216_enabled, - ) - }, - |_, _proof, _public_inputs| true, - ) - } - - /// Perform consensus checks on a Sapling SpendDescription, while - /// accumulating its value commitment inside the context for later use. 
- #[allow(clippy::too_many_arguments)] - pub fn check_convert( - &mut self, - cv: jubjub::ExtendedPoint, - anchor: bls12_381::Scalar, - zkproof: Proof, - _verifying_key: &PreparedVerifyingKey, - ) -> bool { - self.inner.check_convert( - cv, - anchor, - zkproof, - &mut (), - |_, _proof, _public_inputs| true, - ) - } - - /// Perform consensus checks on a Sapling OutputDescription, while - /// accumulating its value commitment inside the context for later use. - pub fn check_output( + impl MockBatchValidator { + /// Checks the bundle against Sapling-specific consensus rules, and adds + /// its proof and signatures to the validator. + /// + /// Returns `false` if the bundle doesn't satisfy all of the consensus + /// rules. This `BatchValidator` can continue to be used + /// regardless, but some or all of the proofs and signatures + /// from this bundle may have already been added to the batch even if + /// it fails other consensus rules. + pub fn check_bundle( &mut self, - cv: jubjub::ExtendedPoint, - cmu: bls12_381::Scalar, - epk: jubjub::ExtendedPoint, - zkproof: Proof, - _verifying_key: &PreparedVerifyingKey, + bundle: Bundle< + masp_primitives::transaction::components::sapling::Authorized, + >, + sighash: [u8; 32], ) -> bool { - self.inner.check_output( - cv, - cmu, - epk, - zkproof, - |_proof, _public_inputs| true, - ) + self.inner.check_bundle(bundle, sighash) } - /// Perform consensus checks on the valueBalance and bindingSig parts of - /// a Sapling transaction. All SpendDescriptions and - /// OutputDescriptions must have been checked before calling - /// this function. - pub fn final_check( - &self, - value_balance: I128Sum, - sighash_value: &[u8; 32], - binding_sig: Signature, + /// Batch-validates the accumulated bundles. + /// + /// Returns `true` if every proof and signature in every bundle added to + /// the batch validator is valid, or `false` if one or more are + /// invalid. 
No attempt is made to figure out which of the + /// accumulated bundles might be invalid; if that information is + /// desired, construct separate [`BatchValidator`]s for sub-batches of + /// the bundles. + pub fn validate( + self, + _spend_vk: &groth16::VerifyingKey, + _convert_vk: &groth16::VerifyingKey, + _output_vk: &groth16::VerifyingKey, + mut _rng: R, ) -> bool { - self.inner.final_check( - value_balance, - sighash_value, - binding_sig, - |bvk, msg, binding_sig| { - // Compute the signature's message for bvk/binding_sig - let mut data_to_be_signed = [0u8; 64]; - data_to_be_signed[0..32].copy_from_slice(&bvk.0.to_bytes()); - data_to_be_signed[32..64].copy_from_slice(msg); - - bvk.verify_with_zip216( - &data_to_be_signed, - &binding_sig, - VALUE_COMMITMENT_RANDOMNESS_GENERATOR, - self.zip216_enabled, - ) - }, - ) + true } } @@ -1209,19 +1121,20 @@ pub mod testing { prover_rng in arb_rng().prop_map(TestCsprng), mut rng in arb_rng().prop_map(TestCsprng), bparams_rng in arb_rng().prop_map(TestCsprng), - ) -> (ShieldedTransfer, HashMap) { + ) -> (ShieldedTransfer, HashMap, StoredBuildParams) { + let mut rng_build_params = RngBuildParams::new(bparams_rng); let (masp_tx, metadata) = builder.clone().build( &MockTxProver(Mutex::new(prover_rng)), &FeeRule::non_standard(U64Sum::zero()), &mut rng, - &mut RngBuildParams::new(bparams_rng), + &mut rng_build_params, ).unwrap(); (ShieldedTransfer { builder: builder.map_builder(WalletMap), metadata, masp_tx, epoch, - }, asset_types) + }, asset_types, rng_build_params.to_stored().unwrap()) } } @@ -1239,19 +1152,20 @@ pub mod testing { prover_rng in arb_rng().prop_map(TestCsprng), mut rng in arb_rng().prop_map(TestCsprng), bparams_rng in arb_rng().prop_map(TestCsprng), - ) -> (ShieldedTransfer, HashMap) { + ) -> (ShieldedTransfer, HashMap, StoredBuildParams) { + let mut rng_build_params = RngBuildParams::new(bparams_rng); let (masp_tx, metadata) = builder.clone().build( &MockTxProver(Mutex::new(prover_rng)), 
&FeeRule::non_standard(U64Sum::zero()), &mut rng, - &mut RngBuildParams::new(bparams_rng), + &mut rng_build_params, ).unwrap(); (ShieldedTransfer { builder: builder.map_builder(WalletMap), metadata, masp_tx, epoch, - }, asset_types) + }, asset_types, rng_build_params.to_stored().unwrap()) } } @@ -1269,19 +1183,20 @@ pub mod testing { prover_rng in arb_rng().prop_map(TestCsprng), mut rng in arb_rng().prop_map(TestCsprng), bparams_rng in arb_rng().prop_map(TestCsprng), - ) -> (ShieldedTransfer, HashMap) { + ) -> (ShieldedTransfer, HashMap, StoredBuildParams) { + let mut rng_build_params = RngBuildParams::new(bparams_rng); let (masp_tx, metadata) = builder.clone().build( &MockTxProver(Mutex::new(prover_rng)), &FeeRule::non_standard(U64Sum::zero()), &mut rng, - &mut RngBuildParams::new(bparams_rng), + &mut rng_build_params, ).unwrap(); (ShieldedTransfer { builder: builder.map_builder(WalletMap), metadata, masp_tx, epoch, - }, asset_types) + }, asset_types, rng_build_params.to_stored().unwrap()) } } } diff --git a/crates/sdk/src/masp/shielded_ctx.rs b/crates/sdk/src/masp/shielded_ctx.rs index 06f09f8a38..915747fc14 100644 --- a/crates/sdk/src/masp/shielded_ctx.rs +++ b/crates/sdk/src/masp/shielded_ctx.rs @@ -36,7 +36,7 @@ use namada_core::masp::{ use namada_core::storage::{BlockHeight, Epoch}; use namada_core::time::{DateTimeUtc, DurationSecs}; use namada_token::{self as token, Denomination, MaspDigitPos}; -use namada_tx::{IndexedTx, TxCommitments}; +use namada_tx::IndexedTx; use rand_core::OsRng; use rayon::prelude::*; use ripemd::Digest as RipemdDigest; @@ -216,7 +216,7 @@ impl ShieldedContext { sync_status: ContextSyncStatus, indexed_tx: IndexedTx, tx_note_map: &BTreeMap, - shielded: &Transaction, + txs: &[Transaction], vk: &ViewingKey, ) -> Result<(ScannedData, TransactionDelta), Error> { // For tracking the account changes caused by this Transaction @@ -231,56 +231,64 @@ impl ShieldedContext { indexed_tx )) })?; - // Listen for notes sent to our viewing keys, only 
if we are syncing - // (i.e. in a confirmed status) - for so in shielded - .sapling_bundle() - .map_or(&vec![], |x| &x.shielded_outputs) - { - // Let's try to see if this viewing key can decrypt latest - // note - let notes = scanned_data.pos_map.entry(*vk).or_default(); - let decres = try_sapling_note_decryption::<_, OutputDescription<<::SaplingAuth as masp_primitives::transaction::components::sapling::Authorization>::Proof>>( - &NETWORK, - 1.into(), - &PreparedIncomingViewingKey::new(&vk.ivk()), - so, - ); - // So this current viewing key does decrypt this current note... - if let Some((note, pa, memo)) = decres { - // Add this note to list of notes decrypted by this viewing - // key - notes.insert(note_pos); - // Compute the nullifier now to quickly recognize when spent - let nf = note.nf( - &vk.nk, - note_pos.try_into().map_err(|_| { - Error::Other("Can not get nullifier".to_string()) - })?, - ); - scanned_data.note_map.insert(note_pos, note); - scanned_data.memo_map.insert(note_pos, memo); - // The payment address' diversifier is required to spend + for shielded in txs { + // Listen for notes sent to our viewing keys, only if we are + // syncing (i.e. 
in a confirmed status) + for so in shielded + .sapling_bundle() + .map_or(&vec![], |x| &x.shielded_outputs) + { + // Let's try to see if this viewing key can decrypt latest // note - scanned_data.div_map.insert(note_pos, *pa.diversifier()); - scanned_data.nf_map.insert(nf, note_pos); - // Note the account changes - let balance = transaction_delta - .entry(*vk) - .or_insert_with(I128Sum::zero); - *balance += I128Sum::from_nonnegative( - note.asset_type, - note.value as i128, - ) - .map_err(|()| { - Error::Other( - "found note with invalid value or asset type" - .to_string(), + let notes = scanned_data.pos_map.entry(*vk).or_default(); + let decres = try_sapling_note_decryption::<_, OutputDescription<<::SaplingAuth as masp_primitives::transaction::components::sapling::Authorization>::Proof>>( + &NETWORK, + 1.into(), + &PreparedIncomingViewingKey::new(&vk.ivk()), + so, + ); + // So this current viewing key does decrypt this current + // note... + if let Some((note, pa, memo)) = decres { + // Add this note to list of notes decrypted by this + // viewing key + notes.insert(note_pos); + // Compute the nullifier now to quickly recognize when + // spent + let nf = note.nf( + &vk.nk, + note_pos.try_into().map_err(|_| { + Error::Other( + "Can not get nullifier".to_string(), + ) + })?, + ); + scanned_data.note_map.insert(note_pos, note); + scanned_data.memo_map.insert(note_pos, memo); + // The payment address' diversifier is required to spend + // note + scanned_data + .div_map + .insert(note_pos, *pa.diversifier()); + scanned_data.nf_map.insert(nf, note_pos); + // Note the account changes + let balance = transaction_delta + .entry(*vk) + .or_insert_with(I128Sum::zero); + *balance += I128Sum::from_nonnegative( + note.asset_type, + note.value as i128, ) - })?; - scanned_data.vk_map.insert(note_pos, *vk); + .map_err(|()| { + Error::Other( + "found note with invalid value or asset type" + .to_string(), + ) + })?; + scanned_data.vk_map.insert(note_pos, *vk); + } + note_pos += 1; } 
- note_pos += 1; } } Ok((scanned_data, transaction_delta)) @@ -292,35 +300,37 @@ impl ShieldedContext { pub(super) fn nullify_spent_notes(&mut self) -> Result<(), Error> { for (_, _, decrypted_data) in self.decrypted_note_cache.drain() { let DecryptedData { - tx: shielded, + txs, delta: mut transaction_delta, } = decrypted_data; - // Cancel out those of our notes that have been spent - for ss in shielded - .sapling_bundle() - .map_or(&vec![], |x| &x.shielded_spends) - { - // If the shielded spend's nullifier is in our map, then target - // note is rendered unusable - if let Some(note_pos) = self.nf_map.get(&ss.nullifier) { - self.spents.insert(*note_pos); - // Note the account changes - let balance = transaction_delta - .entry(self.vk_map[note_pos]) - .or_insert_with(I128Sum::zero); - let note = self.note_map[note_pos]; - - *balance -= I128Sum::from_nonnegative( - note.asset_type, - note.value as i128, - ) - .map_err(|_| { - Error::Other( - "found note with invalid value or asset type" - .to_string(), + for shielded in txs { + // Cancel out those of our notes that have been spent + for ss in shielded + .sapling_bundle() + .map_or(&vec![], |x| &x.shielded_spends) + { + // If the shielded spend's nullifier is in our map, then + // target note is rendered unusable + if let Some(note_pos) = self.nf_map.get(&ss.nullifier) { + self.spents.insert(*note_pos); + // Note the account changes + let balance = transaction_delta + .entry(self.vk_map[note_pos]) + .or_insert_with(I128Sum::zero); + let note = self.note_map[note_pos]; + + *balance -= I128Sum::from_nonnegative( + note.asset_type, + note.value as i128, ) - })?; + .map_err(|_| { + Error::Other( + "found note with invalid value or asset type" + .to_string(), + ) + })?; + } } } } @@ -1266,7 +1276,6 @@ impl ShieldedContext { .index .checked_add(1) .expect("Tx index shouldn't overflow"), - inner_tx: TxCommitments::default(), } }); self.sync_status = ContextSyncStatus::Speculative; @@ -1278,7 +1287,7 @@ impl ShieldedContext 
{ ContextSyncStatus::Speculative, indexed_tx, &self.tx_note_map, - masp_tx, + &[masp_tx.clone()], &vk, )?; scanned_data.merge(scanned); @@ -1286,7 +1295,7 @@ impl ShieldedContext { indexed_tx, vk, DecryptedData { - tx: masp_tx.clone(), + txs: vec![masp_tx.clone()], delta: tx_delta, }, ); @@ -1529,7 +1538,7 @@ where indexed_tx, *vk, DecryptedData { - tx: stx.clone(), + txs: stx.clone(), delta: tx_delta, }, ); @@ -1769,9 +1778,8 @@ mod shielded_ctx_tests { IndexedTx { height: 1.into(), index: TxIndex(1), - inner_tx: Default::default(), }, - masp_tx.clone(), + vec![masp_tx.clone()], ))) .expect("Test failed"); masp_tx_sender @@ -1779,9 +1787,8 @@ mod shielded_ctx_tests { IndexedTx { height: 1.into(), index: TxIndex(2), - inner_tx: Default::default(), }, - masp_tx.clone(), + vec![masp_tx.clone()], ))) .expect("Test failed"); @@ -1810,12 +1817,10 @@ mod shielded_ctx_tests { IndexedTx { height: 1.into(), index: TxIndex(1), - inner_tx: Default::default(), }, IndexedTx { height: 1.into(), index: TxIndex(2), - inner_tx: Default::default(), }, ]); @@ -1825,7 +1830,6 @@ mod shielded_ctx_tests { IndexedTx { height: 1.into(), index: TxIndex(2), - inner_tx: Default::default(), } ); assert_eq!(shielded_ctx.note_map.len(), 2); @@ -1870,9 +1874,8 @@ mod shielded_ctx_tests { IndexedTx { height: 1.into(), index: Default::default(), - inner_tx: Default::default(), }, - masp_tx.clone(), + vec![masp_tx.clone()], ))) .expect("Test failed"); masp_tx_sender.send(None).expect("Test failed"); @@ -1933,9 +1936,8 @@ mod shielded_ctx_tests { IndexedTx { height: 2.into(), index: Default::default(), - inner_tx: Default::default(), }, - masp_tx.clone(), + vec![masp_tx.clone()], ))) .expect("Test failed"); masp_tx_sender @@ -1943,9 +1945,8 @@ mod shielded_ctx_tests { IndexedTx { height: 3.into(), index: Default::default(), - inner_tx: Default::default(), }, - masp_tx.clone(), + vec![masp_tx.clone()], ))) .expect("Test failed"); // this should not produce an error since we have fetched @@ -1991,9 
+1992,8 @@ mod shielded_ctx_tests { IndexedTx { height: 1.into(), index: TxIndex(1), - inner_tx: Default::default(), }, - masp_tx.clone(), + vec![masp_tx.clone()], ))) .expect("Test failed"); masp_tx_sender @@ -2001,9 +2001,8 @@ mod shielded_ctx_tests { IndexedTx { height: 1.into(), index: TxIndex(2), - inner_tx: Default::default(), }, - masp_tx.clone(), + vec![masp_tx.clone()], ))) .expect("Test failed"); @@ -2033,7 +2032,6 @@ mod shielded_ctx_tests { let expected = vec![IndexedTx { height: 1.into(), index: TxIndex(2), - inner_tx: Default::default(), }]; assert_eq!(keys, expected); } @@ -2062,9 +2060,8 @@ mod shielded_ctx_tests { IndexedTx { height: h.into(), index: TxIndex(1), - inner_tx: Default::default(), }, - masp_tx.clone(), + vec![masp_tx.clone()], ))) .expect("Test failed"); } @@ -2101,7 +2098,6 @@ mod shielded_ctx_tests { shielded_ctx.tx_note_map.remove(&IndexedTx { height: 18.into(), index: TxIndex(1), - inner_tx: Default::default(), }); shielded_ctx.save().await.expect("Test failed"); @@ -2112,9 +2108,8 @@ mod shielded_ctx_tests { IndexedTx { height: h.into(), index: TxIndex(1), - inner_tx: Default::default(), }, - masp_tx.clone(), + vec![masp_tx.clone()], ))) .expect("Test failed"); } @@ -2176,11 +2171,10 @@ mod shielded_ctx_tests { IndexedTx { height: 1.into(), index: TxIndex(1), - inner_tx: Default::default(), }, vk, DecryptedData { - tx: masp_tx.clone(), + txs: vec![masp_tx.clone()], delta: Default::default(), }, ); @@ -2190,7 +2184,6 @@ mod shielded_ctx_tests { &IndexedTx { height: 1.into(), index: TxIndex(1), - inner_tx: Default::default(), }, &vk )); @@ -2199,9 +2192,8 @@ mod shielded_ctx_tests { IndexedTx { height: 1.into(), index: TxIndex(1), - inner_tx: Default::default(), }, - masp_tx.clone(), + vec![masp_tx.clone()], ))) .expect("Test failed"); @@ -2231,7 +2223,6 @@ mod shielded_ctx_tests { IndexedTx { height: 1.into(), index: TxIndex(1), - inner_tx: Default::default(), }, ); assert_matches!( diff --git a/crates/sdk/src/masp/types.rs 
b/crates/sdk/src/masp/types.rs index 50d3219ffc..2cb76848ba 100644 --- a/crates/sdk/src/masp/types.rs +++ b/crates/sdk/src/masp/types.rs @@ -18,7 +18,7 @@ use masp_primitives::transaction::{ builder, Authorization, Authorized, Transaction, Unauthorized, }; use masp_primitives::zip32::{ExtendedFullViewingKey, ExtendedSpendingKey}; -use masp_proofs::bellman::groth16::PreparedVerifyingKey; +use masp_proofs::bellman::groth16::VerifyingKey; use masp_proofs::bls12_381::Bls12; use namada_core::address::Address; use namada_core::borsh::{BorshDeserialize, BorshSerialize}; @@ -31,17 +31,17 @@ use namada_macros::BorshDeserializer; #[cfg(feature = "migrations")] use namada_migrations::*; use namada_token as token; -use namada_tx::{IndexedTx, TxCommitments}; +use namada_tx::IndexedTx; use thiserror::Error; use crate::error::Error; use crate::masp::{ShieldedContext, ShieldedUtils}; /// Type alias for convenience and profit -pub type IndexedNoteData = BTreeMap; +pub type IndexedNoteData = BTreeMap>; /// Type alias for the entries of [`IndexedNoteData`] iterators -pub type IndexedNoteEntry = (IndexedTx, Transaction); +pub type IndexedNoteEntry = (IndexedTx, Vec); /// Represents the amount used of different conversions pub type Conversions = @@ -101,18 +101,14 @@ pub struct MaspTokenRewardData { pub locked_amount_target: Uint, } -/// The MASP transaction(s) found in a Namada tx. -#[derive(Debug, Clone)] -pub(crate) struct ExtractedMaspTxs(pub Vec<(TxCommitments, Transaction)>); - /// MASP verifying keys pub struct PVKs { /// spend verifying key - pub spend_vk: PreparedVerifyingKey, + pub spend_vk: VerifyingKey, /// convert verifying key - pub convert_vk: PreparedVerifyingKey, + pub convert_vk: VerifyingKey, /// output verifying key - pub output_vk: PreparedVerifyingKey, + pub output_vk: VerifyingKey, } #[derive(BorshSerialize, BorshDeserialize, Debug, Copy, Clone)] @@ -214,7 +210,7 @@ impl ScannedData { /// is not parallelizable). 
pub struct DecryptedData { /// The actual transaction - pub tx: Transaction, + pub txs: Vec, /// balance changes from the tx pub delta: TransactionDelta, } diff --git a/crates/sdk/src/masp/utils.rs b/crates/sdk/src/masp/utils.rs index 1de7b73e39..81e3fb0a9e 100644 --- a/crates/sdk/src/masp/utils.rs +++ b/crates/sdk/src/masp/utils.rs @@ -15,12 +15,12 @@ use masp_primitives::transaction::Transaction; use masp_primitives::zip32::{ExtendedFullViewingKey, ExtendedSpendingKey}; use masp_proofs::prover::LocalTxProver; use namada_core::collections::HashMap; +use namada_core::masp::MaspTxRefs; use namada_core::storage::{BlockHeight, TxIndex}; -use namada_core::token::Transfer; use namada_events::extend::{ + MaspTxBatchRefs as MaspTxBatchRefsAttr, MaspTxBlockIndex as MaspTxBlockIndexAttr, ReadFromEventAttributes, }; -use namada_ibc::IbcMessage; use namada_tx::{IndexedTx, Tx}; use rand_core::{CryptoRng, RngCore}; use tokio::sync::mpsc::{Receiver, Sender}; @@ -29,8 +29,8 @@ use crate::error::{Error, QueryError}; use crate::io::Io; use crate::masp::shielded_ctx::ShieldedContext; use crate::masp::types::{ - ContextSyncStatus, ExtractedMaspTxs, IndexedNoteEntry, PVKs, ScannedData, - TransactionDelta, Unscanned, + ContextSyncStatus, IndexedNoteEntry, PVKs, ScannedData, TransactionDelta, + Unscanned, }; use crate::masp::{ENV_VAR_MASP_PARAMS_DIR, VERIFIYING_KEYS}; use crate::queries::Client; @@ -140,7 +140,7 @@ pub(super) async fn get_indexed_masp_events_at_height( client: &C, height: BlockHeight, first_idx_to_query: Option, -) -> Result>, Error> { +) -> Result>, Error> { let first_idx_to_query = first_idx_to_query.unwrap_or_default(); Ok(client @@ -159,7 +159,13 @@ pub(super) async fn get_indexed_masp_events_at_height( .ok()?; if tx_index >= first_idx_to_query { - Some(tx_index) + let masp_section_refs = + MaspTxBatchRefsAttr::read_from_event_attributes( + &event.attributes, + ) + .ok()?; + + Some((tx_index, masp_section_refs)) } else { None } @@ -168,99 +174,36 @@ pub(super) async 
fn get_indexed_masp_events_at_height( })) } -/// Extract the relevant shielded portions of a [`Tx`], if any. -pub(super) async fn extract_masp_tx( +/// Extract the relevant shield portions of a [`Tx`], if any. +async fn extract_masp_tx( tx: &Tx, -) -> Result { + masp_section_refs: &MaspTxRefs, +) -> Result, Error> { // NOTE: simply looking for masp sections attached to the tx // is not safe. We don't validate the sections attached to a // transaction se we could end up with transactions carrying // an unnecessary masp section. We must instead look for the - // required masp sections in the signed commitments (hashes) - // of the transactions' data sections - let mut txs = vec![]; - - // Expect transaction - for cmt in tx.commitments() { - let tx_data = tx - .data(cmt) - .ok_or_else(|| Error::Other("Missing data section".to_string()))?; - let maybe_masp_tx = match Transfer::try_from_slice(&tx_data) { - Ok(transfer) => Some(transfer), - Err(_) => { - // This should be a MASP over IBC transaction, it - // could be a ShieldedTransfer or an Envelope - // message, need to try both - extract_payload_from_shielded_action(&tx_data).await.ok() - } - } - .map(|transfer| { - if let Some(hash) = transfer.shielded { - let masp_tx = tx - .get_section(&hash) - .ok_or_else(|| { - Error::Other( - "Missing masp section in transaction".to_string(), - ) - })? - .masp_tx() - .ok_or_else(|| { - Error::Other("Missing masp transaction".to_string()) - })?; - - Ok::<_, Error>(Some(masp_tx)) - } else { - Ok(None) + // required masp sections coming from the events + + masp_section_refs + .0 + .iter() + .try_fold(vec![], |mut acc, hash| { + match tx + .get_section(hash) + .and_then(|section| section.masp_tx()) + .ok_or_else(|| { + Error::Other( + "Missing expected masp transaction".to_string(), + ) + }) { + Ok(transaction) => { + acc.push(transaction); + Ok(acc) + } + Err(e) => Err(e), } }) - .transpose()? 
- .flatten(); - - if let Some(transaction) = maybe_masp_tx { - txs.push((cmt.to_owned(), transaction)); - } - } - - Ok(ExtractedMaspTxs(txs)) -} - -/// Extract the changed keys and Transaction hash from a MASP over ibc message -pub(super) async fn extract_payload_from_shielded_action( - tx_data: &[u8], -) -> Result { - let message = namada_ibc::decode_message(tx_data) - .map_err(|e| Error::Other(e.to_string()))?; - - let result = match message { - IbcMessage::Transfer(msg) => msg.transfer.ok_or_else(|| { - Error::Other("Missing masp tx in the ibc message".to_string()) - })?, - IbcMessage::NftTransfer(msg) => msg.transfer.ok_or_else(|| { - Error::Other("Missing masp tx in the ibc message".to_string()) - })?, - IbcMessage::RecvPacket(msg) => msg.transfer.ok_or_else(|| { - Error::Other("Missing masp tx in the ibc message".to_string()) - })?, - IbcMessage::AckPacket(msg) => { - // Refund tokens by the ack message - msg.transfer.ok_or_else(|| { - Error::Other("Missing masp tx in the ibc message".to_string()) - })? - } - IbcMessage::Timeout(msg) => { - // Refund tokens by the timeout message - msg.transfer.ok_or_else(|| { - Error::Other("Missing masp tx in the ibc message".to_string()) - })? 
- } - IbcMessage::Envelope(_) => { - return Err(Error::Other( - "Unexpected ibc message for masp".to_string(), - )); - } - }; - - Ok(result) } /// The updates to the commitment tree and witness maps @@ -372,35 +315,41 @@ where note_map_delta: Default::default(), }; let mut note_pos = updates.commitment_tree.size(); - for (indexed_tx, ref shielded) in tx_receiver { + for (indexed_tx, ref txs) in tx_receiver { updates.note_map_delta.insert(indexed_tx, note_pos); - for so in shielded - .sapling_bundle() - .map_or(&vec![], |x| &x.shielded_outputs) - { - // Create merkle tree leaf node from note commitment - let node = Node::new(so.cmu.to_repr()); - // Update each merkle tree in the witness map with the - // latest addition - for (_, witness) in updates.witness_map.iter_mut() { - witness.append(node).map_err(|()| { - Error::Other( - "note commitment tree is full".to_string(), - ) - })?; + for shielded in txs { + for so in shielded + .sapling_bundle() + .map_or(&vec![], |x| &x.shielded_outputs) + { + // Create merkle tree leaf node from note commitment + let node = Node::new(so.cmu.to_repr()); + // Update each merkle tree in the witness map with + // the latest addition + for (_, witness) in updates.witness_map.iter_mut() { + witness.append(node).map_err(|()| { + Error::Other( + "note commitment tree is full" + .to_string(), + ) + })?; + } + updates.commitment_tree.append(node).map_err( + |()| { + Error::Other( + "note commitment tree is full" + .to_string(), + ) + }, + )?; + // Finally, make it easier to construct merkle paths + // to this new note + let witness = IncrementalWitness::::from_tree( + &updates.commitment_tree, + ); + updates.witness_map.insert(note_pos, witness); + note_pos += 1; } - updates.commitment_tree.append(node).map_err(|()| { - Error::Other( - "note commitment tree is full".to_string(), - ) - })?; - // Finally, make it easier to construct merkle paths to - // this new note - let witness = IncrementalWitness::::from_tree( - 
&updates.commitment_tree, - ); - updates.witness_map.insert(note_pos, witness); - note_pos += 1; } } Ok(updates) @@ -427,7 +376,7 @@ where continue; } - let txs_results = match get_indexed_masp_events_at_height::( + let txs_results = match get_indexed_masp_events_at_height( self.client, height.into(), None, @@ -454,20 +403,19 @@ where .block .data; - for idx in txs_results { + for (idx, masp_sections_refs) in txs_results { let tx = Tx::try_from(block[idx.0 as usize].as_ref()) .map_err(|e| Error::Other(e.to_string()))?; - let extracted_masp_txs = extract_masp_tx(&tx).await?; - for (inner_tx, transaction) in extracted_masp_txs.0 { - tx_sender.send(( - IndexedTx { - height: height.into(), - index: idx, - inner_tx, - }, - transaction, - )) - } + let extracted_masp_txs = + extract_masp_tx(&tx, &masp_sections_refs).await?; + + tx_sender.send(( + IndexedTx { + height: height.into(), + index: idx, + }, + extracted_masp_txs, + )); } fetch_iter.next(); } @@ -698,7 +646,7 @@ impl TaskScheduler { sync_status: ContextSyncStatus, indexed_tx: IndexedTx, tx_note_map: &BTreeMap, - shielded: &Transaction, + shielded: &[Transaction], vk: &ViewingKey, ) -> Result<(ScannedData, TransactionDelta), Error> { let res = ShieldedContext::::scan_tx( diff --git a/wasm/Cargo.lock b/wasm/Cargo.lock index 99bb9183e2..f87f4ce646 100644 --- a/wasm/Cargo.lock +++ b/wasm/Cargo.lock @@ -319,7 +319,7 @@ dependencies = [ "pin-project-lite", "rustversion", "serde", - "sync_wrapper", + "sync_wrapper 0.1.2", "tower", "tower-layer", "tower-service", @@ -2167,6 +2167,18 @@ dependencies = [ "paste", ] +[[package]] +name = "flume" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55ac459de2512911e4b674ce33cf20befaba382d05b62b008afc1c8b57cbf181" +dependencies = [ + "futures-core", + "futures-sink", + "nanorand", + "spin 0.9.8", +] + [[package]] name = "fnv" version = "1.0.7" @@ -2264,6 +2276,7 @@ checksum = 
"45ec6fe3675af967e67c5536c0b9d44e34e6c52f86bedc4ea49c5317b8e94d06" dependencies = [ "futures-channel", "futures-task", + "tokio", ] [[package]] @@ -4089,7 +4102,9 @@ dependencies = [ "ethers", "eyre", "fd-lock", + "flume", "futures", + "futures-locks", "itertools 0.12.1", "jubjub", "lazy_static", @@ -4122,6 +4137,7 @@ dependencies = [ "prost", "rand 0.8.5", "rand_core 0.6.4", + "rayon", "regex", "ripemd", "serde", @@ -4129,6 +4145,7 @@ dependencies = [ "sha2 0.9.9", "slip10_ed25519", "smooth-operator", + "sync_wrapper 1.0.1", "tendermint-rpc", "thiserror", "tiny-bip39", @@ -4371,6 +4388,15 @@ dependencies = [ "sha2 0.9.9", ] +[[package]] +name = "nanorand" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a51313c5820b0b02bd422f4b44776fbf47961755c74ce64afc73bfad10226c3" +dependencies = [ + "getrandom 0.2.15", +] + [[package]] name = "nonempty" version = "0.7.0" @@ -6046,6 +6072,9 @@ name = "spin" version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" +dependencies = [ + "lock_api", +] [[package]] name = "spki" @@ -6193,6 +6222,12 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" +[[package]] +name = "sync_wrapper" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7065abeca94b6a8a577f9bd45aa0867a2238b74e8eb67cf10d492bc39351394" + [[package]] name = "system-configuration" version = "0.5.1" diff --git a/wasm_for_tests/Cargo.lock b/wasm_for_tests/Cargo.lock index 330d229b52..11a85458dc 100644 --- a/wasm_for_tests/Cargo.lock +++ b/wasm_for_tests/Cargo.lock @@ -319,7 +319,7 @@ dependencies = [ "pin-project-lite", "rustversion", "serde", - "sync_wrapper", + "sync_wrapper 0.1.2", "tower", "tower-layer", "tower-service", @@ -2167,6 +2167,18 @@ dependencies = [ 
"paste", ] +[[package]] +name = "flume" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55ac459de2512911e4b674ce33cf20befaba382d05b62b008afc1c8b57cbf181" +dependencies = [ + "futures-core", + "futures-sink", + "nanorand", + "spin 0.9.8", +] + [[package]] name = "fnv" version = "1.0.7" @@ -2264,6 +2276,7 @@ checksum = "45ec6fe3675af967e67c5536c0b9d44e34e6c52f86bedc4ea49c5317b8e94d06" dependencies = [ "futures-channel", "futures-task", + "tokio", ] [[package]] @@ -4044,7 +4057,9 @@ dependencies = [ "ethers", "eyre", "fd-lock", + "flume", "futures", + "futures-locks", "itertools 0.12.1", "jubjub", "lazy_static", @@ -4075,6 +4090,7 @@ dependencies = [ "prost", "rand 0.8.5", "rand_core 0.6.4", + "rayon", "regex", "ripemd", "serde", @@ -4082,6 +4098,7 @@ dependencies = [ "sha2 0.9.9", "slip10_ed25519", "smooth-operator", + "sync_wrapper 1.0.1", "tendermint-rpc", "thiserror", "tiny-bip39", @@ -4316,6 +4333,15 @@ dependencies = [ "sha2 0.9.9", ] +[[package]] +name = "nanorand" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a51313c5820b0b02bd422f4b44776fbf47961755c74ce64afc73bfad10226c3" +dependencies = [ + "getrandom 0.2.15", +] + [[package]] name = "nonempty" version = "0.7.0" @@ -5983,6 +6009,9 @@ name = "spin" version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" +dependencies = [ + "lock_api", +] [[package]] name = "spki" @@ -6130,6 +6159,12 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" +[[package]] +name = "sync_wrapper" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7065abeca94b6a8a577f9bd45aa0867a2238b74e8eb67cf10d492bc39351394" + [[package]] name = "system-configuration" version = "0.5.1" 
From bdb61c317229ea204b10cb04821d2f8a2858ff94 Mon Sep 17 00:00:00 2001 From: satan Date: Tue, 4 Jun 2024 09:55:43 +0200 Subject: [PATCH 29/29] added print statements to try and debug in ci --- crates/apps_lib/src/client/masp.rs | 2 +- crates/sdk/src/masp/shielded_ctx.rs | 5 ++++- crates/sdk/src/masp/utils.rs | 8 ++++++-- 3 files changed, 11 insertions(+), 4 deletions(-) diff --git a/crates/apps_lib/src/client/masp.rs b/crates/apps_lib/src/client/masp.rs index d715b2ddae..8a84610bb9 100644 --- a/crates/apps_lib/src/client/masp.rs +++ b/crates/apps_lib/src/client/masp.rs @@ -44,7 +44,7 @@ pub async fn syncing< shielded_utils, client, &logger, - RetryStrategy::Forever, + RetryStrategy::Times(1), start_query_height, last_query_height, batch_size, diff --git a/crates/sdk/src/masp/shielded_ctx.rs b/crates/sdk/src/masp/shielded_ctx.rs index 915747fc14..e53b0763a1 100644 --- a/crates/sdk/src/masp/shielded_ctx.rs +++ b/crates/sdk/src/masp/shielded_ctx.rs @@ -3,6 +3,7 @@ use std::cmp::Ordering; use std::collections::{btree_map, BTreeMap, BTreeSet}; use std::convert::TryInto; +use std::sync::atomic::AtomicU64; use borsh::{BorshDeserialize, BorshSerialize}; use borsh_ext::BorshSerializeExt; @@ -1489,7 +1490,7 @@ where // scheduled by the scheduler. let (task_scheduler, mut task_manager) = TaskManager::::new(ctx.clone(), callback); - + let counter = AtomicU64::default(); // The main loop that performs // * fetching and caching MASP txs in sequence // * trial decryption of each note to determine if it is owned by a viewing @@ -1511,6 +1512,7 @@ where // YOU COULD ACCIDENTALLY FREEZE EVERYTHING let txs = progress.scan(fetch_recv); txs.par_bridge().try_for_each(|(indexed_tx, stx)| { + counter.fetch_add(1, core::sync::atomic::Ordering::SeqCst); let decrypted_note_cache = ctx.decrypted_note_cache.clone(); let mut new_scanned_data = ScannedData::default(); @@ -1574,6 +1576,7 @@ where }); // shut down the scanning thread. 
decryption_handle.join().unwrap()?; + println!("\n\n\n\n\nNUMBER OF SCANNED TXS {}\n\n\n\n\n", counter.load(core::sync::atomic::Ordering::SeqCst)); // if the scanning process errored, return that error here and // exit. decrypt_res?; diff --git a/crates/sdk/src/masp/utils.rs b/crates/sdk/src/masp/utils.rs index 81e3fb0a9e..6a4cc34429 100644 --- a/crates/sdk/src/masp/utils.rs +++ b/crates/sdk/src/masp/utils.rs @@ -536,6 +536,7 @@ pub struct TaskManager { ctx: Arc>>, // for testing purposes callback: Option, + saves: u64, } #[derive(Clone)] @@ -566,6 +567,7 @@ impl TaskManager { latest_idx: Default::default(), ctx: Arc::new(futures_locks::Mutex::new(ctx)), callback, + saves: 0, }, ) } @@ -607,7 +609,8 @@ impl TaskManager { } // updated the spent notes and balances locked.nullify_spent_notes()?; - _ = locked.save().await; + locked.save().await.unwrap(); + println!("\n\n\n\n\n\n\n\n Number of saves {}\n\n\n\n\n\n", self.saves); } Ok(true) } @@ -620,7 +623,8 @@ impl TaskManager { let mut locked = self.ctx.lock().await; scanned.apply_to(&mut locked); // persist the changes - _ = locked.save().await; + locked.save().await.unwrap(); + self.saves += 1; Ok(false) } }