diff --git a/.changelog/unreleased/improvements/3744-mask-sdk-refactor.md b/.changelog/unreleased/improvements/3744-mask-sdk-refactor.md new file mode 100644 index 0000000000..3da54fa92e --- /dev/null +++ b/.changelog/unreleased/improvements/3744-mask-sdk-refactor.md @@ -0,0 +1,2 @@ +- Factored most of the masp code out of the sdk and into shielded token crate. These + required the creation of two futher crates: "namada_io" and "namada_wallet". ([\#3744](https://github.com/anoma/namada/pull/3744)) \ No newline at end of file diff --git a/Cargo.lock b/Cargo.lock index b024136a0c..66c134bad5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -318,9 +318,9 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.77" +version = "0.1.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c980ee35e870bd1a4d2c8294d4c04d0499e67bca1e4b5cefcc693c2fa00caea9" +checksum = "6e0c28dcc82d7c8ead5cb13beb15405b57b8546e93215673ff8ca0349a028107" dependencies = [ "proc-macro2", "quote", @@ -3045,9 +3045,9 @@ checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" [[package]] name = "hermit-abi" -version = "0.3.3" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d77f7ec81a6d05a3abb01ab6eb7590f6083d08449fe5a1c8b1e620283546ccb7" +checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" [[package]] name = "hex" @@ -4581,13 +4581,14 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.11" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" +checksum = "80e04d1dcff3aae0704555fe5fee3bcfaf3d1fdf8a7e521d5b9d2b42acb52cec" dependencies = [ + "hermit-abi", "libc", "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -4688,6 +4689,7 @@ dependencies = [ "namada_migrations", "namada_sdk", "namada_vm", + 
"namada_wallet", "pretty_assertions", "proptest", "prost 0.12.3", @@ -4782,6 +4784,7 @@ dependencies = [ "prost-types 0.12.3", "rand 0.8.5", "rand_core 0.6.4", + "rayon", "ripemd", "serde", "serde_json", @@ -4994,6 +4997,18 @@ dependencies = [ "tracing", ] +[[package]] +name = "namada_io" +version = "0.43.0" +dependencies = [ + "async-trait", + "kdam", + "namada_core", + "tendermint-rpc", + "thiserror", + "tokio", +] + [[package]] name = "namada_light_sdk" version = "0.43.0" @@ -5180,26 +5195,22 @@ dependencies = [ "arbitrary", "assert_matches", "async-trait", - "base58", "bimap", "borsh", "borsh-ext", "circular-queue", "clap", "data-encoding", - "derivation-path", "duration-str", "either", "ethbridge-bridge-contract", "ethers", "eyre", "fd-lock", - "flume", "futures", "init-once", "itertools 0.12.1", "jubjub 0.10.0 (git+https://github.com/heliaxdev/jubjub.git?rev=a373686962f4e9d0edb3b4716f86ff6bbd9aa86c)", - "kdam", "lazy_static", "linkme", "masp_primitives", @@ -5211,6 +5222,7 @@ dependencies = [ "namada_gas", "namada_governance", "namada_ibc", + "namada_io", "namada_macros", "namada_migrations", "namada_parameters", @@ -5222,9 +5234,9 @@ dependencies = [ "namada_vm", "namada_vote_ext", "namada_vp", + "namada_wallet", "num-traits 0.2.17", "num256", - "orion", "owo-colors", "paste", "patricia_tree", @@ -5238,17 +5250,14 @@ dependencies = [ "serde", "serde_json", "sha2 0.9.9", - "slip10_ed25519", "smooth-operator", "tempfile", "tendermint-rpc", "thiserror", "tiny-bip39", - "tiny-hderive", "tokio", "toml 0.5.11", "tracing", - "typed-builder", "xorf", "zeroize", ] @@ -5257,30 +5266,47 @@ dependencies = [ name = "namada_shielded_token" version = "0.43.0" dependencies = [ + "async-trait", "borsh", + "eyre", + "flume", + "futures", + "itertools 0.12.1", "lazy_static", + "linkme", "masp_primitives", "masp_proofs", "namada_account", "namada_controller", "namada_core", + "namada_events", "namada_gas", + "namada_io", + "namada_macros", + "namada_migrations", 
"namada_parameters", "namada_state", "namada_systems", "namada_trans_token", "namada_tx", "namada_vp", + "namada_wallet", "proptest", + "rand 0.8.5", "rand_core 0.6.4", "rayon", "ripemd", "serde", + "serde_json", "sha2 0.9.9", "smooth-operator", + "tempfile", "test-log", "thiserror", + "tokio", "tracing", + "typed-builder", + "xorf", ] [[package]] @@ -5409,9 +5435,12 @@ version = "0.43.0" dependencies = [ "arbitrary", "borsh", + "linkme", + "masp_primitives", "namada_core", "namada_events", "namada_macros", + "namada_migrations", "namada_shielded_token", "namada_systems", "namada_trans_token", @@ -5611,6 +5640,36 @@ dependencies = [ "sha2 0.9.9", ] +[[package]] +name = "namada_wallet" +version = "0.43.0" +dependencies = [ + "base58", + "bimap", + "borsh", + "borsh-ext", + "data-encoding", + "derivation-path", + "fd-lock", + "itertools 0.12.1", + "masp_primitives", + "namada_core", + "namada_ibc", + "namada_macros", + "namada_migrations", + "orion", + "rand 0.8.5", + "rand_core 0.6.4", + "serde", + "slip10_ed25519", + "smooth-operator", + "thiserror", + "tiny-bip39", + "tiny-hderive", + "toml 0.5.11", + "zeroize", +] + [[package]] name = "nanorand" version = "0.7.0" @@ -8394,21 +8453,20 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.34.0" +version = "1.39.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0c014766411e834f7af5b8f4cf46257aab4036ca95e9d2c144a10f59ad6f5b9" +checksum = "9babc99b9923bfa4804bd74722ff02c0381021eafa4db9949217e3be8e84fff5" dependencies = [ "backtrace", "bytes", "libc", "mio", - "num_cpus", "parking_lot", "pin-project-lite", "signal-hook-registry", "socket2 0.5.5", "tokio-macros", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -8423,9 +8481,9 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "2.2.0" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" +checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", diff --git a/Cargo.toml b/Cargo.toml index e483d554bc..10cb44fa03 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,6 +14,7 @@ members = [ "crates/gas", "crates/governance", "crates/ibc", + "crates/io", "crates/light_sdk", "crates/macros", "crates/migrations", @@ -40,6 +41,7 @@ members = [ "crates/vp", "crates/vp_env", "crates/vp_prelude", + "crates/wallet", "examples", "fuzz", ] diff --git a/crates/apps_lib/Cargo.toml b/crates/apps_lib/Cargo.toml index d86d20844a..c0fd42e600 100644 --- a/crates/apps_lib/Cargo.toml +++ b/crates/apps_lib/Cargo.toml @@ -35,6 +35,7 @@ namada_macros = {path = "../macros"} namada_migrations = {path = "../migrations", optional = true} namada_sdk = {path = "../sdk", features = ["download-params", "multicore"]} namada_vm = {path = "../vm"} +namada_wallet = { path = "../wallet", features = ["std"]} async-trait.workspace = true base64.workspace = true diff --git a/crates/apps_lib/src/cli.rs b/crates/apps_lib/src/cli.rs index 6781374b97..7124945d0f 100644 --- a/crates/apps_lib/src/cli.rs +++ b/crates/apps_lib/src/cli.rs @@ -3220,6 +3220,7 @@ pub mod args { use std::str::FromStr; use data_encoding::HEXUPPER; + use namada_core::masp::{MaspEpoch, PaymentAddress}; use namada_sdk::address::{Address, EstablishedAddress}; pub use namada_sdk::args::*; use namada_sdk::chain::{ChainId, ChainIdPrefix}; @@ -3231,7 +3232,6 @@ pub mod args { use namada_sdk::keccak::KeccakHash; use namada_sdk::key::*; use namada_sdk::masp::utils::RetryStrategy; - use namada_sdk::masp::{MaspEpoch, PaymentAddress}; use namada_sdk::storage::{self, BlockHeight, Epoch}; use namada_sdk::time::DateTimeUtc; use namada_sdk::token::NATIVE_MAX_DECIMAL_PLACES; diff --git a/crates/apps_lib/src/cli/api.rs b/crates/apps_lib/src/cli/api.rs index efe19e0a62..d9737b5174 100644 --- 
a/crates/apps_lib/src/cli/api.rs +++ b/crates/apps_lib/src/cli/api.rs @@ -1,6 +1,5 @@ use namada_sdk::error::Error; -use namada_sdk::io::Io; -use namada_sdk::queries::Client; +use namada_sdk::io::{Client, Io}; use namada_sdk::rpc::wait_until_node_is_synched; use crate::tendermint_rpc::client::CompatMode; diff --git a/crates/apps_lib/src/cli/client.rs b/crates/apps_lib/src/cli/client.rs index 6724f24a73..fb708d235f 100644 --- a/crates/apps_lib/src/cli/client.rs +++ b/crates/apps_lib/src/cli/client.rs @@ -1,8 +1,9 @@ use std::io::Read; use color_eyre::eyre::Result; -use namada_sdk::io::Io; -use namada_sdk::{display_line, Namada, NamadaImpl}; +use namada_sdk::io::{display_line, Io, NamadaIo}; +use namada_sdk::masp::ShieldedContext; +use namada_sdk::{Namada, NamadaImpl}; use crate::cli; use crate::cli::api::{CliApi, CliClient}; @@ -353,7 +354,7 @@ impl CliApi { ); crate::client::masp::syncing( - chain_ctx.shielded, + ShieldedContext::new(chain_ctx.shielded), client, args, &io, diff --git a/crates/apps_lib/src/cli/context.rs b/crates/apps_lib/src/cli/context.rs index aa6c05bf2a..16f4304f98 100644 --- a/crates/apps_lib/src/cli/context.rs +++ b/crates/apps_lib/src/cli/context.rs @@ -6,6 +6,10 @@ use std::path::{Path, PathBuf}; use std::str::FromStr; use color_eyre::eyre::Result; +use namada_core::masp::{ + BalanceOwner, ExtendedSpendingKey, ExtendedViewingKey, PaymentAddress, + TransferSource, TransferTarget, +}; use namada_sdk::address::{Address, InternalAddress}; use namada_sdk::chain::ChainId; use namada_sdk::ethereum_events::EthAddress; @@ -13,7 +17,7 @@ use namada_sdk::ibc::trace::{ibc_token, is_ibc_denom, is_nft_trace}; use namada_sdk::io::Io; use namada_sdk::key::*; use namada_sdk::masp::fs::FsShieldedUtils; -use namada_sdk::masp::{ShieldedContext, *}; +use namada_sdk::masp::ShieldedWallet; use namada_sdk::wallet::{DatedSpendingKey, DatedViewingKey, Wallet}; use namada_sdk::{Namada, NamadaImpl}; @@ -128,7 +132,7 @@ pub struct ChainContext { /// The ledger 
configuration for a specific chain ID pub config: Config, /// The context fr shielded operations - pub shielded: ShieldedContext, + pub shielded: ShieldedWallet, /// Native token's address pub native_token: Address, } @@ -231,7 +235,7 @@ impl Context { /// Make an implementation of Namada from this object and parameters. pub fn to_sdk(self, client: C, io: IO) -> impl Namada where - C: namada_sdk::queries::Client + Sync, + C: namada_sdk::io::Client + Sync, IO: Io, { let chain_ctx = self.take_chain_or_exit(); diff --git a/crates/apps_lib/src/cli/relayer.rs b/crates/apps_lib/src/cli/relayer.rs index fc84673a36..e539a6143d 100644 --- a/crates/apps_lib/src/cli/relayer.rs +++ b/crates/apps_lib/src/cli/relayer.rs @@ -14,7 +14,7 @@ impl CliApi { where C: CliClient, { - use namada_sdk::display_line; + use namada_sdk::io::display_line; display_line!(&io, "The Namada Ethereum bridge is disabled"); Ok(()) diff --git a/crates/apps_lib/src/cli/wallet.rs b/crates/apps_lib/src/cli/wallet.rs index 9145d3ca80..e6a763f234 100644 --- a/crates/apps_lib/src/cli/wallet.rs +++ b/crates/apps_lib/src/cli/wallet.rs @@ -10,16 +10,14 @@ use color_eyre::eyre::Result; use itertools::sorted; use ledger_namada_rs::{BIP44Path, NamadaApp}; use namada_core::chain::BlockHeight; +use namada_core::masp::{ExtendedSpendingKey, MaspValue, PaymentAddress}; use namada_sdk::address::{Address, DecodeError}; -use namada_sdk::io::Io; +use namada_sdk::io::{display_line, edisplay_line, Io}; use namada_sdk::key::*; -use namada_sdk::masp::{ - find_valid_diversifier, ExtendedSpendingKey, MaspValue, PaymentAddress, -}; +use namada_sdk::masp::find_valid_diversifier; use namada_sdk::wallet::{ DecryptionError, DerivationPath, DerivationPathError, FindKeyError, Wallet, }; -use namada_sdk::{display_line, edisplay_line}; use rand_core::OsRng; use crate::cli; diff --git a/crates/apps_lib/src/client/masp.rs b/crates/apps_lib/src/client/masp.rs index 3a6abd32bd..7e81ac07fd 100644 --- a/crates/apps_lib/src/client/masp.rs +++ 
b/crates/apps_lib/src/client/masp.rs @@ -6,13 +6,11 @@ use namada_sdk::control_flow::install_shutdown_signal; use namada_sdk::error::Error; #[cfg(any(test, feature = "testing"))] use namada_sdk::io::DevNullProgressBar; -use namada_sdk::io::Io; -use namada_sdk::masp::utils::{IndexerMaspClient, LedgerMaspClient}; +use namada_sdk::io::{display, display_line, Client, Io, MaybeSend, MaybeSync}; use namada_sdk::masp::{ - MaspLocalTaskEnv, ShieldedContext, ShieldedSyncConfig, ShieldedUtils, + IndexerMaspClient, LedgerMaspClient, MaspLocalTaskEnv, ShieldedContext, + ShieldedSyncConfig, ShieldedUtils, }; -use namada_sdk::queries::Client; -use namada_sdk::{display, display_line, MaybeSend, MaybeSync}; #[allow(clippy::too_many_arguments)] pub async fn syncing< @@ -88,7 +86,8 @@ pub async fn syncing< .retry_strategy(args.retry_strategy) .build(); - let env = MaspLocalTaskEnv::new(500)?; + let env = MaspLocalTaskEnv::new(500) + .map_err(|e| Error::Other(e.to_string()))?; let ctx = shielded .sync( env, @@ -98,7 +97,8 @@ pub async fn syncing< &vks, ) .await - .map(|_| shielded); + .map(|_| shielded) + .map_err(|e| Error::Other(e.to_string())); display!(io, "\nSyncing finished\n"); diff --git a/crates/apps_lib/src/client/rpc.rs b/crates/apps_lib/src/client/rpc.rs index a642bd4856..6c2a1a929b 100644 --- a/crates/apps_lib/src/client/rpc.rs +++ b/crates/apps_lib/src/client/rpc.rs @@ -10,6 +10,7 @@ use masp_primitives::merkle_tree::MerklePath; use masp_primitives::sapling::Node; use masp_primitives::transaction::components::I128Sum; use masp_primitives::zip32::ExtendedFullViewingKey; +use namada_core::masp::{BalanceOwner, MaspEpoch}; use namada_sdk::address::{Address, InternalAddress, MASP}; use namada_sdk::chain::{BlockHeight, Epoch}; use namada_sdk::collections::{HashMap, HashSet}; @@ -25,16 +26,17 @@ use namada_sdk::governance::storage::proposal::{ use namada_sdk::governance::utils::{ProposalVotes, VotePower}; use namada_sdk::governance::ProposalVote; use namada_sdk::hash::Hash; 
-use namada_sdk::io::Io; +use namada_sdk::io::{display, display_line, edisplay_line, Client, Io}; use namada_sdk::key::*; -use namada_sdk::masp::{BalanceOwner, MaspEpoch, MaspTokenRewardData}; +use namada_sdk::masp::shielded_wallet::ShieldedApi; +use namada_sdk::masp::MaspTokenRewardData; use namada_sdk::parameters::{storage as param_storage, EpochDuration}; use namada_sdk::proof_of_stake::types::{ CommissionPair, Slash, ValidatorMetaData, ValidatorState, ValidatorStateInfo, WeightedValidator, }; use namada_sdk::proof_of_stake::PosParams; -use namada_sdk::queries::{Client, RPC}; +use namada_sdk::queries::RPC; use namada_sdk::rpc::{ self, enriched_bonds_and_unbonds, format_denominated_amount, query_epoch, TxResponse, @@ -44,10 +46,7 @@ use namada_sdk::tendermint_rpc::endpoint::status; use namada_sdk::token::MaspDigitPos; use namada_sdk::tx::display_batch_resp; use namada_sdk::wallet::AddressVpType; -use namada_sdk::{ - display, display_line, edisplay_line, error, state as storage, token, - Namada, -}; +use namada_sdk::{error, state as storage, token, Namada}; use crate::cli::{self, args}; use crate::tendermint::merkle::proof::ProofOps; @@ -145,7 +144,7 @@ pub async fn query_block(context: &impl Namada) { } /// Query the results of the last committed block -pub async fn query_results( +pub async fn query_results( client: &C, _args: args::Query, ) -> Vec { @@ -338,7 +337,7 @@ pub async fn query_proposal(context: &impl Namada, args: args::QueryProposal) { } /// Query proposal by Id -pub async fn query_proposal_by_id( +pub async fn query_proposal_by_id( client: &C, proposal_id: u64, ) -> Result, error::Error> { @@ -771,7 +770,7 @@ pub async fn query_protocol_parameters( ); } -pub async fn query_bond( +pub async fn query_bond( client: &C, source: &Address, validator: &Address, @@ -782,9 +781,7 @@ pub async fn query_bond( ) } -pub async fn query_unbond_with_slashing< - C: namada_sdk::queries::Client + Sync, ->( +pub async fn query_unbond_with_slashing( client: &C, source: 
&Address, validator: &Address, @@ -797,15 +794,13 @@ pub async fn query_unbond_with_slashing< ) } -pub async fn query_pos_parameters( - client: &C, -) -> PosParams { +pub async fn query_pos_parameters(client: &C) -> PosParams { unwrap_client_response::( RPC.vp().pos().pos_params(client).await, ) } -pub async fn query_consensus_keys( +pub async fn query_consensus_keys( client: &C, ) -> BTreeSet { unwrap_client_response::>( @@ -813,19 +808,19 @@ pub async fn query_consensus_keys( ) } -pub async fn query_pgf_stewards( +pub async fn query_pgf_stewards( client: &C, ) -> Vec { unwrap_client_response::(RPC.vp().pgf().stewards(client).await) } -pub async fn query_pgf_fundings( +pub async fn query_pgf_fundings( client: &C, ) -> Vec { unwrap_client_response::(RPC.vp().pgf().funding(client).await) } -pub async fn query_pgf_parameters( +pub async fn query_pgf_parameters( client: &C, ) -> PgfParameters { unwrap_client_response::(RPC.vp().pgf().parameters(client).await) @@ -872,9 +867,7 @@ pub async fn query_and_print_unbonds( } } -pub async fn query_withdrawable_tokens< - C: namada_sdk::queries::Client + Sync, ->( +pub async fn query_withdrawable_tokens( client: &C, bond_source: &Address, validator: &Address, @@ -1129,7 +1122,7 @@ pub async fn query_bonded_stake( /// Query and return validator's commission rate and max commission rate change /// per epoch -pub async fn query_commission_rate( +pub async fn query_commission_rate( client: &C, validator: &Address, epoch: Option, @@ -1143,7 +1136,7 @@ pub async fn query_commission_rate( } /// Query and return validator's metadata -pub async fn query_metadata( +pub async fn query_metadata( client: &C, validator: &Address, ) -> Option { @@ -1153,7 +1146,7 @@ pub async fn query_metadata( } /// Query and return validator's state -pub async fn query_validator_state( +pub async fn query_validator_state( client: &C, validator: &Address, epoch: Option, @@ -1167,7 +1160,7 @@ pub async fn query_validator_state( } /// Query and return the 
available reward tokens corresponding to the bond -pub async fn query_rewards( +pub async fn query_rewards( client: &C, source: &Option
, validator: &Address, @@ -1661,7 +1654,7 @@ pub async fn query_find_validator( } /// Get account's public key stored in its storage sub-space -pub async fn get_public_key( +pub async fn get_public_key( client: &C, address: &Address, index: u8, @@ -1670,7 +1663,7 @@ pub async fn get_public_key( } /// Check if the given address has any bonds. -pub async fn is_validator( +pub async fn is_validator( client: &C, address: &Address, ) -> bool { @@ -1680,7 +1673,7 @@ pub async fn is_validator( } /// Check if a given address is a known delegator -pub async fn is_delegator( +pub async fn is_delegator( client: &C, address: &Address, ) -> bool { @@ -1689,7 +1682,7 @@ pub async fn is_delegator( .unwrap() } -pub async fn is_delegator_at( +pub async fn is_delegator_at( client: &C, address: &Address, epoch: Epoch, @@ -1700,7 +1693,7 @@ pub async fn is_delegator_at( } /// Check if the given address has any bonds. -pub async fn has_bonds( +pub async fn has_bonds( client: &C, address: &Address, ) -> bool { @@ -1710,7 +1703,7 @@ pub async fn has_bonds( /// Check if the address exists on chain. Established address exists if it has a /// stored validity predicate. Implicit and internal addresses always return /// true. -pub async fn known_address( +pub async fn known_address( client: &C, address: &Address, ) -> bool { @@ -1798,7 +1791,7 @@ pub async fn query_conversions( } /// Query a conversion. -pub async fn query_conversion( +pub async fn query_conversion( client: &C, asset_type: AssetType, ) -> Option<( @@ -1848,7 +1841,7 @@ pub async fn query_wasm_code_hash( } /// Query a storage value and decode it with [`BorshDeserialize`]. -pub async fn query_storage_value( +pub async fn query_storage_value( client: &C, key: &storage::Key, ) -> Result @@ -1859,9 +1852,7 @@ where } /// Query a storage value and the proof without decoding. 
-pub async fn query_storage_value_bytes< - C: namada_sdk::queries::Client + Sync, ->( +pub async fn query_storage_value_bytes( client: &C, key: &storage::Key, height: Option, @@ -1886,7 +1877,7 @@ where } /// Query to check if the given storage key exists. -pub async fn query_has_storage_key( +pub async fn query_has_storage_key( client: &C, key: &storage::Key, ) -> bool { @@ -1897,11 +1888,10 @@ pub async fn query_has_storage_key( /// Call the corresponding `tx_event_query` RPC method, to fetch /// the current status of a transaction. -pub async fn query_tx_events( +pub async fn query_tx_events( client: &C, tx_event_query: namada_sdk::rpc::TxEventQuery<'_>, -) -> std::result::Result, ::Error> -{ +) -> std::result::Result, ::Error> { namada_sdk::rpc::query_tx_events(client, tx_event_query).await } @@ -1951,7 +1941,7 @@ pub async fn epoch_sleep(context: &impl Namada, _args: args::Query) { } } -pub async fn get_bond_amount_at( +pub async fn get_bond_amount_at( client: &C, delegator: &Address, validator: &Address, @@ -1966,7 +1956,7 @@ pub async fn get_bond_amount_at( Some(total_active) } -pub async fn get_all_validators( +pub async fn get_all_validators( client: &C, epoch: Epoch, ) -> HashSet
{ @@ -1975,7 +1965,7 @@ pub async fn get_all_validators( .unwrap() } -pub async fn get_total_staked_tokens( +pub async fn get_total_staked_tokens( client: &C, epoch: Epoch, ) -> token::Amount { @@ -1988,7 +1978,7 @@ pub async fn get_total_staked_tokens( /// sum of validator's self-bonds and delegations to their address. /// Returns `None` when the given address is not a validator address. For a /// validator with `0` stake, this returns `Ok(token::Amount::zero())`. -async fn get_validator_stake( +async fn get_validator_stake( client: &C, epoch: Epoch, validator: &Address, @@ -2001,9 +1991,7 @@ async fn get_validator_stake( ) } -pub async fn get_delegation_validators< - C: namada_sdk::queries::Client + Sync, ->( +pub async fn get_delegation_validators( client: &C, address: &Address, ) -> HashSet
{ @@ -2013,9 +2001,7 @@ pub async fn get_delegation_validators< .unwrap() } -pub async fn get_delegations_of_delegator_at< - C: namada_sdk::queries::Client + Sync, ->( +pub async fn get_delegations_of_delegator_at( client: &C, address: &Address, epoch: Epoch, @@ -2025,18 +2011,14 @@ pub async fn get_delegations_of_delegator_at< .unwrap() } -pub async fn query_governance_parameters< - C: namada_sdk::queries::Client + Sync, ->( +pub async fn query_governance_parameters( client: &C, ) -> GovernanceParameters { namada_sdk::rpc::query_governance_parameters(client).await } /// A helper to unwrap client's response. Will shut down process on error. -fn unwrap_client_response( - response: Result, -) -> T { +fn unwrap_client_response(response: Result) -> T { response.unwrap_or_else(|err| { eprintln!("Error in the query: {:?}", err); cli::safe_exit(1) @@ -2051,7 +2033,7 @@ fn unwrap_sdk_result(response: Result) -> T { }) } -pub async fn compute_proposal_votes( +pub async fn compute_proposal_votes( client: &C, proposal_id: u64, epoch: Epoch, diff --git a/crates/apps_lib/src/client/tx.rs b/crates/apps_lib/src/client/tx.rs index 7716773737..6b39bbd4f6 100644 --- a/crates/apps_lib/src/client/tx.rs +++ b/crates/apps_lib/src/client/tx.rs @@ -11,7 +11,7 @@ use namada_sdk::governance::cli::onchain::{ DefaultProposal, PgfFundingProposal, PgfStewardProposal, }; use namada_sdk::ibc::convert_masp_tx_to_ibc_memo; -use namada_sdk::io::Io; +use namada_sdk::io::{display_line, edisplay_line, Io}; use namada_sdk::key::*; use namada_sdk::rpc::{InnerTxResult, TxBroadcastData, TxResponse}; use namada_sdk::state::EPOCH_SWITCH_BLOCKS_DELAY; @@ -19,7 +19,7 @@ use namada_sdk::tx::data::compute_inner_tx_hash; use namada_sdk::tx::{CompressedAuthorization, Section, Signer, Tx}; use namada_sdk::wallet::alias::{validator_address, validator_consensus_key}; use namada_sdk::wallet::{Wallet, WalletIo}; -use namada_sdk::{display_line, edisplay_line, error, signing, tx, Namada}; +use namada_sdk::{error, 
signing, tx, Namada}; use rand::rngs::OsRng; use tokio::sync::RwLock; @@ -231,7 +231,7 @@ async fn batch_opt_reveal_pk_and_submit( tx_data: (Tx, SigningTxData), ) -> Result where - ::Error: std::fmt::Display, + ::Error: std::fmt::Display, { let mut batched_tx_data = vec![]; @@ -279,7 +279,7 @@ pub async fn submit_custom( args: args::TxCustom, ) -> Result<(), error::Error> where - ::Error: std::fmt::Display, + ::Error: std::fmt::Display, { let custom_tx_data = args.build(namada).await?; @@ -303,7 +303,7 @@ pub async fn submit_update_account( args: args::TxUpdateAccount, ) -> Result<(), error::Error> where - ::Error: std::fmt::Display, + ::Error: std::fmt::Display, { let (mut tx, signing_data) = args.build(namada).await?; @@ -323,7 +323,7 @@ pub async fn submit_init_account( args: args::TxInitAccount, ) -> Result, error::Error> where - ::Error: std::fmt::Display, + ::Error: std::fmt::Display, { let (mut tx, signing_data) = tx::build_init_account(namada, &args).await?; @@ -907,7 +907,7 @@ pub async fn submit_ibc_transfer( args: args::TxIbcTransfer, ) -> Result<(), error::Error> where - ::Error: std::fmt::Display, + ::Error: std::fmt::Display, { let (tx, signing_data, _) = args.build(namada).await?; @@ -933,7 +933,7 @@ pub async fn submit_init_proposal( args: args::InitProposal, ) -> Result<(), error::Error> where - ::Error: std::fmt::Display, + ::Error: std::fmt::Display, { let current_epoch = rpc::query_and_print_epoch(namada).await; let governance_parameters = @@ -1040,7 +1040,7 @@ pub async fn submit_vote_proposal( args: args::VoteProposal, ) -> Result<(), error::Error> where - ::Error: std::fmt::Display, + ::Error: std::fmt::Display, { let (mut tx_builder, signing_data) = args.build(namada).await?; @@ -1065,7 +1065,7 @@ pub async fn sign_tx( }: args::SignTx, ) -> Result<(), error::Error> where - ::Error: std::fmt::Display, + ::Error: std::fmt::Display, { let tx = if let Ok(transaction) = Tx::deserialize(tx_data.as_ref()) { transaction @@ -1135,7 +1135,7 @@ pub 
async fn submit_reveal_pk( args: args::RevealPk, ) -> Result<(), error::Error> where - ::Error: std::fmt::Display, + ::Error: std::fmt::Display, { let tx_data = submit_reveal_aux(namada, &args.tx, &(&args.public_key).into()).await?; @@ -1153,7 +1153,7 @@ pub async fn submit_bond( args: args::Bond, ) -> Result<(), error::Error> where - ::Error: std::fmt::Display, + ::Error: std::fmt::Display, { let submit_bond_tx_data = args.build(namada).await?; @@ -1178,7 +1178,7 @@ pub async fn submit_unbond( args: args::Unbond, ) -> Result<(), error::Error> where - ::Error: std::fmt::Display, + ::Error: std::fmt::Display, { let (mut tx, signing_data, latest_withdrawal_pre) = args.build(namada).await?; @@ -1209,7 +1209,7 @@ pub async fn submit_withdraw( args: args::Withdraw, ) -> Result<(), error::Error> where - ::Error: std::fmt::Display, + ::Error: std::fmt::Display, { let (mut tx, signing_data) = args.build(namada).await?; @@ -1229,7 +1229,7 @@ pub async fn submit_claim_rewards( args: args::ClaimRewards, ) -> Result<(), error::Error> where - ::Error: std::fmt::Display, + ::Error: std::fmt::Display, { let (mut tx, signing_data) = args.build(namada).await?; @@ -1249,7 +1249,7 @@ pub async fn submit_redelegate( args: args::Redelegate, ) -> Result<(), error::Error> where - ::Error: std::fmt::Display, + ::Error: std::fmt::Display, { let (mut tx, signing_data) = args.build(namada).await?; @@ -1269,7 +1269,7 @@ pub async fn submit_validator_commission_change( args: args::CommissionRateChange, ) -> Result<(), error::Error> where - ::Error: std::fmt::Display, + ::Error: std::fmt::Display, { let (mut tx, signing_data) = args.build(namada).await?; @@ -1289,7 +1289,7 @@ pub async fn submit_validator_metadata_change( args: args::MetaDataChange, ) -> Result<(), error::Error> where - ::Error: std::fmt::Display, + ::Error: std::fmt::Display, { let (mut tx, signing_data) = args.build(namada).await?; @@ -1309,7 +1309,7 @@ pub async fn submit_unjail_validator( args: args::TxUnjailValidator, ) -> 
Result<(), error::Error> where - ::Error: std::fmt::Display, + ::Error: std::fmt::Display, { let (mut tx, signing_data) = args.build(namada).await?; @@ -1329,7 +1329,7 @@ pub async fn submit_deactivate_validator( args: args::TxDeactivateValidator, ) -> Result<(), error::Error> where - ::Error: std::fmt::Display, + ::Error: std::fmt::Display, { let (mut tx, signing_data) = args.build(namada).await?; @@ -1349,7 +1349,7 @@ pub async fn submit_reactivate_validator( args: args::TxReactivateValidator, ) -> Result<(), error::Error> where - ::Error: std::fmt::Display, + ::Error: std::fmt::Display, { let (mut tx, signing_data) = args.build(namada).await?; @@ -1369,7 +1369,7 @@ pub async fn submit_update_steward_commission( args: args::UpdateStewardCommission, ) -> Result<(), error::Error> where - ::Error: std::fmt::Display, + ::Error: std::fmt::Display, { let (mut tx, signing_data) = args.build(namada).await?; @@ -1389,7 +1389,7 @@ pub async fn submit_resign_steward( args: args::ResignSteward, ) -> Result<(), error::Error> where - ::Error: std::fmt::Display, + ::Error: std::fmt::Display, { let (mut tx, signing_data) = args.build(namada).await?; diff --git a/crates/apps_lib/src/wallet/mod.rs b/crates/apps_lib/src/wallet/mod.rs index e39ffd47af..09c17430fc 100644 --- a/crates/apps_lib/src/wallet/mod.rs +++ b/crates/apps_lib/src/wallet/mod.rs @@ -11,12 +11,12 @@ use std::{env, fs}; use namada_sdk::bip39::{Language, Mnemonic}; use namada_sdk::key::*; pub use namada_sdk::wallet::alias::Alias; -use namada_sdk::wallet::fs::FsWalletStorage; use namada_sdk::wallet::store::Store; use namada_sdk::wallet::{ ConfirmationResponse, FindKeyError, LoadStoreError, Wallet, WalletIo, }; pub use namada_sdk::wallet::{ValidatorData, ValidatorKeys}; +use namada_wallet::fs::FsWalletStorage; use rand_core::OsRng; pub use store::wallet_file; pub use transport::{TransportTcp, WalletTransport}; diff --git a/crates/benches/native_vps.rs b/crates/benches/native_vps.rs index 536d337f23..4f4ab800c3 100644 
--- a/crates/benches/native_vps.rs +++ b/crates/benches/native_vps.rs @@ -37,15 +37,14 @@ use namada_apps_lib::ibc::primitives::ToProto; use namada_apps_lib::ibc::{ IbcActions, NftTransferModule, TransferModule, COMMITMENT_PREFIX, }; -use namada_apps_lib::masp::{ - partial_deauthorize, preload_verifying_keys, PVKs, TransferSource, - TransferTarget, -}; use namada_apps_lib::masp_primitives::merkle_tree::CommitmentTree; use namada_apps_lib::masp_primitives::transaction::Transaction; use namada_apps_lib::masp_proofs::sapling::SaplingVerificationContextInner; use namada_apps_lib::proof_of_stake::KeySeg; use namada_apps_lib::state::{Epoch, StorageRead, StorageWrite, TxIndex}; +use namada_apps_lib::token::masp::{ + partial_deauthorize, preload_verifying_keys, PVKs, +}; use namada_apps_lib::token::{Amount, Transfer}; use namada_apps_lib::tx::{BatchedTx, Code, Section, Tx}; use namada_apps_lib::validation::{ @@ -53,7 +52,10 @@ use namada_apps_lib::validation::{ IbcVpContext, MaspVp, MultitokenVp, ParametersVp, PgfVp, PosVp, }; use namada_apps_lib::wallet::defaults; -use namada_apps_lib::{governance, parameters, proof_of_stake, storage, token}; +use namada_apps_lib::{ + governance, parameters, proof_of_stake, storage, token, TransferSource, + TransferTarget, +}; use namada_node::bench_utils::{ generate_foreign_key_tx, BenchShell, BenchShieldedCtx, ALBERT_PAYMENT_ADDRESS, ALBERT_SPENDING_KEY, BERTHA_PAYMENT_ADDRESS, diff --git a/crates/core/Cargo.toml b/crates/core/Cargo.toml index 17a1e1e462..85048c93b8 100644 --- a/crates/core/Cargo.toml +++ b/crates/core/Cargo.toml @@ -37,6 +37,7 @@ arbitrary = [ "indexmap/arbitrary", "masp_primitives/arbitrary", ] +task_env = ["tokio"] [dependencies] namada_macros = {path = "../macros"} @@ -86,8 +87,11 @@ uint = "0.9.5" zeroize.workspace = true wasmtimer = { workspace = true, optional = true } + + [target.'cfg(not(target_family = "wasm"))'.dependencies] tokio = { workspace = true, optional = true, features = ["full"] } +rayon.workspace = 
true [target.'cfg(target_family = "wasm")'.dependencies] tokio = { workspace = true, optional = true, default-features = false, features = ["sync"] } diff --git a/crates/core/src/lib.rs b/crates/core/src/lib.rs index bf94f26e4f..2c2a1e63fa 100644 --- a/crates/core/src/lib.rs +++ b/crates/core/src/lib.rs @@ -80,6 +80,8 @@ pub mod masp; pub mod parameters; pub mod storage; pub mod string_encoding; +#[cfg(any(test, feature = "task_env"))] +pub mod task_env; pub mod time; pub mod token; pub mod uint; diff --git a/crates/sdk/src/task_env.rs b/crates/core/src/task_env.rs similarity index 100% rename from crates/sdk/src/task_env.rs rename to crates/core/src/task_env.rs diff --git a/crates/io/Cargo.toml b/crates/io/Cargo.toml new file mode 100644 index 0000000000..20ddac9794 --- /dev/null +++ b/crates/io/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "namada_io" +description = "Namada IO" +resolver = "2" +authors.workspace = true +edition.workspace = true +documentation.workspace = true +homepage.workspace = true +keywords.workspace = true +license.workspace = true +readme.workspace = true +repository.workspace = true +version.workspace = true + +[features] +async-send = [] + +[dependencies] +namada_core = { path = "../core" } +async-trait.workspace = true +tendermint-rpc.workspace = true +thiserror.workspace = true + +[target.'cfg(target_family = "wasm")'.dependencies] +tokio = { workspace = true, default-features = false, features = ["sync"] } + +[target.'cfg(not(target_family = "wasm"))'.dependencies] +kdam.workspace = true +tokio = { workspace = true, features = ["full"] } \ No newline at end of file diff --git a/crates/io/src/client.rs b/crates/io/src/client.rs new file mode 100644 index 0000000000..c380433623 --- /dev/null +++ b/crates/io/src/client.rs @@ -0,0 +1,323 @@ +use std::fmt::{Debug, Display}; + +use namada_core::chain::BlockHeight; +use namada_core::tendermint::merkle::proof::ProofOps; +use tendermint_rpc::endpoint::{ + abci_info, block, block_results, 
blockchain, commit, consensus_params, + consensus_state, health, net_info, status, +}; +use tendermint_rpc::query::Query; +use tendermint_rpc::{Error as RpcError, Order}; +use thiserror::Error; + +use crate::tendermint::abci::response::Info; +use crate::tendermint::block::Height; +use crate::MaybeSend; + +const HEIGHT_CAST_ERR: &str = "Failed to cast block height"; + +#[allow(missing_docs)] +#[derive(Error, Debug)] +pub enum Error { + #[error("{0}")] + Tendermint(#[from] tendermint_rpc::Error), + #[error("Decoding error: {0}")] + Decoding(#[from] std::io::Error), + #[error("Info log: {0}, error code: {1}")] + Query(String, u32), + #[error("Invalid block height: {0} (overflown i64)")] + InvalidHeight(BlockHeight), +} + +/// Generic response from a query +#[derive(Clone, Debug, Default)] +pub struct ResponseQuery { + /// Response data to be borsh encoded + pub data: T, + /// Non-deterministic log of the request execution + pub info: String, + /// Optional proof - used for storage value reads which request `prove` + pub proof: Option, + /// Block height from which data was derived + pub height: BlockHeight, +} + +/// [`ResponseQuery`] with borsh-encoded `data` field +pub type EncodedResponseQuery = ResponseQuery>; + +/// A client with async request dispatcher method, which can be used to invoke +/// type-safe methods from a root [`Router`], generated +/// via `router!` macro. +#[cfg_attr(feature = "async-send", async_trait::async_trait)] +#[cfg_attr(not(feature = "async-send"), async_trait::async_trait(?Send))] +pub trait Client { + /// `std::io::Error` can happen in decoding with + /// `BorshDeserialize::try_from_slice` + type Error: From + Display + Debug; + + /// Send a simple query request at the given path. For more options, use the + /// `request` method. 
+ async fn simple_request( + &self, + path: String, + ) -> Result, Self::Error> { + self.request(path, None, None, false) + .await + .map(|response| response.data) + } + + /// Send a query request at the given path. + async fn request( + &self, + path: String, + data: Option>, + height: Option, + prove: bool, + ) -> Result; + + /// `/abci_info`: get information about the ABCI application. + async fn abci_info(&self) -> Result { + Ok(self.perform(abci_info::Request).await?.response) + } + + /// `/broadcast_tx_sync`: broadcast a transaction, returning the response + /// from `CheckTx`. + async fn broadcast_tx_sync( + &self, + tx: impl Into> + MaybeSend, + ) -> Result + { + self.perform( + tendermint_rpc::endpoint::broadcast::tx_sync::Request::new(tx), + ) + .await + } + + /// `/block`: get the latest block. + async fn latest_block(&self) -> Result { + self.perform(block::Request::default()).await + } + + /// `/block`: get block at a given height. + async fn block(&self, height: H) -> Result + where + H: TryInto + Send, + { + self.perform(block::Request::new( + height + .try_into() + .map_err(|_| RpcError::parse(HEIGHT_CAST_ERR.to_string()))?, + )) + .await + } + + /// `/block_search`: search for blocks by BeginBlock and EndBlock events. + async fn block_search( + &self, + query: Query, + page: u32, + per_page: u8, + order: Order, + ) -> Result + { + self.perform(tendermint_rpc::endpoint::block_search::Request::new( + query, page, per_page, order, + )) + .await + } + + /// `/block_results`: get ABCI results for a block at a particular height. + async fn block_results( + &self, + height: H, + ) -> Result + where + H: TryInto + Send, + { + self.perform(tendermint_rpc::endpoint::block_results::Request::new( + height + .try_into() + .map_err(|_| RpcError::parse(HEIGHT_CAST_ERR.to_string()))?, + )) + .await + } + + /// `/tx_search`: search for transactions with their results. 
+ async fn tx_search( + &self, + query: Query, + prove: bool, + page: u32, + per_page: u8, + order: Order, + ) -> Result { + self.perform(tendermint_rpc::endpoint::tx_search::Request::new( + query, prove, page, per_page, order, + )) + .await + } + + /// `/abci_query`: query the ABCI application + async fn abci_query( + &self, + path: Option, + data: V, + height: Option, + prove: bool, + ) -> Result + where + V: Into> + Send, + { + Ok(self + .perform(tendermint_rpc::endpoint::abci_query::Request::new( + path, data, height, prove, + )) + .await? + .response) + } + + /// `/block_results`: get ABCI results for the latest block. + async fn latest_block_results( + &self, + ) -> Result { + self.perform(block_results::Request::default()).await + } + + /// `/blockchain`: get block headers for `min` <= `height` <= `max`. + /// + /// Block headers are returned in descending order (highest first). + /// + /// Returns at most 20 items. + async fn blockchain( + &self, + min: H, + max: H, + ) -> Result + where + H: TryInto + Send, + { + self.perform(blockchain::Request::new( + min.try_into() + .map_err(|_| RpcError::parse(HEIGHT_CAST_ERR.to_string()))?, + max.try_into() + .map_err(|_| RpcError::parse(HEIGHT_CAST_ERR.to_string()))?, + )) + .await + } + + /// `/commit`: get block commit at a given height. + async fn commit(&self, height: H) -> Result + where + H: TryInto + Send, + { + self.perform(commit::Request::new( + height + .try_into() + .map_err(|_| RpcError::parse(HEIGHT_CAST_ERR.to_string()))?, + )) + .await + } + + /// `/consensus_params`: get current consensus parameters at the specified + /// height. 
+ async fn consensus_params( + &self, + height: H, + ) -> Result + where + H: TryInto + Send, + { + self.perform(consensus_params::Request::new(Some( + height + .try_into() + .map_err(|_| RpcError::parse(HEIGHT_CAST_ERR.to_string()))?, + ))) + .await + } + + /// `/consensus_state`: get current consensus state + async fn consensus_state( + &self, + ) -> Result { + self.perform(consensus_state::Request::new()).await + } + + /// `/consensus_params`: get the latest consensus parameters. + async fn latest_consensus_params( + &self, + ) -> Result { + self.perform(consensus_params::Request::new(None)).await + } + + /// `/commit`: get the latest block commit + async fn latest_commit(&self) -> Result { + self.perform(commit::Request::default()).await + } + + /// `/health`: get node health. + /// + /// Returns empty result (200 OK) on success, no response in case of an + /// error. + async fn health(&self) -> Result<(), RpcError> { + self.perform(health::Request).await?; + Ok(()) + } + + /// `/net_info`: obtain information about P2P and other network connections. + async fn net_info(&self) -> Result { + self.perform(net_info::Request).await + } + + /// `/status`: get Tendermint status including node info, pubkey, latest + /// block hash, app hash, block height and time. 
+ async fn status(&self) -> Result { + self.perform(status::Request).await + } + + /// Perform a request against the RPC endpoint + async fn perform(&self, request: R) -> Result + where + R: tendermint_rpc::SimpleRequest; +} + +#[cfg_attr(feature = "async-send", async_trait::async_trait)] +#[cfg_attr(not(feature = "async-send"), async_trait::async_trait(?Send))] +impl Client for C { + type Error = Error; + + async fn request( + &self, + path: String, + data: Option>, + height: Option, + prove: bool, + ) -> Result { + use crate::tendermint::abci::Code; + + let data = data.unwrap_or_default(); + let height = height + .map(|height| { + crate::tendermint::block::Height::try_from(height.0) + .map_err(|_err| Error::InvalidHeight(height)) + }) + .transpose()?; + + let response = self.abci_query(Some(path), data, height, prove).await?; + match response.code { + Code::Ok => Ok(EncodedResponseQuery { + data: response.value, + info: response.info, + proof: response.proof, + height: response.height.value().into(), + }), + Code::Err(code) => Err(Error::Query(response.info, code.into())), + } + } + + async fn perform(&self, request: R) -> Result + where + R: tendermint_rpc::SimpleRequest, + { + tendermint_rpc::client::Client::perform(self, request).await + } +} diff --git a/crates/sdk/src/io.rs b/crates/io/src/lib.rs similarity index 86% rename from crates/sdk/src/io.rs rename to crates/io/src/lib.rs index 66c45d69fd..5cc39dea09 100644 --- a/crates/sdk/src/io.rs +++ b/crates/io/src/lib.rs @@ -4,7 +4,27 @@ #![allow(clippy::print_stdout, clippy::print_stderr)] -use crate::{MaybeSend, MaybeSync}; +pub mod client; + +#[cfg(feature = "async-send")] +pub use std::marker::Send as MaybeSend; +#[cfg(feature = "async-send")] +pub use std::marker::Sync as MaybeSync; + +pub use client::Client; +use namada_core::*; + +#[allow(missing_docs)] +#[cfg(not(feature = "async-send"))] +pub trait MaybeSync {} +#[cfg(not(feature = "async-send"))] +impl MaybeSync for T where T: ?Sized {} + 
+#[allow(missing_docs)] +#[cfg(not(feature = "async-send"))] +pub trait MaybeSend {} +#[cfg(not(feature = "async-send"))] +impl MaybeSend for T where T: ?Sized {} /// NOOP progress bar implementation. #[derive(Debug, Clone, Copy)] @@ -279,3 +299,18 @@ macro_rules! prompt { $io.prompt(format!("{}", format_args!($($arg)*))) }} } + +#[cfg_attr(feature = "async-send", async_trait::async_trait)] +#[cfg_attr(not(feature = "async-send"), async_trait::async_trait(?Send))] +pub trait NamadaIo: Sized + MaybeSync + MaybeSend { + /// A client with async request dispatcher method + type Client: Client + MaybeSend + Sync; + /// Captures the input/output streams used by this object + type Io: Io + MaybeSend + MaybeSync; + + /// Obtain the client for communicating with the ledger + fn client(&self) -> &Self::Client; + + /// Obtain the input/output handle for this context + fn io(&self) -> &Self::Io; +} diff --git a/crates/light_sdk/src/writing/asynchronous/mod.rs b/crates/light_sdk/src/writing/asynchronous/mod.rs index bb93b20089..a71b747bfb 100644 --- a/crates/light_sdk/src/writing/asynchronous/mod.rs +++ b/crates/light_sdk/src/writing/asynchronous/mod.rs @@ -1,7 +1,7 @@ use std::str::FromStr; use namada_sdk::error::{EncodingError, Error, TxSubmitError}; -use namada_sdk::queries::Client; +use namada_sdk::io::Client; use namada_sdk::tx::Tx; use tendermint_config::net::Address as TendermintAddress; use tendermint_rpc::endpoint::broadcast::tx_sync::Response; diff --git a/crates/node/src/bench_utils.rs b/crates/node/src/bench_utils.rs index 9dc6da077f..2ae4950568 100644 --- a/crates/node/src/bench_utils.rs +++ b/crates/node/src/bench_utils.rs @@ -69,21 +69,23 @@ use namada_sdk::ibc::storage::{ channel_key, connection_key, mint_limit_key, port_key, throughput_limit_key, }; use namada_sdk::ibc::{MsgTransfer, COMMITMENT_PREFIX}; -use namada_sdk::io::StdIo; +use namada_sdk::io::{Client, NamadaIo, StdIo}; use namada_sdk::key::common::SecretKey; +use 
namada_sdk::masp::shielded_wallet::ShieldedApi; use namada_sdk::masp::utils::RetryStrategy; use namada_sdk::masp::{ - self, ContextSyncStatus, DispatcherCache, ExtendedViewingKey, - MaspTransferData, MaspTxRefs, PaymentAddress, ShieldedContext, - ShieldedUtils, TransferSource, TransferTarget, + self, ContextSyncStatus, DispatcherCache, MaspTransferData, + ShieldedContext, ShieldedUtils, ShieldedWallet, }; use namada_sdk::queries::{ - Client, EncodedResponseQuery, RequestCtx, RequestQuery, Router, RPC, + EncodedResponseQuery, RequestCtx, RequestQuery, Router, RPC, }; use namada_sdk::state::StorageRead; use namada_sdk::storage::{Key, KeySeg, TxIndex}; use namada_sdk::time::DateTimeUtc; -use namada_sdk::token::{self, Amount, DenominatedAmount, Transfer}; +use namada_sdk::token::{ + self, Amount, DenominatedAmount, MaspTxRefs, Transfer, +}; use namada_sdk::tx::data::pos::Bond; use namada_sdk::tx::data::{BatchedTxResult, Fee, TxResult, VpsResult}; use namada_sdk::tx::event::{new_tx_event, Batch}; @@ -104,7 +106,10 @@ pub use namada_sdk::tx::{ TX_WITHDRAW_WASM, VP_USER_WASM, }; use namada_sdk::wallet::Wallet; -use namada_sdk::{parameters, proof_of_stake, tendermint, Namada, NamadaImpl}; +use namada_sdk::{ + parameters, proof_of_stake, tendermint, Namada, NamadaImpl, PaymentAddress, + TransferSource, TransferTarget, +}; use namada_test_utils::tx_data::TxWriteData; use namada_vm::wasm::run; use rand_core::OsRng; @@ -760,7 +765,7 @@ impl ShieldedUtils for BenchShieldedUtils { /// directory. If this fails, then leave the current context unchanged. 
async fn load( &self, - ctx: &mut ShieldedContext, + ctx: &mut ShieldedWallet, force_confirmed: bool, ) -> std::io::Result<()> { // Try to load shielded context from file @@ -778,9 +783,9 @@ impl ShieldedUtils for BenchShieldedUtils { let mut bytes = Vec::new(); ctx_file.read_to_end(&mut bytes)?; // Fill the supplied context with the deserialized object - *ctx = ShieldedContext { + *ctx = ShieldedWallet { utils: ctx.utils.clone(), - ..ShieldedContext::deserialize(&mut &bytes[..])? + ..ShieldedWallet::deserialize(&mut &bytes[..])? }; Ok(()) } @@ -788,7 +793,7 @@ impl ShieldedUtils for BenchShieldedUtils { /// Save this shielded context into its associated context directory async fn save( &self, - ctx: &ShieldedContext, + ctx: &ShieldedWallet, ) -> std::io::Result<()> { let (tmp_file_name, file_name) = match ctx.sync_status { ContextSyncStatus::Confirmed => (TMP_FILE_NAME, FILE_NAME), @@ -1122,21 +1127,21 @@ impl Default for BenchShieldedCtx { ] .map(|(p, s)| (p.to_owned(), s.to_owned())) { - let viewing_key: FromContext = FromContext::new( - chain_ctx - .wallet - .find_viewing_key(viewing_alias) - .unwrap() - .key - .to_string(), - ); + let viewing_key: FromContext = + FromContext::new( + chain_ctx + .wallet + .find_viewing_key(viewing_alias) + .unwrap() + .key + .to_string(), + ); let viewing_key = ExtendedFullViewingKey::from( chain_ctx.get_cached(&viewing_key), ) .fvk .vk; - let (div, _g_d) = - namada_sdk::masp::find_valid_diversifier(&mut OsRng); + let (div, _g_d) = masp::find_valid_diversifier(&mut OsRng); let payment_addr = viewing_key.to_payment_address(div).unwrap(); let _ = chain_ctx .wallet @@ -1194,7 +1199,7 @@ impl BenchShieldedCtx { let namada = NamadaImpl::native_new( self.shell, self.wallet, - self.shielded, + self.shielded.into(), StdIo, native_token, ); @@ -1205,14 +1210,20 @@ impl BenchShieldedCtx { amount: denominated_amount, }; let shielded = async_runtime - .block_on( - ShieldedContext::::gen_shielded_transfer( - &namada, - 
vec![masp_transfer_data], - None, - true, - ), - ) + .block_on(async { + let expiration = + Namada::tx_builder(&namada).expiration.to_datetime(); + let mut shielded_ctx = namada.shielded_mut().await; + shielded_ctx + .gen_shielded_transfer( + &namada, + vec![masp_transfer_data], + None, + expiration, + true, + ) + .await + }) .unwrap() .map( |masp::ShieldedTransfer { diff --git a/crates/node/src/dry_run_tx.rs b/crates/node/src/dry_run_tx.rs index 794b509d12..4e697a09bd 100644 --- a/crates/node/src/dry_run_tx.rs +++ b/crates/node/src/dry_run_tx.rs @@ -141,8 +141,9 @@ mod test { use namada_sdk::chain::BlockHeight; use namada_sdk::events::log::EventLog; use namada_sdk::hash::Hash; + use namada_sdk::io::Client; use namada_sdk::queries::{ - Client, EncodedResponseQuery, RequestCtx, RequestQuery, Router, RPC, + EncodedResponseQuery, RequestCtx, RequestQuery, Router, RPC, }; use namada_sdk::state::testing::TestState; use namada_sdk::state::StorageWrite; diff --git a/crates/node/src/protocol.rs b/crates/node/src/protocol.rs index be8c2e3d17..e5c443f40c 100644 --- a/crates/node/src/protocol.rs +++ b/crates/node/src/protocol.rs @@ -805,7 +805,7 @@ fn get_optional_masp_ref>( state: &S, cmt: &TxCommitments, is_masp_tx: Either, -) -> Result>> { +) -> Result>> { // Always check that the transaction was indeed a MASP one by looking at the // changed keys. 
A malicious tx could push a MASP Action without touching // any storage keys associated with the shielded pool diff --git a/crates/node/src/shell/testing/node.rs b/crates/node/src/shell/testing/node.rs index 5aff0a933f..1799e28d93 100644 --- a/crates/node/src/shell/testing/node.rs +++ b/crates/node/src/shell/testing/node.rs @@ -20,6 +20,7 @@ use namada_sdk::events::extend::Height as HeightAttr; use namada_sdk::events::log::dumb_queries; use namada_sdk::events::Event; use namada_sdk::hash::Hash; +use namada_sdk::io::Client; use namada_sdk::key::tm_consensus_key_raw_hash; use namada_sdk::proof_of_stake::storage::{ read_consensus_validator_set_addresses_with_stake, read_pos_params, @@ -27,7 +28,7 @@ use namada_sdk::proof_of_stake::storage::{ }; use namada_sdk::proof_of_stake::types::WeightedValidator; use namada_sdk::queries::{ - Client, EncodedResponseQuery, RequestCtx, RequestQuery, Router, RPC, + EncodedResponseQuery, RequestCtx, RequestQuery, Router, RPC, }; use namada_sdk::state::{ LastBlock, Sha256Hasher, StorageRead, DB, EPOCH_SWITCH_BLOCKS_DELAY, @@ -745,7 +746,7 @@ impl Client for MockNode { data: Option>, height: Option, prove: bool, - ) -> std::result::Result { + ) -> std::result::Result { self.drive_mock_services_bg().await; let rpc = RPC; let data = data.unwrap_or_default(); diff --git a/crates/sdk/Cargo.toml b/crates/sdk/Cargo.toml index 6cad716918..53708ab291 100644 --- a/crates/sdk/Cargo.toml +++ b/crates/sdk/Cargo.toml @@ -16,14 +16,14 @@ version.workspace = true [features] default = ["std"] -mainnet = ["namada_core/mainnet", "namada_events/mainnet"] +mainnet = ["namada_core/mainnet", "namada_events/mainnet", "namada_token/mainnet"] multicore = ["masp_proofs/multicore", "namada_token/multicore"] -std = ["fd-lock", "download-params"] -async-send = [] +std = ["fd-lock", "download-params", "namada_token/std"] +async-send = ["namada_io/async-send"] namada-eth-bridge = ["namada_ethereum_bridge/namada-eth-bridge"] benches = ["namada_core/benches", 
"namada_core/testing", "namada_state/benches"] wasm-runtime = ["namada_vm/wasm-runtime"] - +masp = ["namada_core/task_env", "namada_core/control_flow", "namada_token/masp"] # for tests and test utilities testing = [ "masp_primitives/test-dependencies", @@ -60,6 +60,7 @@ migrations = [ "namada_proof_of_stake/migrations", "namada_state/migrations", "namada_storage/migrations", + "namada_token/migrations", "namada_tx/migrations", "namada_vote_ext/migrations", "namada_gas/migrations", @@ -68,23 +69,25 @@ migrations = [ [dependencies] namada_account = { path = "../account" } -namada_core = { path = "../core", features = ["control_flow", "rand"] } +namada_core = { path = "../core" } namada_ethereum_bridge = { path = "../ethereum_bridge", default-features = false } namada_events = { path = "../events" } namada_gas = { path = "../gas" } namada_governance = { path = "../governance" } namada_ibc = { path = "../ibc" } +namada_io = { path = "../io" } namada_macros = { path = "../macros" } namada_migrations = { path = "../migrations", optional = true } namada_parameters = { path = "../parameters" } namada_proof_of_stake = { path = "../proof_of_stake" } namada_state = { path = "../state" } namada_storage = { path = "../storage" } -namada_token = { path = "../token" } +namada_token = { path = "../token", features = ["masp"] } namada_tx = { path = "../tx" } namada_vm = { path = "../vm", default-features = false } namada_vote_ext = { path = "../vote_ext" } namada_vp = { path = "../vp" } +namada_wallet = {path = "../wallet" } arbitrary = { workspace = true, optional = true } async-trait.workspace = true @@ -94,14 +97,12 @@ borsh-ext.workspace = true circular-queue.workspace = true clap = { version = "4.3", default-features = false, features = ["std"] } data-encoding.workspace = true -derivation-path.workspace = true duration-str.workspace = true either.workspace = true ethbridge-bridge-contract.workspace = true ethers.workspace = true eyre.workspace = true fd-lock = { workspace = 
true, optional = true } -flume.workspace = true futures.workspace = true init-once.workspace = true itertools.workspace = true @@ -112,7 +113,6 @@ masp_primitives.workspace = true masp_proofs.workspace = true num256.workspace = true num-traits.workspace = true -orion.workspace = true owo-colors.workspace = true paste.workspace = true patricia_tree.workspace = true @@ -121,27 +121,24 @@ prost.workspace = true rand.workspace = true rand_core.workspace = true regex.workspace = true +reqwest.workspace = true serde.workspace = true serde_json.workspace = true sha2.workspace = true -slip10_ed25519.workspace = true smooth-operator.workspace = true tendermint-rpc.workspace = true thiserror.workspace = true tiny-bip39.workspace = true -tiny-hderive.workspace = true toml.workspace = true tracing.workspace = true -typed-builder.workspace = true zeroize.workspace = true +xorf.workspace = true [target.'cfg(not(target_family = "wasm"))'.dependencies] -kdam.workspace = true rayon.workspace = true -reqwest.workspace = true tempfile.workspace = true tokio = { workspace = true, features = ["full"] } -xorf.workspace = true + [target.'cfg(target_family = "wasm")'.dependencies] tokio = { workspace = true, default-features = false, features = ["sync"] } @@ -163,14 +160,14 @@ namada_proof_of_stake = { path = "../proof_of_stake", default-features = false, ] } namada_state = { path = "../state", features = ["testing"] } namada_storage = { path = "../storage", features = ["testing"] } -namada_token = { path = "../token", features = ["testing"] } +namada_token = { path = "../token", features = ["testing", "masp"] } namada_tx = { path = "../tx", features = ["testing"]} namada_vm = { path = "../vm" } namada_vote_ext = { path = "../vote_ext" } namada_vp = { path = "../vp" } assert_matches.workspace = true -base58.workspace = true + jubjub.workspace = true masp_primitives = { workspace = true, features = ["test-dependencies"] } proptest.workspace = true diff --git a/crates/sdk/src/args.rs 
b/crates/sdk/src/args.rs index 5949a16ba4..ac382e9e95 100644 --- a/crates/sdk/src/args.rs +++ b/crates/sdk/src/args.rs @@ -19,6 +19,7 @@ use namada_governance::cli::onchain::{ DefaultProposal, PgfFundingProposal, PgfStewardProposal, }; use namada_ibc::IbcShieldingData; +use namada_token::masp::utils::RetryStrategy; use namada_tx::data::GasLimit; use namada_tx::Memo; use serde::{Deserialize, Serialize}; @@ -26,7 +27,6 @@ use zeroize::Zeroizing; use crate::eth_bridge::bridge_pool; use crate::ibc::core::host::types::identifiers::{ChannelId, PortId}; -use crate::masp::utils::RetryStrategy; use crate::signing::SigningTxData; use crate::wallet::{DatedSpendingKey, DatedViewingKey}; use crate::{rpc, tx, Namada}; diff --git a/crates/sdk/src/eth_bridge/bridge_pool.rs b/crates/sdk/src/eth_bridge/bridge_pool.rs index f851f67d15..4c35eda13e 100644 --- a/crates/sdk/src/eth_bridge/bridge_pool.rs +++ b/crates/sdk/src/eth_bridge/bridge_pool.rs @@ -20,6 +20,7 @@ use namada_core::ethereum_events::EthAddress; use namada_core::keccak::KeccakHash; use namada_core::voting_power::FractionalVotingPower; use namada_ethereum_bridge::storage::bridge_pool::get_pending_key; +use namada_io::{display, display_line, edisplay_line, Client, Io}; use namada_token::storage_key::balance_key; use namada_token::Amount; use namada_tx::Tx; @@ -33,18 +34,14 @@ use crate::error::{ }; use crate::eth_bridge::ethers::abi::AbiDecode; use crate::internal_macros::echo_error; -use crate::io::Io; use crate::queries::{ - Client, GenBridgePoolProofReq, GenBridgePoolProofRsp, TransferToErcArgs, + GenBridgePoolProofReq, GenBridgePoolProofRsp, TransferToErcArgs, TransferToEthereumStatus, RPC, }; use crate::rpc::{query_storage_value, query_wasm_code_hash, validate_amount}; use crate::signing::{aux_signing_data, validate_transparent_fee}; use crate::tx::prepare_tx; -use crate::{ - args, display, display_line, edisplay_line, MaybeSync, Namada, - SigningTxData, -}; +use crate::{args, MaybeSync, Namada, SigningTxData}; /// 
Craft a transaction that adds a transfer to the Ethereum bridge pool. pub async fn build_bridge_pool_tx( @@ -746,12 +743,12 @@ mod recommendations { use namada_core::ethereum_events::Uint as EthUint; use namada_core::uint::{self, Uint, I256}; use namada_ethereum_bridge::storage::proof::BridgePoolRootProof; + use namada_io::edisplay_line; use namada_vote_ext::validator_set_update::{ EthAddrBook, VotingPowersMap, VotingPowersMapExt, }; use super::*; - use crate::edisplay_line; use crate::eth_bridge::storage::bridge_pool::{ get_nonce_key, get_signed_root_key, }; @@ -1221,9 +1218,9 @@ mod recommendations { #[cfg(test)] mod test_recommendations { use namada_core::address; + use namada_io::StdIo; use super::*; - use crate::io::StdIo; /// An established user address for testing & development pub fn bertha_address() -> Address { diff --git a/crates/sdk/src/eth_bridge/mod.rs b/crates/sdk/src/eth_bridge/mod.rs index bcbad77a87..e2f47bd0f8 100644 --- a/crates/sdk/src/eth_bridge/mod.rs +++ b/crates/sdk/src/eth_bridge/mod.rs @@ -13,14 +13,13 @@ pub use namada_ethereum_bridge::storage::eth_bridge_queries::*; pub use namada_ethereum_bridge::storage::parameters::*; pub use namada_ethereum_bridge::storage::wrapped_erc20s; pub use namada_ethereum_bridge::{ADDRESS, *}; +use namada_io::{display_line, edisplay_line, Io}; use num256::Uint256; use crate::control_flow::time::{ Constant, Duration, Instant, LinearBackoff, Sleep, }; use crate::error::{Error, EthereumBridgeError}; -use crate::io::Io; -use crate::{display_line, edisplay_line}; const DEFAULT_BACKOFF: Duration = std::time::Duration::from_millis(500); const DEFAULT_CEILING: Duration = std::time::Duration::from_secs(30); diff --git a/crates/sdk/src/eth_bridge/validator_set.rs b/crates/sdk/src/eth_bridge/validator_set.rs index a2aac05857..725983bbae 100644 --- a/crates/sdk/src/eth_bridge/validator_set.rs +++ b/crates/sdk/src/eth_bridge/validator_set.rs @@ -14,20 +14,20 @@ use namada_core::eth_abi::EncodeCell; use 
namada_core::ethereum_events::EthAddress; use namada_core::hints; use namada_ethereum_bridge::storage::proof::EthereumProof; +use namada_io::{display_line, edisplay_line, Client, Io}; use namada_vote_ext::validator_set_update::{ ValidatorSetArgs, VotingPowersMap, }; use super::{block_on_eth_sync, eth_sync_or, eth_sync_or_exit, BlockOnEthSync}; +use crate::args; use crate::control_flow::time::{self, Duration, Instant}; use crate::error::{Error as SdkError, EthereumBridgeError, QueryError}; use crate::eth_bridge::ethers::abi::{AbiDecode, AbiType, Tokenizable}; use crate::eth_bridge::ethers::types::TransactionReceipt; use crate::eth_bridge::structs::Signature; use crate::internal_macros::{echo_error, trace_error}; -use crate::io::Io; -use crate::queries::{Client, RPC}; -use crate::{args, display_line, edisplay_line}; +use crate::queries::RPC; /// Relayer related errors. #[derive(Debug, Default)] diff --git a/crates/sdk/src/internal_macros.rs b/crates/sdk/src/internal_macros.rs index b864faa948..d7ab2bf959 100644 --- a/crates/sdk/src/internal_macros.rs +++ b/crates/sdk/src/internal_macros.rs @@ -1,7 +1,7 @@ macro_rules! 
echo_error { ($io:expr, $($arg:tt)*) => {{ let msg = ::alloc::format!($($arg)*); - $crate::edisplay_line!($io, "{msg}"); + namada_io::edisplay_line!($io, "{msg}"); msg }} } diff --git a/crates/sdk/src/lib.rs b/crates/sdk/src/lib.rs index 1ebec43146..0dd3b93653 100644 --- a/crates/sdk/src/lib.rs +++ b/crates/sdk/src/lib.rs @@ -37,31 +37,28 @@ pub mod validation; pub mod error; pub mod events; pub(crate) mod internal_macros; -pub mod io; + #[cfg(feature = "migrations")] pub mod migrations; pub mod queries; -pub mod task_env; -pub mod wallet; - -#[cfg(feature = "async-send")] -pub use std::marker::Send as MaybeSend; -#[cfg(feature = "async-send")] -pub use std::marker::Sync as MaybeSync; use std::path::PathBuf; use std::str::FromStr; use args::{DeviceTransport, InputAmount, SdkTypes}; -use io::Io; -use masp::{ShieldedContext, ShieldedUtils}; use namada_core::address::Address; use namada_core::collections::HashSet; -pub use namada_core::control_flow; use namada_core::dec::Dec; use namada_core::ethereum_events::EthAddress; use namada_core::ibc::core::host::types::identifiers::{ChannelId, PortId}; use namada_core::key::*; -use namada_core::masp::{ExtendedSpendingKey, PaymentAddress, TransferSource}; +pub use namada_core::masp::{ + ExtendedSpendingKey, ExtendedViewingKey, PaymentAddress, TransferSource, + TransferTarget, +}; +pub use namada_core::{control_flow, task_env}; +use namada_io::{Client, Io, NamadaIo}; +pub use namada_io::{MaybeSend, MaybeSync}; +pub use namada_token::masp::{ShieldedUtils, ShieldedWallet}; use namada_tx::data::wrapper::GasLimit; use namada_tx::Tx; use rpc::{denominate_amount, format_denominated_amount, query_native_token}; @@ -80,41 +77,22 @@ use tx::{ VP_USER_WASM, }; use wallet::{Wallet, WalletIo, WalletStorage}; +pub use {namada_io as io, namada_wallet as wallet}; + +use crate::masp::ShieldedContext; /// Default gas-limit pub const DEFAULT_GAS_LIMIT: u64 = 150_000; -#[allow(missing_docs)] -#[cfg(not(feature = "async-send"))] -pub trait 
MaybeSync {} -#[cfg(not(feature = "async-send"))] -impl MaybeSync for T where T: ?Sized {} - -#[allow(missing_docs)] -#[cfg(not(feature = "async-send"))] -pub trait MaybeSend {} -#[cfg(not(feature = "async-send"))] -impl MaybeSend for T where T: ?Sized {} - #[cfg_attr(feature = "async-send", async_trait::async_trait)] #[cfg_attr(not(feature = "async-send"), async_trait::async_trait(?Send))] /// An interface for high-level interaction with the Namada SDK -pub trait Namada: Sized + MaybeSync + MaybeSend { - /// A client with async request dispatcher method - type Client: queries::Client + MaybeSend + Sync; +pub trait Namada: NamadaIo { /// Captures the interactive parts of the wallet's functioning type WalletUtils: WalletIo + WalletStorage + MaybeSend + MaybeSync; /// Abstracts platform specific details away from the logic of shielded pool /// operations. type ShieldedUtils: ShieldedUtils + MaybeSend + MaybeSync; - /// Captures the input/output streams used by this object - type Io: Io + MaybeSend + MaybeSync; - - /// Obtain the client for communicating with the ledger - fn client(&self) -> &Self::Client; - - /// Obtain the input/output handle for this context - fn io(&self) -> &Self::Io; /// Obtain read guard on the wallet async fn wallet(&self) -> RwLockReadGuard<'_, Wallet>; @@ -680,7 +658,7 @@ pub trait Namada: Sized + MaybeSync + MaybeSend { /// Provides convenience methods for common Namada interactions pub struct NamadaImpl where - C: queries::Client, + C: Client, U: WalletIo, V: ShieldedUtils, I: Io, @@ -701,7 +679,7 @@ where impl NamadaImpl where - C: queries::Client + Sync, + C: Client + Sync, U: WalletIo, V: ShieldedUtils, I: Io, @@ -710,14 +688,14 @@ where pub fn native_new( client: C, wallet: Wallet, - shielded: ShieldedContext, + shielded: ShieldedWallet, io: I, native_token: Address, ) -> Self { NamadaImpl { client, wallet: RwLock::new(wallet), - shielded: RwLock::new(shielded), + shielded: RwLock::new(ShieldedContext::new(shielded)), io, native_token: 
native_token.clone(), prototype: args::Tx { @@ -754,7 +732,7 @@ where pub async fn new( client: C, wallet: Wallet, - shielded: ShieldedContext, + shielded: ShieldedWallet, io: I, ) -> crate::error::Result> { let native_token = query_native_token(&client).await?; @@ -776,36 +754,36 @@ where } } -#[cfg_attr(feature = "async-send", async_trait::async_trait)] -#[cfg_attr(not(feature = "async-send"), async_trait::async_trait(?Send))] -impl Namada for NamadaImpl +impl NamadaIo for NamadaImpl where - C: queries::Client + MaybeSend + Sync, + C: Client + MaybeSend + Sync, U: WalletIo + WalletStorage + MaybeSync + MaybeSend, - V: ShieldedUtils + MaybeSend + MaybeSync, - I: Io + MaybeSend + MaybeSync, + V: ShieldedUtils + MaybeSync + MaybeSend, + I: Io + MaybeSync + MaybeSend, { type Client = C; type Io = I; - type ShieldedUtils = V; - type WalletUtils = U; - - /// Obtain the prototypical Tx builder - fn tx_builder(&self) -> args::Tx { - self.prototype.clone() - } - fn native_token(&self) -> Address { - self.native_token.clone() + fn client(&self) -> &Self::Client { + &self.client } fn io(&self) -> &Self::Io { &self.io } +} - fn client(&self) -> &Self::Client { - &self.client - } +#[cfg_attr(feature = "async-send", async_trait::async_trait)] +#[cfg_attr(not(feature = "async-send"), async_trait::async_trait(?Send))] +impl Namada for NamadaImpl +where + C: Client + MaybeSend + Sync, + U: WalletIo + WalletStorage + MaybeSync + MaybeSend, + V: ShieldedUtils + MaybeSend + MaybeSync, + I: Io + MaybeSend + MaybeSync, +{ + type ShieldedUtils = V; + type WalletUtils = U; async fn wallet(&self) -> RwLockReadGuard<'_, Wallet> { self.wallet.read().await @@ -817,6 +795,10 @@ where self.wallet.write().await } + fn wallet_lock(&self) -> &RwLock> { + &self.wallet + } + async fn shielded( &self, ) -> RwLockReadGuard<'_, ShieldedContext> { @@ -829,15 +811,20 @@ where self.shielded.write().await } - fn wallet_lock(&self) -> &RwLock> { - &self.wallet + fn native_token(&self) -> Address { + 
self.native_token.clone() + } + + /// Obtain the prototypical Tx builder + fn tx_builder(&self) -> args::Tx { + self.prototype.clone() } } /// Allow the prototypical Tx builder to be modified impl args::TxBuilder for NamadaImpl where - C: queries::Client + Sync, + C: Client + Sync, U: WalletIo, V: ShieldedUtils, I: Io, @@ -875,7 +862,10 @@ pub mod testing { use namada_governance::{InitProposalData, VoteProposalData}; use namada_ibc::testing::{arb_ibc_msg_nft_transfer, arb_ibc_msg_transfer}; use namada_ibc::{MsgNftTransfer, MsgTransfer}; - use namada_token::testing::arb_denominated_amount; + use namada_token::masp::ShieldedTransfer; + use namada_token::testing::{ + arb_denominated_amount, arb_shielded_transfer, + }; use namada_token::Transfer; use namada_tx::data::pgf::UpdateStewardCommission; use namada_tx::data::pos::{ @@ -892,8 +882,6 @@ pub mod testing { use crate::chain::ChainId; use crate::eth_bridge_pool::testing::arb_pending_transfer; use crate::key::testing::arb_common_pk; - use crate::masp::testing::arb_shielded_transfer; - use crate::masp::ShieldedTransfer; use crate::time::{DateTime, DateTimeUtc, TimeZone, Utc}; use crate::tx::data::pgf::tests::arb_update_steward_commission; use crate::tx::data::pos::tests::{ diff --git a/crates/sdk/src/masp.rs b/crates/sdk/src/masp.rs index 561d56c407..433c0adee2 100644 --- a/crates/sdk/src/masp.rs +++ b/crates/sdk/src/masp.rs @@ -1,2089 +1,44 @@ //! MASP verification wrappers. 
-mod shielded_sync; -#[cfg(test)] -mod test_utils; +mod utilities; -use std::cmp::Ordering; -use std::collections::{btree_map, BTreeMap, BTreeSet}; -use std::fmt::Debug; - -use borsh::{BorshDeserialize, BorshSerialize}; -use itertools::Itertools; +use eyre::eyre; use masp_primitives::asset_type::AssetType; -#[cfg(feature = "mainnet")] -use masp_primitives::consensus::MainNetwork as Network; -#[cfg(not(feature = "mainnet"))] -use masp_primitives::consensus::TestNetwork as Network; -use masp_primitives::convert::AllowedConversion; -use masp_primitives::ff::PrimeField; -use masp_primitives::memo::MemoBytes; -use masp_primitives::merkle_tree::{ - CommitmentTree, IncrementalWitness, MerklePath, -}; -use masp_primitives::sapling::keys::FullViewingKey; -use masp_primitives::sapling::{ - Diversifier, Node, Note, Nullifier, ViewingKey, -}; -use masp_primitives::transaction::builder::{self, *}; -use masp_primitives::transaction::components::sapling::builder::{ - RngBuildParams, SaplingMetadata, -}; -use masp_primitives::transaction::components::{ - I128Sum, TxOut, U64Sum, ValueSum, -}; -use masp_primitives::transaction::fees::fixed::FeeRule; +use masp_primitives::merkle_tree::MerklePath; +use masp_primitives::sapling::Node; +use masp_primitives::transaction::components::I128Sum; use masp_primitives::transaction::Transaction; -use masp_primitives::zip32::{ - ExtendedFullViewingKey, ExtendedSpendingKey as MaspExtendedSpendingKey, -}; -use masp_proofs::prover::LocalTxProver; use namada_core::address::Address; -use namada_core::arith::CheckedAdd; use namada_core::chain::BlockHeight; -use namada_core::collections::{HashMap, HashSet}; -use namada_core::dec::Dec; use namada_core::ibc::IbcTxDataRefs; -pub use namada_core::masp::*; +use namada_core::masp::{MaspEpoch, MaspTxRefs}; use namada_core::storage::TxIndex; -use namada_core::time::DateTimeUtc; -use namada_core::uint::Uint; +use namada_core::time::DurationSecs; +use namada_core::token::{Denomination, MaspDigitPos}; use 
namada_events::extend::{ IbcMaspTxBatchRefs as IbcMaspTxBatchRefsAttr, MaspTxBatchRefs as MaspTxBatchRefsAttr, MaspTxBlockIndex as MaspTxBlockIndexAttr, ReadFromEventAttributes, }; use namada_ibc::{decode_message, extract_masp_tx_from_envelope, IbcMessage}; -use namada_macros::BorshDeserializer; -#[cfg(feature = "migrations")] -use namada_migrations::*; -pub use namada_token::validation::{ - partial_deauthorize, preload_verifying_keys, PVKs, CONVERT_NAME, - ENV_VAR_MASP_PARAMS_DIR, OUTPUT_NAME, SPEND_NAME, -}; -use namada_token::{self as token, Denomination, MaspDigitPos}; -use namada_tx::{IndexedTx, Tx}; -use rand::rngs::StdRng; -use rand_core::{CryptoRng, OsRng, RngCore, SeedableRng}; -use smooth_operator::checked; -use thiserror::Error; +use namada_io::client::Client; +use namada_token::masp::shielded_wallet::ShieldedQueries; +pub use namada_token::masp::{utils, *}; +use namada_tx::Tx; +pub use utilities::{IndexerMaspClient, LedgerMaspClient}; use crate::error::{Error, QueryError}; -use crate::io::{Io, ProgressBar}; -pub use crate::masp::shielded_sync::dispatcher::{Dispatcher, DispatcherCache}; -use crate::masp::shielded_sync::utils::MaspClient; -#[cfg(not(target_family = "wasm"))] -pub use crate::masp::shielded_sync::MaspLocalTaskEnv; -pub use crate::masp::shielded_sync::{ - utils, ShieldedSyncConfig, ShieldedSyncConfigBuilder, -}; -use crate::queries::Client; -use crate::rpc::{query_conversion, query_denom}; -use crate::task_env::TaskEnvironment; -use crate::wallet::{DatedKeypair, DatedSpendingKey}; -use crate::{ - control_flow, display_line, edisplay_line, query_native_token, rpc, - MaybeSend, MaybeSync, Namada, +use crate::rpc::{ + query_block, query_conversion, query_denom, query_masp_epoch, + query_max_block_time_estimate, query_native_token, }; - -/// Randomness seed for MASP integration tests to build proofs with -/// deterministic rng. 
-pub const ENV_VAR_MASP_TEST_SEED: &str = "NAMADA_MASP_TEST_SEED"; - -/// The network to use for MASP -const NETWORK: Network = Network; - -/// Shielded transfer -#[derive(Clone, Debug, BorshSerialize, BorshDeserialize, BorshDeserializer)] -pub struct ShieldedTransfer { - /// Shielded transfer builder - pub builder: Builder<(), ExtendedFullViewingKey, ()>, - /// MASP transaction - pub masp_tx: Transaction, - /// Metadata - pub metadata: SaplingMetadata, - /// Epoch in which the transaction was created - pub epoch: MaspEpoch, -} - -/// The data for a masp fee payment -#[allow(missing_docs)] -#[derive(Debug)] -pub struct MaspFeeData { - pub sources: Vec, - pub target: Address, - pub token: Address, - pub amount: token::DenominatedAmount, -} - -/// The data for a single masp transfer -#[allow(missing_docs)] -#[derive(Debug)] -pub struct MaspTransferData { - pub source: TransferSource, - pub target: TransferTarget, - pub token: Address, - pub amount: token::DenominatedAmount, -} - -// The data for a masp transfer relative to a given source -#[derive(Hash, Eq, PartialEq)] -struct MaspSourceTransferData { - source: TransferSource, - token: Address, -} - -// The data for a masp transfer relative to a given target -#[derive(Hash, Eq, PartialEq)] -struct MaspTargetTransferData { - source: TransferSource, - target: TransferTarget, - token: Address, -} - -/// Data to log masp transactions' errors -#[allow(missing_docs)] -#[derive(Debug)] -pub struct MaspDataLog { - pub source: Option, - pub token: Address, - pub amount: token::DenominatedAmount, -} - -struct MaspTxReorderedData { - source_data: HashMap, - target_data: HashMap, - denoms: HashMap, -} - -// Data about the unspent amounts for any given shielded source coming from the -// spent notes in their posses that have been added to the builder. 
Can be used -// to either pay fees or to return a change -type Changes = HashMap; - -/// Shielded pool data for a token -#[allow(missing_docs)] -#[derive(Debug, BorshSerialize, BorshDeserialize, BorshDeserializer)] -pub struct MaspTokenRewardData { - pub name: String, - pub address: Address, - pub max_reward_rate: Dec, - pub kp_gain: Dec, - pub kd_gain: Dec, - pub locked_amount_target: Uint, -} - -/// A return type for gen_shielded_transfer -#[allow(clippy::large_enum_variant)] -#[derive(Error, Debug)] -pub enum TransferErr { - /// Build error for masp errors - #[error("{error}")] - Build { - /// The error - error: builder::Error, - /// The optional associated transfer data for logging purposes - data: Option, - }, - /// errors - #[error("{0}")] - General(#[from] Error), -} - -/// Freeze a Builder into the format necessary for inclusion in a Tx. This is -/// the format used by hardware wallets to validate a MASP Transaction. -struct WalletMap; - -impl - masp_primitives::transaction::components::sapling::builder::MapBuilder< - P1, - MaspExtendedSpendingKey, - (), - ExtendedFullViewingKey, - > for WalletMap -{ - fn map_params(&self, _s: P1) {} - - fn map_key(&self, s: MaspExtendedSpendingKey) -> ExtendedFullViewingKey { - (&s).into() - } -} - -impl - MapBuilder - for WalletMap -{ - fn map_notifier(&self, _s: N1) {} -} - -/// Abstracts platform specific details away from the logic of shielded pool -/// operations. 
-#[cfg_attr(feature = "async-send", async_trait::async_trait)] -#[cfg_attr(not(feature = "async-send"), async_trait::async_trait(?Send))] -pub trait ShieldedUtils: - Sized + BorshDeserialize + BorshSerialize + Default + Clone -{ - /// Get a MASP transaction prover - fn local_tx_prover(&self) -> LocalTxProver; - - /// Load up the currently saved ShieldedContext - async fn load( - &self, - ctx: &mut ShieldedContext, - force_confirmed: bool, - ) -> std::io::Result<()>; - - /// Save the given ShieldedContext for future loads - async fn save( - &self, - ctx: &ShieldedContext, - ) -> std::io::Result<()>; - - /// Save a cache of data as part of shielded sync if that - /// process gets interrupted. - async fn cache_save(&self, _cache: &DispatcherCache) - -> std::io::Result<()>; - - /// Load a cache of data as part of shielded sync if that - /// process gets interrupted. - async fn cache_load(&self) -> std::io::Result; -} - -/// Make a ViewingKey that can view notes encrypted by given ExtendedSpendingKey -pub fn to_viewing_key(esk: &MaspExtendedSpendingKey) -> FullViewingKey { - ExtendedFullViewingKey::from(esk).fvk -} - -/// Generate a valid diversifier, i.e. one that has a diversified base. Return -/// also this diversified base. -pub fn find_valid_diversifier( - rng: &mut R, -) -> (Diversifier, masp_primitives::jubjub::SubgroupPoint) { - let mut diversifier; - let g_d; - // Keep generating random diversifiers until one has a diversified base - loop { - let mut d = [0; 11]; - rng.fill_bytes(&mut d); - diversifier = Diversifier(d); - if let Some(val) = diversifier.g_d() { - g_d = val; - break; - } - } - (diversifier, g_d) -} - -/// Determine if using the current note would actually bring us closer to our -/// target. 
Returns the unused amounts (change) of delta if any -pub fn is_amount_required( - src: I128Sum, - dest: I128Sum, - normed_delta: I128Sum, - opt_delta: Option, -) -> Option { - let mut changes = None; - let gap = dest.clone() - src; - - for (asset_type, value) in gap.components() { - if *value > 0 && normed_delta[asset_type] > 0 { - let signed_change_amt = - checked!(normed_delta[asset_type] - *value).unwrap_or_default(); - let unsigned_change_amt = if signed_change_amt > 0 { - signed_change_amt - } else { - // Even if there's no change we still need to set the return - // value of this function to be Some so that the caller sees - // that this note should be used - 0 - }; - - let change_amt = I128Sum::from_nonnegative( - asset_type.to_owned(), - unsigned_change_amt, - ) - .expect("Change is guaranteed to be non-negative"); - changes = changes - .map(|prev| prev + change_amt.clone()) - .or(Some(change_amt)); - } - } - - // Because of the way conversions are computed, we need an extra step here - // if the token is not the native one - if let Some(delta) = opt_delta { - // Only if this note is going to be used, handle the assets in delta - // (not normalized) that are not part of dest - changes = changes.map(|mut chngs| { - for (delta_asset_type, delta_amt) in delta.components() { - if !dest.asset_types().contains(delta_asset_type) { - let rmng = I128Sum::from_nonnegative( - delta_asset_type.to_owned(), - *delta_amt, - ) - .expect("Change is guaranteed to be non-negative"); - chngs += rmng; - } - } - - chngs - }); - } - - changes -} - -/// a masp change -#[derive(BorshSerialize, BorshDeserialize, BorshDeserializer, Debug, Clone)] -pub struct MaspChange { - /// the token address - pub asset: Address, - /// the change in the token - pub change: token::Change, -} - -/// a masp amount -pub type MaspAmount = ValueSum<(Option, Address), token::Change>; - -// A type tracking the notes used to construct a shielded transfer. 
Used to -// avoid reusing the same notes multiple times which would lead to an invalid -// transaction -type SpentNotesTracker = HashMap>; - -/// An extension of Option's cloned method for pair types -fn cloned_pair((a, b): (&T, &U)) -> (T, U) { - (a.clone(), b.clone()) -} - -/// Represents the amount used of different conversions -pub type Conversions = - BTreeMap, i128)>; - -/// Represents the changes that were made to a list of transparent accounts -pub type TransferDelta = HashMap; - -/// Represents the changes that were made to a list of shielded accounts -pub type TransactionDelta = HashMap; - -/// Maps a shielded tx to the index of its first output note. -pub type NoteIndex = BTreeMap; - -/// Maps the note index (in the commitment tree) to a witness -pub type WitnessMap = HashMap>; - -#[derive(BorshSerialize, BorshDeserialize, Debug)] -/// The possible sync states of the shielded context -pub enum ContextSyncStatus { - /// The context contains only data that has been confirmed by the protocol - Confirmed, - /// The context contains that that has not yet been confirmed by the - /// protocol and could end up being invalid - Speculative, -} - -/// Represents the current state of the shielded pool from the perspective of -/// the chosen viewing keys. -#[derive(BorshSerialize, BorshDeserialize, Debug)] -pub struct ShieldedContext { - /// Location where this shielded context is saved - #[borsh(skip)] - pub utils: U, - /// The commitment tree produced by scanning all transactions up to tx_pos - pub tree: CommitmentTree, - /// Maps viewing keys to the block height to which they are synced. - /// In particular, the height given by the value *has been scanned*. 
- pub vk_heights: BTreeMap>, - /// Maps viewing keys to applicable note positions - pub pos_map: HashMap>, - /// Maps a nullifier to the note position to which it applies - pub nf_map: HashMap, - /// Maps note positions to their corresponding notes - pub note_map: HashMap, - /// Maps note positions to their corresponding memos - pub memo_map: HashMap, - /// Maps note positions to the diversifier of their payment address - pub div_map: HashMap, - /// Maps note positions to their witness (used to make merkle paths) - pub witness_map: WitnessMap, - /// The set of note positions that have been spent - pub spents: HashSet, - /// Maps asset types to their decodings - pub asset_types: HashMap, - /// Maps note positions to their corresponding viewing keys - pub vk_map: HashMap, - /// Maps a shielded tx to the index of its first output note. - pub note_index: NoteIndex, - /// The sync state of the context - pub sync_status: ContextSyncStatus, -} - -/// Default implementation to ease construction of TxContexts. Derive cannot be -/// used here due to CommitmentTree not implementing Default. -impl Default for ShieldedContext { - fn default() -> ShieldedContext { - ShieldedContext:: { - utils: U::default(), - vk_heights: BTreeMap::new(), - note_index: BTreeMap::default(), - tree: CommitmentTree::empty(), - pos_map: HashMap::default(), - nf_map: HashMap::default(), - note_map: HashMap::default(), - memo_map: HashMap::default(), - div_map: HashMap::default(), - witness_map: HashMap::default(), - spents: HashSet::default(), - asset_types: HashMap::default(), - vk_map: HashMap::default(), - sync_status: ContextSyncStatus::Confirmed, - } - } -} - -impl ShieldedContext { - /// Try to load the last saved shielded context from the given context - /// directory. If this fails, then leave the current context unchanged. 
- pub async fn load(&mut self) -> std::io::Result<()> { - self.utils.clone().load(self, false).await - } - - /// Try to load the last saved confirmed shielded context from the given - /// context directory. If this fails, then leave the current context - /// unchanged. - pub async fn load_confirmed(&mut self) -> std::io::Result<()> { - self.utils.clone().load(self, true).await?; - - Ok(()) - } - - /// Save this shielded context into its associated context directory. If the - /// state to be saved is confirmed than also delete the speculative one (if - /// available) - pub async fn save(&self) -> std::io::Result<()> { - self.utils.save(self).await - } - - /// Update the merkle tree of witnesses the first time we - /// scan new MASP transactions. - fn update_witness_map( - &mut self, - indexed_tx: IndexedTx, - shielded: &[Transaction], - ) -> Result<(), Error> { - let mut note_pos = self.tree.size(); - self.note_index.insert(indexed_tx, note_pos); - - for tx in shielded { - for so in - tx.sapling_bundle().map_or(&vec![], |x| &x.shielded_outputs) - { - // Create merkle tree leaf node from note commitment - let node = Node::new(so.cmu.to_repr()); - // Update each merkle tree in the witness map with the latest - // addition - for (_, witness) in self.witness_map.iter_mut() { - witness.append(node).map_err(|()| { - Error::Other("note commitment tree is full".to_string()) - })?; - } - self.tree.append(node).map_err(|()| { - Error::Other("note commitment tree is full".to_string()) - })?; - // Finally, make it easier to construct merkle paths to this new - // note - let witness = IncrementalWitness::::from_tree(&self.tree); - self.witness_map.insert(note_pos, witness); - note_pos += 1; - } - } - Ok(()) - } - - /// Sync the current state of the multi-asset shielded pool in a - /// ShieldedContext with the state on-chain. 
- pub async fn sync( - &mut self, - env: impl TaskEnvironment, - config: ShieldedSyncConfig, - last_query_height: Option, - sks: &[DatedSpendingKey], - fvks: &[DatedKeypair], - ) -> Result<(), Error> - where - M: MaspClient + Send + Sync + Unpin + 'static, - T: ProgressBar, - I: control_flow::ShutdownSignal, - { - env.run(|spawner| async move { - let dispatcher = config.dispatcher(spawner, &self.utils).await; - - if let Some(updated_ctx) = - dispatcher.run(None, last_query_height, sks, fvks).await? - { - *self = updated_ctx; - } - - Ok(()) - }) - .await - } - - fn min_height_to_sync_from(&self) -> Result { - let Some(maybe_least_synced_vk_height) = - self.vk_heights.values().min().cloned() - else { - return Err(Error::Other( - "No viewing keys are available in the shielded context to \ - decrypt notes with" - .to_string(), - )); - }; - Ok(maybe_least_synced_vk_height - .map_or_else(BlockHeight::first, |itx| itx.height)) - } - - #[allow(missing_docs)] - pub fn save_decrypted_shielded_outputs( - &mut self, - vk: &ViewingKey, - note_pos: usize, - note: Note, - pa: masp_primitives::sapling::PaymentAddress, - memo: MemoBytes, - ) -> Result<(), Error> { - // Add this note to list of notes decrypted by this - // viewing key - self.pos_map.entry(*vk).or_default().insert(note_pos); - // Compute the nullifier now to quickly recognize when - // spent - let nf = note.nf( - &vk.nk, - note_pos.try_into().map_err(|_| { - Error::Other("Can not get nullifier".to_string()) - })?, - ); - self.note_map.insert(note_pos, note); - self.memo_map.insert(note_pos, memo); - // The payment address' diversifier is required to spend - // note - self.div_map.insert(note_pos, *pa.diversifier()); - self.nf_map.insert(nf, note_pos); - self.vk_map.insert(note_pos, *vk); - Ok(()) - } - - #[allow(missing_docs)] - pub fn save_shielded_spends(&mut self, transactions: &[Transaction]) { - for stx in transactions { - for ss in - stx.sapling_bundle().map_or(&vec![], |x| &x.shielded_spends) - { - // If the 
shielded spend's nullifier is in our map, then target - // note is rendered unusable - if let Some(note_pos) = self.nf_map.get(&ss.nullifier) { - self.spents.insert(*note_pos); - } - } - } - } - - /// Compute the total unspent notes associated with the viewing key in the - /// context. If the key is not in the context, then we do not know the - /// balance and hence we return None. - pub async fn compute_shielded_balance( - &mut self, - vk: &ViewingKey, - ) -> Result, Error> { - // Cannot query the balance of a key that's not in the map - if !self.pos_map.contains_key(vk) { - return Ok(None); - } - let mut val_acc = I128Sum::zero(); - // Retrieve the notes that can be spent by this key - if let Some(avail_notes) = self.pos_map.get(vk) { - for note_idx in avail_notes { - // Spent notes cannot contribute a new transaction's pool - if self.spents.contains(note_idx) { - continue; - } - // Get note associated with this ID - let note = self.note_map.get(note_idx).ok_or_else(|| { - Error::Other(format!("Unable to get note {note_idx}")) - })?; - // Finally add value to multi-asset accumulator - val_acc += I128Sum::from_nonnegative( - note.asset_type, - note.value as i128, - ) - .map_err(|()| { - Error::Other( - "found note with invalid value or asset type" - .to_string(), - ) - })? - } - } - Ok(Some(val_acc)) - } - - /// Use the addresses already stored in the wallet to precompute as many - /// asset types as possible. 
- pub async fn precompute_asset_types( - &mut self, - client: &C, - tokens: Vec<&Address>, - ) -> Result<(), Error> { - // To facilitate lookups of human-readable token names - for token in tokens { - let Some(denom) = query_denom(client, token).await else { - return Err(Error::Query(QueryError::General(format!( - "denomination for token {token}" - )))); - }; - for position in MaspDigitPos::iter() { - let asset_type = - encode_asset_type(token.clone(), denom, position, None) - .map_err(|_| { - Error::Other( - "unable to create asset type".to_string(), - ) - })?; - self.asset_types.insert( - asset_type, - AssetData { - token: token.clone(), - denom, - position, - epoch: None, - }, - ); - } - } - Ok(()) - } - - /// Query the ledger for the decoding of the given asset type and cache it - /// if it is found. - pub async fn decode_asset_type( - &mut self, - client: &C, - asset_type: AssetType, - ) -> Option { - // Try to find the decoding in the cache - if let decoded @ Some(_) = self.asset_types.get(&asset_type) { - return decoded.cloned(); - } - // Query for the ID of the last accepted transaction - let (token, denom, position, ep, _conv, _path): ( - Address, - Denomination, - MaspDigitPos, - _, - I128Sum, - MerklePath, - ) = rpc::query_conversion(client, asset_type).await?; - let pre_asset_type = AssetData { - token, - denom, - position, - epoch: Some(ep), - }; - self.asset_types.insert(asset_type, pre_asset_type.clone()); - Some(pre_asset_type) - } - - /// Query the ledger for the conversion that is allowed for the given asset - /// type and cache it. 
- async fn query_allowed_conversion<'a, C: Client + Sync>( - &'a mut self, - client: &C, - asset_type: AssetType, - conversions: &'a mut Conversions, - ) { - if let btree_map::Entry::Vacant(conv_entry) = - conversions.entry(asset_type) - { - // Query for the ID of the last accepted transaction - let Some((token, denom, position, ep, conv, path)) = - query_conversion(client, asset_type).await - else { - return; - }; - self.asset_types.insert( - asset_type, - AssetData { - token, - denom, - position, - epoch: Some(ep), - }, - ); - // If the conversion is 0, then we just have a pure decoding - if !conv.is_zero() { - conv_entry.insert((conv.into(), path, 0)); - } - } - } - - /// Compute the total unspent notes associated with the viewing key in the - /// context and express that value in terms of the currently timestamped - /// asset types. If the key is not in the context, then we do not know the - /// balance and hence we return None. - pub async fn compute_exchanged_balance( - &mut self, - client: &(impl Client + Sync), - io: &impl Io, - vk: &ViewingKey, - target_epoch: MaspEpoch, - ) -> Result, Error> { - // First get the unexchanged balance - if let Some(balance) = self.compute_shielded_balance(vk).await? { - let exchanged_amount = self - .compute_exchanged_amount( - client, - io, - balance, - target_epoch, - BTreeMap::new(), - ) - .await? - .0; - // And then exchange balance into current asset types - Ok(Some(exchanged_amount)) - } else { - Ok(None) - } - } - - /// Try to convert as much of the given asset type-value pair using the - /// given allowed conversion. usage is incremented by the amount of the - /// conversion used, the conversions are applied to the given input, and - /// the trace amount that could not be converted is moved from input to - /// output. 
- #[allow(clippy::too_many_arguments)] - async fn apply_conversion( - &mut self, - io: &impl Io, - conv: AllowedConversion, - asset_type: AssetType, - value: i128, - usage: &mut i128, - input: &mut I128Sum, - output: &mut I128Sum, - normed_asset_type: AssetType, - normed_output: &mut I128Sum, - ) -> Result<(), Error> { - // we do not need to convert negative values - if value <= 0 { - return Ok(()); - } - // If conversion if possible, accumulate the exchanged amount - let conv: I128Sum = I128Sum::from_sum(conv.into()); - // The amount required of current asset to qualify for conversion - let threshold = -conv[&asset_type]; - if threshold == 0 { - edisplay_line!( - io, - "Asset threshold of selected conversion for asset type {} is \ - 0, this is a bug, please report it.", - asset_type - ); - } - // We should use an amount of the AllowedConversion that almost - // cancels the original amount - let required = value / threshold; - // Forget about the trace amount left over because we cannot - // realize its value - let trace = I128Sum::from_pair(asset_type, value % threshold); - let normed_trace = - I128Sum::from_pair(normed_asset_type, value % threshold); - // Record how much more of the given conversion has been used - *usage += required; - // Apply the conversions to input and move the trace amount to output - *input += conv * required - trace.clone(); - *output += trace; - *normed_output += normed_trace; - Ok(()) - } - - /// Convert the given amount into the latest asset types whilst making a - /// note of the conversions that were used. Note that this function does - /// not assume that allowed conversions from the ledger are expressed in - /// terms of the latest asset types. 
- pub async fn compute_exchanged_amount( - &mut self, - client: &(impl Client + Sync), - io: &impl Io, - mut input: I128Sum, - target_epoch: MaspEpoch, - mut conversions: Conversions, - ) -> Result<(I128Sum, I128Sum, Conversions), Error> { - // Where we will store our exchanged value - let mut output = I128Sum::zero(); - // Where we will store our normed exchanged value - let mut normed_output = I128Sum::zero(); - // Repeatedly exchange assets until it is no longer possible - while let Some((asset_type, value)) = - input.components().next().map(cloned_pair) - { - // Get the equivalent to the current asset in the target epoch and - // note whether this equivalent chronologically comes after the - // current asset - let (target_asset_type, forward_conversion) = self - .decode_asset_type(client, asset_type) - .await - .map(|mut pre_asset_type| { - let old_epoch = pre_asset_type.redate(target_epoch); - pre_asset_type - .encode() - .map(|asset_type| { - ( - asset_type, - old_epoch.map_or(false, |epoch| { - target_epoch >= epoch - }), - ) - }) - .map_err(|_| { - Error::Other( - "unable to create asset type".to_string(), - ) - }) - }) - .transpose()? - .unwrap_or((asset_type, false)); - let at_target_asset_type = target_asset_type == asset_type; - let trace_asset_type = if forward_conversion { - // If we are doing a forward conversion, then we can assume that - // the trace left over in the older epoch has at least a 1-to-1 - // conversion to the newer epoch. - target_asset_type - } else { - // If we are not doing a forward conversion, then we cannot - // lower bound what the asset type will be worth in the target - // asset type. So leave the asset type fixed. 
- asset_type - }; - // Fetch and store the required conversions - self.query_allowed_conversion( - client, - target_asset_type, - &mut conversions, - ) - .await; - self.query_allowed_conversion(client, asset_type, &mut conversions) - .await; - if let (Some((conv, _wit, usage)), false) = - (conversions.get_mut(&asset_type), at_target_asset_type) - { - display_line!( - io, - "converting current asset type to latest asset type..." - ); - // Not at the target asset type, not at the latest asset - // type. Apply conversion to get from - // current asset type to the latest - // asset type. - self.apply_conversion( - io, - conv.clone(), - asset_type, - value, - usage, - &mut input, - &mut output, - trace_asset_type, - &mut normed_output, - ) - .await?; - } else if let (Some((conv, _wit, usage)), false) = ( - conversions.get_mut(&target_asset_type), - at_target_asset_type, - ) { - display_line!( - io, - "converting latest asset type to target asset type..." - ); - // Not at the target asset type, yet at the latest asset - // type. Apply inverse conversion to get - // from latest asset type to the target - // asset type. - self.apply_conversion( - io, - conv.clone(), - asset_type, - value, - usage, - &mut input, - &mut output, - trace_asset_type, - &mut normed_output, - ) - .await?; - } else { - // At the target asset type. Then move component over to - // output. - let comp = input.project(asset_type); - output += comp.clone(); - normed_output += comp.clone(); - input -= comp; - } - } - Ok((output, normed_output, conversions)) - } - - /// Collect enough unspent notes in this context to exceed the given amount - /// of the specified asset type. Return the total value accumulated plus - /// notes and the corresponding diversifiers/merkle paths that were used to - /// achieve the total value. Updates the changes map. 
- #[allow(clippy::too_many_arguments)] - pub async fn collect_unspent_notes( - &mut self, - context: &impl Namada, - spent_notes: &mut SpentNotesTracker, - sk: namada_core::masp::ExtendedSpendingKey, - is_native_token: bool, - target: I128Sum, - target_epoch: MaspEpoch, - changes: &mut Changes, - ) -> Result< - ( - I128Sum, - Vec<(Diversifier, Note, MerklePath)>, - Conversions, - ), - Error, - > { - let vk = &to_viewing_key(&sk.into()).vk; - // TODO: we should try to use the smallest notes possible to fund the - // transaction to allow people to fetch less often - // Establish connection with which to do exchange rate queries - let mut conversions = BTreeMap::new(); - let mut val_acc = I128Sum::zero(); - let mut normed_val_acc = I128Sum::zero(); - let mut notes = Vec::new(); - - // Retrieve the notes that can be spent by this key - if let Some(avail_notes) = self.pos_map.get(vk).cloned() { - for note_idx in &avail_notes { - // Skip spend notes already used in this transaction - if spent_notes - .get(vk) - .is_some_and(|set| set.contains(note_idx)) - { - continue; - } - // No more transaction inputs are required once we have met - // the target amount - if normed_val_acc >= target { - break; - } - // Spent notes from the shielded context (i.e. 
from previous - // transactions) cannot contribute a new transaction's pool - if self.spents.contains(note_idx) { - continue; - } - // Get note, merkle path, diversifier associated with this ID - let note = *self.note_map.get(note_idx).ok_or_else(|| { - Error::Other(format!("Unable to get note {note_idx}")) - })?; - - // The amount contributed by this note before conversion - let pre_contr = - I128Sum::from_pair(note.asset_type, note.value as i128); - let (contr, normed_contr, proposed_convs) = self - .compute_exchanged_amount( - context.client(), - context.io(), - pre_contr, - target_epoch, - conversions.clone(), - ) - .await?; - - let opt_delta = if is_native_token { - None - } else { - Some(contr.clone()) - }; - // Use this note only if it brings us closer to our target - if let Some(change) = is_amount_required( - normed_val_acc.clone(), - target.clone(), - normed_contr.clone(), - opt_delta, - ) { - // Be sure to record the conversions used in computing - // accumulated value - val_acc += contr; - normed_val_acc += normed_contr; - - // Update the changes - changes - .entry(sk) - .and_modify(|amt| *amt += &change) - .or_insert(change); - - // Commit the conversions that were used to exchange - conversions = proposed_convs; - let merkle_path = self - .witness_map - .get(note_idx) - .ok_or_else(|| { - Error::Other(format!( - "Unable to get note {note_idx}" - )) - })? 
- .path() - .ok_or_else(|| { - Error::Other(format!( - "Unable to get path: {}", - line!() - )) - })?; - let diversifier = - self.div_map.get(note_idx).ok_or_else(|| { - Error::Other(format!( - "Unable to get note {note_idx}" - )) - })?; - // Commit this note to our transaction - notes.push((*diversifier, note, merkle_path)); - // Append the note the list of used ones - spent_notes - .entry(vk.to_owned()) - .and_modify(|set| { - set.insert(*note_idx); - }) - .or_insert([*note_idx].into_iter().collect()); - } - } - } - Ok((val_acc, notes, conversions)) - } - - /// Convert an amount whose units are AssetTypes to one whose units are - /// Addresses that they decode to. All asset types not corresponding to - /// the given epoch are ignored. - pub async fn decode_combine_sum_to_epoch( - &mut self, - client: &C, - amt: I128Sum, - target_epoch: MaspEpoch, - ) -> (ValueSum, I128Sum) { - let mut res = ValueSum::zero(); - let mut undecoded = ValueSum::zero(); - for (asset_type, val) in amt.components() { - // Decode the asset type - let decoded = self.decode_asset_type(client, *asset_type).await; - // Only assets with the target timestamp count - match decoded { - Some(pre_asset_type) - if pre_asset_type - .epoch - .map_or(true, |epoch| epoch <= target_epoch) => - { - let decoded_change = token::Change::from_masp_denominated( - *val, - pre_asset_type.position, - ) - .expect("expected this to fit"); - res += ValueSum::from_pair( - pre_asset_type.token, - decoded_change, - ); - } - None => { - undecoded += ValueSum::from_pair(*asset_type, *val); - } - _ => {} - } - } - (res, undecoded) - } - - /// Convert an amount whose units are AssetTypes to one whose units are - /// Addresses that they decode to and combine the denominations. 
- pub async fn decode_combine_sum( - &mut self, - client: &C, - amt: I128Sum, - ) -> (MaspAmount, I128Sum) { - let mut res = MaspAmount::zero(); - let mut undecoded = ValueSum::zero(); - for (asset_type, val) in amt.components() { - // Decode the asset type - if let Some(decoded) = - self.decode_asset_type(client, *asset_type).await - { - let decoded_change = token::Change::from_masp_denominated( - *val, - decoded.position, - ) - .expect("expected this to fit"); - res += MaspAmount::from_pair( - (decoded.epoch, decoded.token), - decoded_change, - ); - } else { - undecoded += ValueSum::from_pair(*asset_type, *val); - } - } - (res, undecoded) - } - - /// Convert an amount whose units are AssetTypes to one whose units are - /// Addresses that they decode to. - pub async fn decode_sum( - &mut self, - client: &C, - amt: I128Sum, - ) -> ValueSum<(AssetType, AssetData), i128> { - let mut res = ValueSum::zero(); - for (asset_type, val) in amt.components() { - // Decode the asset type - if let Some(decoded) = - self.decode_asset_type(client, *asset_type).await - { - res += ValueSum::from_pair((*asset_type, decoded), *val); - } - } - res - } - - /// Make shielded components to embed within a Transfer object. If no - /// shielded payment address nor spending key is specified, then no - /// shielded components are produced. Otherwise a transaction containing - /// nullifiers and/or note commitments are produced. Dummy transparent - /// UTXOs are sometimes used to make transactions balanced, but it is - /// understood that transparent account changes are effected only by the - /// amounts and signatures specified by the containing Transfer object. - pub async fn gen_shielded_transfer( - context: &impl Namada, - data: Vec, - fee_data: Option, - update_ctx: bool, - ) -> Result, TransferErr> { - // Try to get a seed from env var, if any. 
- #[allow(unused_mut)] - let mut rng = StdRng::from_rng(OsRng).unwrap(); - #[cfg(feature = "testing")] - let mut rng = if let Ok(seed) = std::env::var(ENV_VAR_MASP_TEST_SEED) - .map_err(|e| Error::Other(e.to_string())) - .and_then(|seed| { - let exp_str = - format!("Env var {ENV_VAR_MASP_TEST_SEED} must be a u64."); - let parsed_seed: u64 = - seed.parse().map_err(|_| Error::Other(exp_str))?; - Ok(parsed_seed) - }) { - tracing::warn!( - "UNSAFE: Using a seed from {ENV_VAR_MASP_TEST_SEED} env var \ - to build proofs." - ); - StdRng::seed_from_u64(seed) - } else { - rng - }; - - // TODO: if the user requested the default expiration, there might be a - // small discrepancy between the datetime we calculate here and the one - // we set for the transaction. This should be small enough to not cause - // any issue, in case refactor this function to request the precise - // datetime to the caller - let expiration_height: u32 = - match context.tx_builder().expiration.to_datetime() { - Some(expiration) => { - // Try to match a DateTime expiration with a plausible - // corresponding block height - let last_block_height: u64 = - crate::rpc::query_block(context.client()) - .await? - .map_or_else(|| 1, |block| u64::from(block.height)); - #[allow(clippy::disallowed_methods)] - let current_time = DateTimeUtc::now(); - let delta_time = - expiration.0.signed_duration_since(current_time.0); - - let max_block_time = - crate::rpc::query_max_block_time_estimate(context) - .await?; - - let delta_blocks = u32::try_from( - delta_time.num_seconds() / max_block_time.0 as i64, - ) - .map_err(|e| Error::Other(e.to_string()))?; - u32::try_from(last_block_height) - .map_err(|e| Error::Other(e.to_string()))? - + delta_blocks - } - None => { - // NOTE: The masp library doesn't support optional - // expiration so we set the max to mimic - // a never-expiring tx. 
We also need to - // remove 20 which is going to be added back by the builder - u32::MAX - 20 - } - }; - let mut builder = Builder::::new( - NETWORK, - // NOTE: this is going to add 20 more blocks to the actual - // expiration but there's no other exposed function that we could - // use from the masp crate to specify the expiration better - expiration_height.into(), - ); - // Determine epoch in which to submit potential shielded transaction - let epoch = rpc::query_masp_epoch(context.client()).await?; - - let mut notes_tracker = SpentNotesTracker::new(); - { - // Load the current shielded context given - // the spending key we possess - let mut shielded = context.shielded_mut().await; - let _ = shielded.load().await; - } - - let Some(MaspTxReorderedData { - source_data, - target_data, - mut denoms, - }) = Self::reorder_data_for_masp_transfer(context, data).await? - else { - // No shielded components are needed when neither source nor - // destination are shielded - return Ok(None); - }; - let mut changes = Changes::default(); - - for (MaspSourceTransferData { source, token }, amount) in &source_data { - Self::add_inputs( - context, - &mut builder, - source, - token, - amount, - epoch, - &denoms, - &mut notes_tracker, - &mut changes, - ) - .await?; - } - - for ( - MaspTargetTransferData { - source, - target, - token, - }, - amount, - ) in target_data - { - Self::add_outputs( - context, - &mut builder, - source, - &target, - token, - amount, - epoch, - &denoms, - ) - .await?; - } - - // Collect the fees if needed - if let Some(MaspFeeData { - sources, - target, - token, - amount, - }) = fee_data - { - Self::add_fees( - context, - &mut builder, - &source_data, - sources, - &target, - &token, - &amount, - epoch, - &mut denoms, - &mut notes_tracker, - &mut changes, - ) - .await?; - } - - // Finally, add outputs representing the change from this payment. 
- Self::add_changes(&mut builder, changes)?; - - let builder_clone = builder.clone().map_builder(WalletMap); - // Build and return the constructed transaction - #[cfg(not(feature = "testing"))] - let prover = context.shielded().await.utils.local_tx_prover(); - #[cfg(feature = "testing")] - let prover = testing::MockTxProver(std::sync::Mutex::new(OsRng)); - let (masp_tx, metadata) = builder - .build( - &prover, - &FeeRule::non_standard(U64Sum::zero()), - &mut rng, - &mut RngBuildParams::new(OsRng), - ) - .map_err(|error| TransferErr::Build { error, data: None })?; - - if update_ctx { - // Cache the generated transfer - let mut shielded_ctx = context.shielded_mut().await; - shielded_ctx - .pre_cache_transaction(std::slice::from_ref(&masp_tx)) - .await?; - } - - Ok(Some(ShieldedTransfer { - builder: builder_clone, - masp_tx, - metadata, - epoch, - })) - } - - // Group all the information for every source/token and target/token couple, - // and extract the denominations for all the tokens involved (expect the one - // involved in the fees if needed). This step is required so that we can - // collect the amount required for every couple and pass it to the - // appropriate function so that notes can be collected based on the correct - // amount. 
- async fn reorder_data_for_masp_transfer( - context: &impl Namada, - data: Vec, - ) -> Result, TransferErr> { - let mut source_data = - HashMap::::new(); - let mut target_data = - HashMap::::new(); - let mut denoms = HashMap::new(); - - for MaspTransferData { - source, - target, - token, - amount, - } in data - { - let spending_key = source.spending_key(); - let payment_address = target.payment_address(); - // No shielded components are needed when neither source nor - // destination are shielded - if spending_key.is_none() && payment_address.is_none() { - return Ok(None); - } - - if denoms.get(&token).is_none() { - if let Some(denom) = query_denom(context.client(), &token).await - { - denoms.insert(token.clone(), denom); - } else { - return Err(TransferErr::General(Error::from( - QueryError::General(format!( - "denomination for token {token}" - )), - ))); - }; - } - - let key = MaspSourceTransferData { - source: source.clone(), - token: token.clone(), - }; - match source_data.get_mut(&key) { - Some(prev_amount) => { - *prev_amount = checked!(prev_amount.to_owned() + amount) - .map_err(|e| TransferErr::General(e.into()))?; - } - None => { - source_data.insert(key, amount); - } - } - - let key = MaspTargetTransferData { - source, - target, - token, - }; - match target_data.get_mut(&key) { - Some(prev_amount) => { - *prev_amount = checked!(prev_amount.to_owned() + amount) - .map_err(|e| TransferErr::General(e.into()))?; - } - None => { - target_data.insert(key, amount); - } - } - } - - Ok(Some(MaspTxReorderedData { - source_data, - target_data, - denoms, - })) - } - - // Add the necessary transaction inputs to the builder. 
- #[allow(clippy::too_many_arguments)] - async fn add_inputs( - context: &impl Namada, - builder: &mut Builder, - source: &TransferSource, - token: &Address, - amount: &token::DenominatedAmount, - epoch: MaspEpoch, - denoms: &HashMap, - notes_tracker: &mut SpentNotesTracker, - changes: &mut Changes, - ) -> Result, TransferErr> { - // We want to fund our transaction solely from supplied spending key - let spending_key = source.spending_key(); - - // Now we build up the transaction within this object - - // Convert transaction amount into MASP types - // Ok to unwrap cause we've already seen the token before, the - // denomination must be there - let denom = denoms.get(token).unwrap(); - let (asset_types, masp_amount) = { - let mut shielded = context.shielded_mut().await; - // Do the actual conversion to an asset type - let amount = shielded - .convert_namada_amount_to_masp( - context.client(), - epoch, - token, - denom.to_owned(), - amount.amount(), - ) - .await?; - // Make sure to save any decodings of the asset types used so - // that balance queries involving them are - // successful - let _ = shielded.save().await; - amount - }; - - // If there are shielded inputs - let added_amt = if let Some(sk) = spending_key { - let is_native_token = - &query_native_token(context.client()).await? 
== token; - // Locate unspent notes that can help us meet the transaction - // amount - let (added_amount, unspent_notes, used_convs) = context - .shielded_mut() - .await - .collect_unspent_notes( - context, - notes_tracker, - sk, - is_native_token, - I128Sum::from_sum(masp_amount), - epoch, - changes, - ) - .await?; - // Commit the notes found to our transaction - for (diversifier, note, merkle_path) in unspent_notes { - builder - .add_sapling_spend( - sk.into(), - diversifier, - note, - merkle_path, - ) - .map_err(|e| TransferErr::Build { - error: builder::Error::SaplingBuild(e), - data: None, - })?; - } - // Commit the conversion notes used during summation - for (conv, wit, value) in used_convs.values() { - if value.is_positive() { - builder - .add_sapling_convert( - conv.clone(), - *value as u64, - wit.clone(), - ) - .map_err(|e| TransferErr::Build { - error: builder::Error::SaplingBuild(e), - data: None, - })?; - } - } - - Some(added_amount) - } else { - // We add a dummy UTXO to our transaction, but only the source - // of the parent Transfer object is used to - // validate fund availability - let script = source - .t_addr_data() - .ok_or_else(|| { - Error::Other( - "source address should be transparent".to_string(), - ) - })? 
- .taddress(); - - for (digit, asset_type) in - MaspDigitPos::iter().zip(asset_types.iter()) - { - let amount_part = digit.denominate(&amount.amount()); - // Skip adding an input if its value is 0 - if amount_part != 0 { - builder - .add_transparent_input(TxOut { - asset_type: *asset_type, - value: amount_part, - address: script, - }) - .map_err(|e| TransferErr::Build { - error: builder::Error::TransparentBuild(e), - data: None, - })?; - } - } - - None - }; - - Ok(added_amt) - } - - // Add the necessary transaction outputs to the builder - #[allow(clippy::too_many_arguments)] - async fn add_outputs( - context: &impl Namada, - builder: &mut Builder, - source: TransferSource, - target: &TransferTarget, - token: Address, - amount: token::DenominatedAmount, - epoch: MaspEpoch, - denoms: &HashMap, - ) -> Result<(), TransferErr> { - // Anotate the asset type in the value balance with its decoding in - // order to facilitate cross-epoch computations - let value_balance = context - .shielded_mut() - .await - .decode_sum(context.client(), builder.value_balance()) - .await; - - let payment_address = target.payment_address(); - - // This indicates how many more assets need to be sent to the - // receiver in order to satisfy the requested transfer - // amount. - let mut rem_amount = amount.amount().raw_amount().0; - - // Ok to unwrap cause we've already seen the token before, the - // denomination must be there - let denom = denoms.get(&token).unwrap(); - - // Now handle the outputs of this transaction - // Loop through the value balance components and see which - // ones can be given to the receiver - for ((asset_type, decoded), val) in value_balance.components() { - let rem_amount = &mut rem_amount[decoded.position as usize]; - // Only asset types with the correct token can contribute. But - // there must be a demonstrated need for it. 
- if decoded.token == token - && &decoded.denom == denom - && decoded.epoch.map_or(true, |vbal_epoch| vbal_epoch <= epoch) - && *rem_amount > 0 - { - let val = u128::try_from(*val).expect( - "value balance in absence of output descriptors should be \ - non-negative", - ); - // We want to take at most the remaining quota for the - // current denomination to the receiver - let contr = std::cmp::min(*rem_amount as u128, val) as u64; - // If we are sending to a shielded address, we need the outgoing - // viewing key in the following computations. - let ovk_opt = source - .spending_key() - .map(|x| MaspExtendedSpendingKey::from(x).expsk.ovk); - // Make transaction output tied to the current token, - // denomination, and epoch. - if let Some(pa) = payment_address { - // If there is a shielded output - builder - .add_sapling_output( - ovk_opt, - pa.into(), - *asset_type, - contr, - MemoBytes::empty(), - ) - .map_err(|e| TransferErr::Build { - error: builder::Error::SaplingBuild(e), - data: None, - })?; - } else if let Some(t_addr_data) = target.t_addr_data() { - // If there is a transparent output - builder - .add_transparent_output( - &t_addr_data.taddress(), - *asset_type, - contr, - ) - .map_err(|e| TransferErr::Build { - error: builder::Error::TransparentBuild(e), - data: None, - })?; - } else { - return Result::Err(TransferErr::from(Error::Other( - "transaction target must be a payment address or \ - Namada address or IBC address" - .to_string(), - ))); - } - // Lower what is required of the remaining contribution - *rem_amount -= contr; - } - } - - // Nothing must remain to be included in output - if rem_amount != [0; 4] { - let (asset_types, _) = { - let mut shielded = context.shielded_mut().await; - // Do the actual conversion to an asset type - let amount = shielded - .convert_namada_amount_to_masp( - context.client(), - epoch, - &token, - denom.to_owned(), - amount.amount(), - ) - .await?; - // Make sure to save any decodings of the asset types used so - // that 
balance queries involving them are - // successful - let _ = shielded.save().await; - amount - }; - - // Convert the shortfall into a I128Sum - let mut shortfall = I128Sum::zero(); - for (asset_type, val) in asset_types.iter().zip(rem_amount) { - shortfall += I128Sum::from_pair(*asset_type, val.into()); - } - // Return an insufficient funds error - return Result::Err(TransferErr::Build { - error: builder::Error::InsufficientFunds(shortfall), - data: Some(MaspDataLog { - source: Some(source), - token, - amount, - }), - }); - } - - Ok(()) - } - - // Add the necessary note to include a masp fee payment in the transaction. - // Funds are gathered in the following order: - // - // 1. From the residual values of the already included spend notes (i.e. - // changes) - // 2. From new spend notes of the transaction's sources - // 3. From new spend notes of the optional gas spending keys - #[allow(clippy::too_many_arguments)] - async fn add_fees( - context: &impl Namada, - builder: &mut Builder, - source_data: &HashMap, - sources: Vec, - target: &Address, - token: &Address, - amount: &token::DenominatedAmount, - epoch: MaspEpoch, - denoms: &mut HashMap, - notes_tracker: &mut SpentNotesTracker, - changes: &mut Changes, - ) -> Result<(), TransferErr> { - if denoms.get(token).is_none() { - if let Some(denom) = query_denom(context.client(), token).await { - denoms.insert(token.to_owned(), denom); - } else { - return Err(TransferErr::General(Error::from( - QueryError::General(format!( - "denomination for token {token}" - )), - ))); - }; - } - - let raw_amount = amount.amount().raw_amount().0; - let (asset_types, _) = { - let mut shielded = context.shielded_mut().await; - // Do the actual conversion to an asset type - let (asset_types, amount) = shielded - .convert_namada_amount_to_masp( - context.client(), - epoch, - token, - // Safe to unwrap - denoms.get(token).unwrap().to_owned(), - amount.amount(), - ) - .await?; - // Make sure to save any decodings of the asset types used so 
- // that balance queries involving them are - // successful - let _ = shielded.save().await; - (asset_types, amount) - }; - - let mut fees = I128Sum::zero(); - // Convert the shortfall into a I128Sum - for (asset_type, val) in asset_types.iter().zip(raw_amount) { - fees += I128Sum::from_nonnegative(*asset_type, val.into()) - .map_err(|()| { - TransferErr::General(Error::Other( - "Fee amount is expected expected to be non-negative" - .to_string(), - )) - })?; - } - - // 1. Try to use the change to pay fees - let mut temp_changes = Changes::default(); - - for (sp, changes) in changes.iter() { - for (asset_type, change) in changes.components() { - for (_, fee_amt) in fees - .clone() - .components() - .filter(|(axt, _)| *axt == asset_type) - { - // Get the minimum between the available change and - // the due fee - let output_amt = I128Sum::from_nonnegative( - asset_type.to_owned(), - *change.min(fee_amt), - ) - .map_err(|()| { - TransferErr::General(Error::Other( - "Fee amount is expected to be non-negative" - .to_string(), - )) - })?; - let denominated_output_amt = context - .shielded_mut() - .await - .convert_masp_amount_to_namada( - context.client(), - // Safe to unwrap - denoms.get(token).unwrap().to_owned(), - output_amt.clone(), - ) - .await?; - - Self::add_outputs( - context, - builder, - TransferSource::ExtendedSpendingKey(sp.to_owned()), - &TransferTarget::Address(target.clone()), - token.clone(), - denominated_output_amt, - epoch, - denoms, - ) - .await?; - - fees -= &output_amt; - // Update the changes - temp_changes - .entry(*sp) - .and_modify(|amt| *amt += &output_amt) - .or_insert(output_amt); - } - } - - if fees.is_zero() { - break; - } - } - - // Decrease the changes by the amounts used for fee payment - for (sp, temp_changes) in temp_changes.iter() { - for (asset_type, temp_change) in temp_changes.components() { - let output_amt = I128Sum::from_nonnegative( - asset_type.to_owned(), - *temp_change, - ) - .map_err(|()| { - 
TransferErr::General(Error::Other( - "Fee amount is expected expected to be non-negative" - .to_string(), - )) - })?; - - // Entry is guaranteed to be in the map - changes.entry(*sp).and_modify(|amt| *amt -= &output_amt); - } - } - - if !fees.is_zero() { - // 2. Look for unused spent notes of the sources and the optional - // gas spending keys (sources first) - for fee_source in - source_data.iter().map(|(src, _)| src.source.clone()).chain( - sources - .into_iter() - .map(TransferSource::ExtendedSpendingKey), - ) - { - for (asset_type, fee_amt) in fees.clone().components() { - let input_amt = I128Sum::from_nonnegative( - asset_type.to_owned(), - *fee_amt, - ) - .map_err(|()| { - TransferErr::General(Error::Other( - "Fee amount is expected expected to be \ - non-negative" - .to_string(), - )) - })?; - let denominated_fee = context - .shielded_mut() - .await - .convert_masp_amount_to_namada( - context.client(), - // Safe to unwrap - denoms.get(token).unwrap().to_owned(), - input_amt.clone(), - ) - .await?; - - let Some(found_amt) = Self::add_inputs( - context, - builder, - &fee_source, - token, - &denominated_fee, - epoch, - denoms, - notes_tracker, - changes, - ) - .await? 
- else { - continue; - }; - // Pick the minimum between the due fee and the amount found - let output_amt = match found_amt.partial_cmp(&input_amt) { - None | Some(Ordering::Less) => found_amt, - _ => input_amt.clone(), - }; - let denom_amt = context - .shielded_mut() - .await - .convert_masp_amount_to_namada( - context.client(), - // Safe to unwrap - denoms.get(token).unwrap().to_owned(), - output_amt.clone(), - ) - .await?; - - Self::add_outputs( - context, - builder, - fee_source.clone(), - &TransferTarget::Address(target.clone()), - token.clone(), - denom_amt, - epoch, - denoms, - ) - .await?; - - fees -= &output_amt; - } - - if fees.is_zero() { - break; - } - } - } - - if !fees.is_zero() { - return Result::Err(TransferErr::Build { - error: builder::Error::InsufficientFunds(fees), - data: Some(MaspDataLog { - source: None, - token: token.to_owned(), - amount: *amount, - }), - }); - } - - Ok(()) - } - - // Consumes the changes and adds them back to the original sources to - // balance the transaction. 
This function has to be called after - // `add_fees` cause we might have some change coming from there too - #[allow(clippy::result_large_err)] - fn add_changes( - builder: &mut Builder, - changes: Changes, - ) -> Result<(), TransferErr> { - for (sp, changes) in changes.into_iter() { - for (asset_type, amt) in changes.components() { - if let Ordering::Greater = amt.cmp(&0) { - let sk = MaspExtendedSpendingKey::from(sp.to_owned()); - // Send the change in this asset type back to the sender - builder - .add_sapling_output( - Some(sk.expsk.ovk), - sk.default_address().1, - *asset_type, - *amt as u64, - MemoBytes::empty(), - ) - .map_err(|e| TransferErr::Build { - error: builder::Error::SaplingBuild(e), - data: None, - })?; - } - } - } - - // Final safety check on the value balance to verify that the - // transaction is balanced - let value_balance = builder.value_balance(); - if !value_balance.is_zero() { - return Result::Err(TransferErr::Build { - error: builder::Error::InsufficientFunds(value_balance), - data: None, - }); - } - - Ok(()) - } - - // Updates the internal state with the data of the newly generated - // transaction. More specifically invalidate the spent notes, but do not - // cache the newly produced output descriptions and therefore the merkle - // tree - async fn pre_cache_transaction( - &mut self, - masp_txs: &[Transaction], - ) -> Result<(), Error> { - self.save_shielded_spends(masp_txs); - - // Save the speculative state for future usage - self.sync_status = ContextSyncStatus::Speculative; - self.save().await.map_err(|e| Error::Other(e.to_string()))?; - - Ok(()) - } - - /// Get the asset type with the given epoch, token, and denomination. If it - /// does not exist in the protocol, then remove the timestamp. Make sure to - /// store the derived AssetType so that future decoding is possible. 
- pub async fn get_asset_type( - &mut self, - client: &C, - decoded: &mut AssetData, - ) -> Result { - let mut asset_type = decoded.encode().map_err(|_| { - Error::Other("unable to create asset type".to_string()) - })?; - if self.decode_asset_type(client, asset_type).await.is_none() { - // If we fail to decode the epoched asset type, then remove the - // epoch - decoded.undate(); - asset_type = decoded.encode().map_err(|_| { - Error::Other("unable to create asset type".to_string()) - })?; - self.asset_types.insert(asset_type, decoded.clone()); - } - Ok(asset_type) - } - - /// Convert Namada amount and token type to MASP equivalents - async fn convert_namada_amount_to_masp( - &mut self, - client: &C, - epoch: MaspEpoch, - token: &Address, - denom: Denomination, - val: token::Amount, - ) -> Result<([AssetType; 4], U64Sum), Error> { - let mut amount = U64Sum::zero(); - let mut asset_types = Vec::new(); - for position in MaspDigitPos::iter() { - let mut pre_asset_type = AssetData { - epoch: Some(epoch), - token: token.clone(), - denom, - position, - }; - let asset_type = - self.get_asset_type(client, &mut pre_asset_type).await?; - // Combine the value and unit into one amount - amount += - U64Sum::from_nonnegative(asset_type, position.denominate(&val)) - .map_err(|_| { - Error::Other("invalid value for amount".to_string()) - })?; - asset_types.push(asset_type); - } - Ok(( - asset_types - .try_into() - .expect("there must be exactly 4 denominations"), - amount, - )) - } - - /// Convert MASP amount to Namada equivalent - async fn convert_masp_amount_to_namada( - &mut self, - client: &C, - denom: Denomination, - amt: I128Sum, - ) -> Result { - let mut amount = token::Amount::zero(); - let value_sum = self.decode_sum(client, amt).await; - - for ((_, decoded), val) in value_sum.components() { - let positioned_amt = token::Amount::from_masp_denominated_i128( - *val, - decoded.position, - ) - .unwrap_or_default(); - amount = checked!(amount + positioned_amt)?; - } - - 
Ok(token::DenominatedAmount::new(amount, denom)) - } -} +use crate::{token, MaybeSend, MaybeSync}; /// Extract the relevant shield portions of a [`Tx`], if any. fn extract_masp_tx( tx: &Tx, masp_section_refs: &MaspTxRefs, -) -> Result, Error> { +) -> Result, eyre::Error> { // NOTE: simply looking for masp sections attached to the tx // is not safe. We don't validate the sections attached to a // transaction se we could end up with transactions carrying @@ -2095,7 +50,7 @@ fn extract_masp_tx( .iter() .try_fold(vec![], |mut acc, hash| { match tx.get_masp_section(hash).cloned().ok_or_else(|| { - Error::Other("Missing expected masp transaction".to_string()) + eyre!("Missing expected masp transaction".to_string()) }) { Ok(transaction) => { acc.push(transaction); @@ -2182,1001 +137,88 @@ async fn get_indexed_masp_events_at_height( })) } -#[cfg(test)] -mod tests { - use masp_proofs::bls12_381::Bls12; - - use super::*; +/// An implementation of a shielded wallet +/// along with methods for interacting with a node +#[derive(Default, Debug)] +pub struct ShieldedContext(ShieldedWallet); - /// quick and dirty test. 
will fail on size check - #[test] - #[should_panic(expected = "parameter file size is not correct")] - fn test_wrong_masp_params() { - use std::io::Write; - - let tempdir = tempfile::tempdir() - .expect("expected a temp dir") - .into_path(); - let fake_params_paths = - [SPEND_NAME, OUTPUT_NAME, CONVERT_NAME].map(|p| tempdir.join(p)); - for path in &fake_params_paths { - let mut f = - std::fs::File::create(path).expect("expected a temp file"); - f.write_all(b"fake params") - .expect("expected a writable temp file"); - f.sync_all() - .expect("expected a writable temp file (on sync)"); - } - - std::env::set_var(ENV_VAR_MASP_PARAMS_DIR, tempdir.as_os_str()); - // should panic here - masp_proofs::load_parameters( - &fake_params_paths[0], - &fake_params_paths[1], - &fake_params_paths[2], - ); - } - - /// a more involved test, using dummy parameters with the right - /// size but the wrong hash. - #[test] - #[should_panic(expected = "parameter file is not correct")] - fn test_wrong_masp_params_hash() { - use masp_primitives::ff::PrimeField; - use masp_proofs::bellman::groth16::{ - generate_random_parameters, Parameters, - }; - use masp_proofs::bellman::{Circuit, ConstraintSystem, SynthesisError}; - use masp_proofs::bls12_381::Scalar; - - struct FakeCircuit { - x: E, - } - - impl Circuit for FakeCircuit { - fn synthesize>( - self, - cs: &mut CS, - ) -> Result<(), SynthesisError> { - let x = cs.alloc(|| "x", || Ok(self.x)).unwrap(); - cs.enforce( - || { - "this is an extra long constraint name so that rustfmt \ - is ok with wrapping the params of enforce()" - }, - |lc| lc + x, - |lc| lc + x, - |lc| lc + x, - ); - Ok(()) - } - } - - let dummy_circuit = FakeCircuit { x: Scalar::zero() }; - let mut rng = rand::thread_rng(); - let fake_params: Parameters = - generate_random_parameters(dummy_circuit, &mut rng) - .expect("expected to generate fake params"); - - let tempdir = tempfile::tempdir() - .expect("expected a temp dir") - .into_path(); - // TODO: get masp to export these 
consts - let fake_params_paths = [ - (SPEND_NAME, 49848572u64), - (OUTPUT_NAME, 16398620u64), - (CONVERT_NAME, 22570940u64), - ] - .map(|(p, s)| (tempdir.join(p), s)); - for (path, size) in &fake_params_paths { - let mut f = - std::fs::File::create(path).expect("expected a temp file"); - fake_params - .write(&mut f) - .expect("expected a writable temp file"); - // the dummy circuit has one constraint, and therefore its - // params should always be smaller than the large masp - // circuit params. so this truncate extends the file, and - // extra bytes at the end do not make it invalid. - f.set_len(*size) - .expect("expected to truncate the temp file"); - f.sync_all() - .expect("expected a writable temp file (on sync)"); - } - - std::env::set_var(ENV_VAR_MASP_PARAMS_DIR, tempdir.as_os_str()); - // should panic here - masp_proofs::load_parameters( - &fake_params_paths[0].0, - &fake_params_paths[1].0, - &fake_params_paths[2].0, - ); +impl From> for ShieldedWallet { + fn from(ctx: ShieldedContext) -> Self { + ctx.0 } } - -#[cfg(any(test, feature = "testing"))] -/// Tests and strategies for transactions -pub mod testing { - use std::ops::AddAssign; - use std::sync::Mutex; - - use masp_primitives::consensus::testing::arb_height; - use masp_primitives::constants::SPENDING_KEY_GENERATOR; - use masp_primitives::group::GroupEncoding; - use masp_primitives::sapling::note_encryption::{ - try_sapling_note_decryption, PreparedIncomingViewingKey, - }; - use masp_primitives::sapling::prover::TxProver; - use masp_primitives::sapling::redjubjub::{PublicKey, Signature}; - use masp_primitives::sapling::{ProofGenerationKey, Rseed}; - use masp_primitives::transaction::components::sapling::builder::StoredBuildParams; - use masp_primitives::transaction::components::{ - OutputDescription, GROTH_PROOF_SIZE, - }; - use masp_primitives::transaction::{ - Authorization, Authorized, TransparentAddress, - }; - use masp_proofs::bellman::groth16::Proof; - use masp_proofs::bls12_381; - use 
masp_proofs::bls12_381::{Bls12, G1Affine, G2Affine}; - use namada_core::address::testing::arb_non_internal_address; - use namada_token::{DenominatedAmount, Transfer}; - use proptest::prelude::*; - use proptest::sample::SizeRange; - use proptest::test_runner::TestRng; - use proptest::{collection, option, prop_compose}; - - use super::*; - use crate::address::testing::arb_address; - use crate::address::MASP; - use crate::masp_primitives::consensus::BranchId; - use crate::masp_primitives::constants::VALUE_COMMITMENT_RANDOMNESS_GENERATOR; - use crate::masp_primitives::merkle_tree::FrozenCommitmentTree; - use crate::masp_primitives::sapling::keys::OutgoingViewingKey; - use crate::masp_primitives::sapling::redjubjub::PrivateKey; - use crate::masp_primitives::transaction::components::transparent::testing::arb_transparent_address; - use crate::token::testing::arb_denomination; - - /// This function computes `value` in the exponent of the value commitment - /// base - fn masp_compute_value_balance( - asset_type: AssetType, - value: i128, - ) -> Option { - // Compute the absolute value (failing if -i128::MAX is - // the value) - let abs = match value.checked_abs() { - Some(a) => a as u128, - None => return None, - }; - - // Is it negative? We'll have to negate later if so. - let is_negative = value.is_negative(); - - // Compute it in the exponent - let mut abs_bytes = [0u8; 32]; - abs_bytes[0..16].copy_from_slice(&abs.to_le_bytes()); - let mut value_balance = asset_type.value_commitment_generator() - * jubjub::Fr::from_bytes(&abs_bytes).unwrap(); - - // Negate if necessary - if is_negative { - value_balance = -value_balance; - } - - // Convert to unknown order point - Some(value_balance.into()) - } - - /// A context object for creating the Sapling components of a Zcash - /// transaction. 
- pub struct SaplingProvingContext { - bsk: jubjub::Fr, - // (sum of the Spend value commitments) - (sum of the Output value - // commitments) - cv_sum: jubjub::ExtendedPoint, - } - - /// An implementation of TxProver that does everything except generating - /// valid zero-knowledge proofs. Uses the supplied source of randomness to - /// carry out its operations. - pub struct MockTxProver(pub Mutex); - - impl TxProver for MockTxProver { - type SaplingProvingContext = SaplingProvingContext; - - fn new_sapling_proving_context(&self) -> Self::SaplingProvingContext { - SaplingProvingContext { - bsk: jubjub::Fr::zero(), - cv_sum: jubjub::ExtendedPoint::identity(), - } - } - - fn spend_proof( - &self, - ctx: &mut Self::SaplingProvingContext, - proof_generation_key: ProofGenerationKey, - _diversifier: Diversifier, - _rseed: Rseed, - ar: jubjub::Fr, - asset_type: AssetType, - value: u64, - _anchor: bls12_381::Scalar, - _merkle_path: MerklePath, - rcv: jubjub::Fr, - ) -> Result< - ([u8; GROTH_PROOF_SIZE], jubjub::ExtendedPoint, PublicKey), - (), - > { - // Accumulate the value commitment randomness in the context - { - let mut tmp = rcv; - tmp.add_assign(&ctx.bsk); - - // Update the context - ctx.bsk = tmp; - } - - // Construct the value commitment - let value_commitment = asset_type.value_commitment(value, rcv); - - // This is the result of the re-randomization, we compute it for the - // caller - let rk = PublicKey(proof_generation_key.ak.into()) - .randomize(ar, SPENDING_KEY_GENERATOR); - - // Compute value commitment - let value_commitment: jubjub::ExtendedPoint = - value_commitment.commitment().into(); - - // Accumulate the value commitment in the context - ctx.cv_sum += value_commitment; - - let mut zkproof = [0u8; GROTH_PROOF_SIZE]; - let proof = Proof:: { - a: G1Affine::generator(), - b: G2Affine::generator(), - c: G1Affine::generator(), - }; - proof - .write(&mut zkproof[..]) - .expect("should be able to serialize a proof"); - Ok((zkproof, value_commitment, rk)) - 
} - - fn output_proof( - &self, - ctx: &mut Self::SaplingProvingContext, - _esk: jubjub::Fr, - _payment_address: masp_primitives::sapling::PaymentAddress, - _rcm: jubjub::Fr, - asset_type: AssetType, - value: u64, - rcv: jubjub::Fr, - ) -> ([u8; GROTH_PROOF_SIZE], jubjub::ExtendedPoint) { - // Accumulate the value commitment randomness in the context - { - let mut tmp = rcv.neg(); // Outputs subtract from the total. - tmp.add_assign(&ctx.bsk); - - // Update the context - ctx.bsk = tmp; - } - - // Construct the value commitment for the proof instance - let value_commitment = asset_type.value_commitment(value, rcv); - - // Compute the actual value commitment - let value_commitment_point: jubjub::ExtendedPoint = - value_commitment.commitment().into(); - - // Accumulate the value commitment in the context. We do this to - // check internal consistency. - ctx.cv_sum -= value_commitment_point; // Outputs subtract from the total. - - let mut zkproof = [0u8; GROTH_PROOF_SIZE]; - let proof = Proof:: { - a: G1Affine::generator(), - b: G2Affine::generator(), - c: G1Affine::generator(), - }; - proof - .write(&mut zkproof[..]) - .expect("should be able to serialize a proof"); - - (zkproof, value_commitment_point) - } - - fn convert_proof( - &self, - ctx: &mut Self::SaplingProvingContext, - allowed_conversion: AllowedConversion, - value: u64, - _anchor: bls12_381::Scalar, - _merkle_path: MerklePath, - rcv: jubjub::Fr, - ) -> Result<([u8; GROTH_PROOF_SIZE], jubjub::ExtendedPoint), ()> - { - // Accumulate the value commitment randomness in the context - { - let mut tmp = rcv; - tmp.add_assign(&ctx.bsk); - - // Update the context - ctx.bsk = tmp; - } - - // Construct the value commitment - let value_commitment = - allowed_conversion.value_commitment(value, rcv); - - // Compute value commitment - let value_commitment: jubjub::ExtendedPoint = - value_commitment.commitment().into(); - - // Accumulate the value commitment in the context - ctx.cv_sum += value_commitment; - - let mut 
zkproof = [0u8; GROTH_PROOF_SIZE]; - let proof = Proof:: { - a: G1Affine::generator(), - b: G2Affine::generator(), - c: G1Affine::generator(), - }; - proof - .write(&mut zkproof[..]) - .expect("should be able to serialize a proof"); - - Ok((zkproof, value_commitment)) - } - - fn binding_sig( - &self, - ctx: &mut Self::SaplingProvingContext, - assets_and_values: &I128Sum, - sighash: &[u8; 32], - ) -> Result { - // Initialize secure RNG - let mut rng = self.0.lock().unwrap(); - - // Grab the current `bsk` from the context - let bsk = PrivateKey(ctx.bsk); - - // Grab the `bvk` using DerivePublic. - let bvk = PublicKey::from_private( - &bsk, - VALUE_COMMITMENT_RANDOMNESS_GENERATOR, - ); - - // In order to check internal consistency, let's use the accumulated - // value commitments (as the verifier would) and apply - // value_balance to compare against our derived bvk. - { - let final_bvk = assets_and_values - .components() - .map(|(asset_type, value_balance)| { - // Compute value balance for each asset - // Error for bad value balances (-INT128_MAX value) - masp_compute_value_balance(*asset_type, *value_balance) - }) - .try_fold(ctx.cv_sum, |tmp, value_balance| { - // Compute cv_sum minus sum of all value balances - Result::<_, ()>::Ok(tmp - value_balance.ok_or(())?) - })?; - - // The result should be the same, unless the provided - // valueBalance is wrong. 
- if bvk.0 != final_bvk { - return Err(()); - } - } - - // Construct signature message - let mut data_to_be_signed = [0u8; 64]; - data_to_be_signed[0..32].copy_from_slice(&bvk.0.to_bytes()); - data_to_be_signed[32..64].copy_from_slice(&sighash[..]); - - // Sign - Ok(bsk.sign( - &data_to_be_signed, - &mut *rng, - VALUE_COMMITMENT_RANDOMNESS_GENERATOR, - )) - } - } - - #[derive(Debug, Clone)] - /// Adapts a CSPRNG from a PRNG for proptesting - pub struct TestCsprng(R); - - impl CryptoRng for TestCsprng {} - - impl RngCore for TestCsprng { - fn next_u32(&mut self) -> u32 { - self.0.next_u32() - } - - fn next_u64(&mut self) -> u64 { - self.0.next_u64() - } - - fn fill_bytes(&mut self, dest: &mut [u8]) { - self.0.fill_bytes(dest) - } - - fn try_fill_bytes( - &mut self, - dest: &mut [u8], - ) -> Result<(), rand::Error> { - self.0.try_fill_bytes(dest) - } - } - - prop_compose! { - /// Expose a random number generator - pub fn arb_rng()(rng in Just(()).prop_perturb(|(), rng| rng)) -> TestRng { - rng - } - } - - prop_compose! { - /// Generate an arbitrary output description with the given value - pub fn arb_output_description( - asset_type: AssetType, - value: u64, - )( - mut rng in arb_rng().prop_map(TestCsprng), - ) -> (Option, masp_primitives::sapling::PaymentAddress, AssetType, u64, MemoBytes) { - let mut spending_key_seed = [0; 32]; - rng.fill_bytes(&mut spending_key_seed); - let spending_key = MaspExtendedSpendingKey::master(spending_key_seed.as_ref()); - - let viewing_key = ExtendedFullViewingKey::from(&spending_key).fvk.vk; - let (div, _g_d) = find_valid_diversifier(&mut rng); - let payment_addr = viewing_key - .to_payment_address(div) - .expect("a PaymentAddress"); - - (None, payment_addr, asset_type, value, MemoBytes::empty()) - } - } - - prop_compose! 
{ - /// Generate an arbitrary spend description with the given value - pub fn arb_spend_description( - asset_type: AssetType, - value: u64, - )( - address in arb_transparent_address(), - expiration_height in arb_height(BranchId::MASP, &Network), - mut rng in arb_rng().prop_map(TestCsprng), - bparams_rng in arb_rng().prop_map(TestCsprng), - prover_rng in arb_rng().prop_map(TestCsprng), - ) -> (MaspExtendedSpendingKey, Diversifier, Note, Node) { - let mut spending_key_seed = [0; 32]; - rng.fill_bytes(&mut spending_key_seed); - let spending_key = MaspExtendedSpendingKey::master(spending_key_seed.as_ref()); - - let viewing_key = ExtendedFullViewingKey::from(&spending_key).fvk.vk; - let (div, _g_d) = find_valid_diversifier(&mut rng); - let payment_addr = viewing_key - .to_payment_address(div) - .expect("a PaymentAddress"); - - let mut builder = Builder::::new( - NETWORK, - // NOTE: this is going to add 20 more blocks to the actual - // expiration but there's no other exposed function that we could - // use from the masp crate to specify the expiration better - expiration_height.unwrap(), - ); - // Add a transparent input to support our desired shielded output - builder.add_transparent_input(TxOut { asset_type, value, address }).unwrap(); - // Finally add the shielded output that we need - builder.add_sapling_output(None, payment_addr, asset_type, value, MemoBytes::empty()).unwrap(); - // Build a transaction in order to get its shielded outputs - let (transaction, metadata) = builder.build( - &MockTxProver(Mutex::new(prover_rng)), - &FeeRule::non_standard(U64Sum::zero()), - &mut rng, - &mut RngBuildParams::new(bparams_rng), - ).unwrap(); - // Extract the shielded output from the transaction - let shielded_output = &transaction - .sapling_bundle() - .unwrap() - .shielded_outputs[metadata.output_index(0).unwrap()]; - - // Let's now decrypt the constructed notes - let (note, pa, _memo) = try_sapling_note_decryption::<_, OutputDescription<<::SaplingAuth as 
masp_primitives::transaction::components::sapling::Authorization>::Proof>>( - &NETWORK, - 1.into(), - &PreparedIncomingViewingKey::new(&viewing_key.ivk()), - shielded_output, - ).unwrap(); - assert_eq!(payment_addr, pa); - // Make a path to out new note - let node = Node::new(shielded_output.cmu.to_repr()); - (spending_key, div, note, node) - } - } - - prop_compose! { - /// Generate an arbitrary MASP denomination - pub fn arb_masp_digit_pos()(denom in 0..4u8) -> MaspDigitPos { - MaspDigitPos::from(denom) - } - } - - // Maximum value for a note partition - const MAX_MONEY: u64 = 100; - // Maximum number of partitions for a note - const MAX_SPLITS: usize = 3; - - prop_compose! { - /// Arbitrarily partition the given vector of integers into sets and sum - /// them - pub fn arb_partition(values: Vec)(buckets in ((!values.is_empty()) as usize)..=values.len())( - values in Just(values.clone()), - assigns in collection::vec(0..buckets, values.len()), - buckets in Just(buckets), - ) -> Vec { - let mut buckets = vec![0; buckets]; - for (bucket, value) in assigns.iter().zip(values) { - buckets[*bucket] += value; - } - buckets - } - } - - prop_compose! { - /// Generate arbitrary spend descriptions with the given asset type - /// partitioning the given values - pub fn arb_spend_descriptions( - asset: AssetData, - values: Vec, - )(partition in arb_partition(values))( - spend_description in partition - .iter() - .map(|value| arb_spend_description( - encode_asset_type( - asset.token.clone(), - asset.denom, - asset.position, - asset.epoch, - ).unwrap(), - *value, - )).collect::>() - ) -> Vec<(MaspExtendedSpendingKey, Diversifier, Note, Node)> { - spend_description - } - } - - prop_compose! 
{ - /// Generate arbitrary output descriptions with the given asset type - /// partitioning the given values - pub fn arb_output_descriptions( - asset: AssetData, - values: Vec, - )(partition in arb_partition(values))( - output_description in partition - .iter() - .map(|value| arb_output_description( - encode_asset_type( - asset.token.clone(), - asset.denom, - asset.position, - asset.epoch, - ).unwrap(), - *value, - )).collect::>() - ) -> Vec<(Option, masp_primitives::sapling::PaymentAddress, AssetType, u64, MemoBytes)> { - output_description - } - } - - prop_compose! { - /// Generate arbitrary spend descriptions with the given asset type - /// partitioning the given values - pub fn arb_txouts( - asset: AssetData, - values: Vec, - address: TransparentAddress, - )( - partition in arb_partition(values), - ) -> Vec { - partition - .iter() - .map(|value| TxOut { - asset_type: encode_asset_type( - asset.token.clone(), - asset.denom, - asset.position, - asset.epoch, - ).unwrap(), - value: *value, - address, - }).collect::>() - } +impl ShieldedContext { + /// Create a new [`ShieldedContext`] + pub fn new(wallet: ShieldedWallet) -> Self { + Self(wallet) } +} - prop_compose! 
{ - /// Generate an arbitrary shielded MASP transaction builder - pub fn arb_shielded_builder(asset_range: impl Into)( - assets in collection::hash_map( - arb_pre_asset_type(), - collection::vec(..MAX_MONEY, ..MAX_SPLITS), - asset_range, - ), - )( - expiration_height in arb_height(BranchId::MASP, &Network), - spend_descriptions in assets - .iter() - .map(|(asset, values)| arb_spend_descriptions(asset.clone(), values.clone())) - .collect::>(), - output_descriptions in assets - .iter() - .map(|(asset, values)| arb_output_descriptions(asset.clone(), values.clone())) - .collect::>(), - input_data in collection::vec((any::(), arb_non_internal_address()), assets.len() * MAX_SPLITS), - output_data in collection::vec((any::(), arb_non_internal_address()), assets.len() * MAX_SPLITS), - assets in Just(assets), - ) -> ( - Transfer, - Builder::, - HashMap, - ) { - // Enable assets to be more easily decoded - let mut asset_decoder = BTreeMap::new(); - for asset_data in assets.keys() { - let asset_type = encode_asset_type( - asset_data.token.clone(), - asset_data.denom, - asset_data.position, - asset_data.epoch, - ).unwrap(); - asset_decoder.insert(asset_type, asset_data); - } - let mut transfer = Transfer::default(); - let mut builder = Builder::::new( - NETWORK, - // NOTE: this is going to add 20 more blocks to the actual - // expiration but there's no other exposed function that we could - // use from the masp crate to specify the expiration better - expiration_height.unwrap(), - ); - let mut leaves = Vec::new(); - // First construct a Merkle tree containing all notes to be used - for (_esk, _div, _note, node) in spend_descriptions.iter().flatten() { - leaves.push(*node); - } - let tree = FrozenCommitmentTree::new(&leaves); - // Then use the notes knowing that they all have the same anchor - for ((is_shielded, address), (idx, (esk, div, note, _node))) in - input_data.into_iter().zip(spend_descriptions.iter().flatten().enumerate()) - { - // Compute the equivalent transparent 
movement - let asset_data = asset_decoder[¬e.asset_type]; - let amount = DenominatedAmount::new( - token::Amount::from_masp_denominated(note.value, asset_data.position), - asset_data.denom, - ); - // Use either a transparent input or a shielded input - if is_shielded { - builder.add_sapling_spend(*esk, *div, *note, tree.path(idx)).unwrap(); - transfer = transfer.debit(MASP, asset_data.token.clone(), amount).unwrap(); - } else { - let txout = TxOut { - address: TAddrData::Addr(address.clone()).taddress(), - asset_type: note.asset_type, - value: note.value, - }; - builder.add_transparent_input(txout).unwrap(); - transfer = transfer.debit(address, asset_data.token.clone(), amount).unwrap(); - } - } - for ((is_shielded, address), (ovk, payment_addr, asset_type, value, memo)) in - output_data.into_iter().zip(output_descriptions.into_iter().flatten()) - { - // Compute the equivalent transparent movement - let asset_data = asset_decoder[&asset_type]; - let amount = DenominatedAmount::new( - token::Amount::from_masp_denominated(value, asset_data.position), - asset_data.denom, - ); - // Use either a transparent output or a shielded output - if is_shielded { - builder.add_sapling_output(ovk, payment_addr, asset_type, value, memo).unwrap(); - transfer = transfer.credit(MASP, asset_data.token.clone(), amount).unwrap(); - } else { - builder.add_transparent_output( - &TAddrData::Addr(address.clone()).taddress(), - asset_type, - value, - ).unwrap(); - transfer = transfer.credit(address, asset_data.token.clone(), amount).unwrap(); - } - } - (transfer, builder, assets.into_iter().map(|(k, v)| (k, v.iter().sum())).collect()) - } - } +impl std::ops::Deref for ShieldedContext { + type Target = ShieldedWallet; - prop_compose! { - /// Generate an arbitrary masp epoch - pub fn arb_masp_epoch()(epoch: u64) -> MaspEpoch{ - MaspEpoch::new(epoch) - } - } - - prop_compose! 
{ - /// Generate an arbitrary pre-asset type - pub fn arb_pre_asset_type()( - token in arb_address(), - denom in arb_denomination(), - position in arb_masp_digit_pos(), - epoch in option::of(arb_masp_epoch()), - ) -> AssetData { - AssetData { - token, - denom, - position, - epoch, - } - } + fn deref(&self) -> &Self::Target { + &self.0 } +} - prop_compose! { - /// Generate an arbitrary MASP shielded transfer - pub fn arb_shielded_transfer( - asset_range: impl Into, - )(asset_range in Just(asset_range.into()))( - (mut transfer, builder, asset_types) in arb_shielded_builder(asset_range), - epoch in arb_masp_epoch(), - prover_rng in arb_rng().prop_map(TestCsprng), - mut rng in arb_rng().prop_map(TestCsprng), - bparams_rng in arb_rng().prop_map(TestCsprng), - ) -> (Transfer, ShieldedTransfer, HashMap, StoredBuildParams) { - let mut rng_build_params = RngBuildParams::new(bparams_rng); - let (masp_tx, metadata) = builder.clone().build( - &MockTxProver(Mutex::new(prover_rng)), - &FeeRule::non_standard(U64Sum::zero()), - &mut rng, - &mut rng_build_params, - ).unwrap(); - transfer.shielded_section_hash = Some(masp_tx.txid().into()); - (transfer, ShieldedTransfer { - builder: builder.map_builder(WalletMap), - metadata, - masp_tx, - epoch, - }, asset_types, rng_build_params.to_stored().unwrap()) - } +impl std::ops::DerefMut for ShieldedContext { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 } } -#[cfg(feature = "std")] -/// Implementation of MASP functionality depending on a standard filesystem -pub mod fs { - use std::env; - use std::fs::{File, OpenOptions}; - use std::io::{Read, Write}; - use std::path::PathBuf; - - use namada_token::validation::{ - get_params_dir, CONVERT_NAME, ENV_VAR_MASP_PARAMS_DIR, OUTPUT_NAME, - SPEND_NAME, +macro_rules! 
wrap_err { + ($e:expr) => { + eyre::WrapErr::wrap_err($e, "Query failed:") }; +} - use super::*; - - /// Shielded context file name - const FILE_NAME: &str = "shielded.dat"; - const TMP_FILE_PREFIX: &str = "shielded.tmp"; - const SPECULATIVE_FILE_NAME: &str = "speculative_shielded.dat"; - const SPECULATIVE_TMP_FILE_PREFIX: &str = "speculative_shielded.tmp"; - const CACHE_FILE_NAME: &str = "shielded_sync.cache"; - const CACHE_FILE_TMP_PREFIX: &str = "shielded_sync.cache.tmp"; - - #[derive(Debug, BorshSerialize, BorshDeserialize, Clone)] - /// An implementation of ShieldedUtils for standard filesystems - pub struct FsShieldedUtils { - #[borsh(skip)] - pub(crate) context_dir: PathBuf, +impl ShieldedQueries + for ShieldedContext +{ + async fn query_native_token( + client: &C, + ) -> Result { + wrap_err!(query_native_token(client).await) } - impl FsShieldedUtils { - /// Initialize a shielded transaction context that identifies notes - /// decryptable by any viewing key in the given set - pub fn new(context_dir: PathBuf) -> ShieldedContext { - // Make sure that MASP parameters are downloaded to enable MASP - // transaction building and verification later on - let params_dir = get_params_dir(); - let spend_path = params_dir.join(SPEND_NAME); - let convert_path = params_dir.join(CONVERT_NAME); - let output_path = params_dir.join(OUTPUT_NAME); - if !(spend_path.exists() - && convert_path.exists() - && output_path.exists()) - { - #[allow(clippy::print_stdout)] - { - println!("MASP parameters not present, downloading..."); - } - masp_proofs::download_masp_parameters(None) - .expect("MASP parameters not present or downloadable"); - #[allow(clippy::print_stdout)] - { - println!( - "MASP parameter download complete, resuming \ - execution..." 
- ); - } - } - // Finally initialize a shielded context with the supplied directory - - let sync_status = - if std::fs::read(context_dir.join(SPECULATIVE_FILE_NAME)) - .is_ok() - { - // Load speculative state - ContextSyncStatus::Speculative - } else { - ContextSyncStatus::Confirmed - }; - - let utils = Self { context_dir }; - ShieldedContext { - utils, - sync_status, - ..Default::default() - } - } - - /// Write to a file ensuring that all contents of the file - /// were written by a single process (in case of multiple - /// concurrent write attempts). - /// - /// N.B. This is not the same as a file lock. If multiple - /// concurrent writes take place, this code ensures that - /// the result of exactly one will be persisted. - /// - /// N.B. This only truly works if each process uses - /// to a *unique* tmp file name. - fn atomic_file_write( - &self, - tmp_file_name: impl AsRef, - file_name: impl AsRef, - data: impl BorshSerialize, - ) -> std::io::Result<()> { - let tmp_path = self.context_dir.join(&tmp_file_name); - { - // First serialize the shielded context into a temporary file. - // Inability to create this file implies a simultaneuous write - // is in progress. In this case, immediately - // fail. This is unproblematic because the data - // intended to be stored can always be re-fetched - // from the blockchain. - let mut ctx_file = OpenOptions::new() - .write(true) - .create_new(true) - .open(tmp_path.clone())?; - let mut bytes = Vec::new(); - data.serialize(&mut bytes).unwrap_or_else(|e| { - panic!( - "cannot serialize data to {} with error: {}", - file_name.as_ref().to_string_lossy(), - e, - ) - }); - ctx_file.write_all(&bytes[..])?; - } - // Atomically update the old shielded context file with new data. - // Atomicity is required to prevent other client instances from - // reading corrupt data. 
- std::fs::rename(tmp_path, self.context_dir.join(file_name)) - } + async fn query_denom( + client: &C, + token: &Address, + ) -> Option { + query_denom(client, token).await } - impl Default for FsShieldedUtils { - fn default() -> Self { - Self { - context_dir: PathBuf::from(FILE_NAME), - } - } + async fn query_conversion( + client: &C, + asset_type: AssetType, + ) -> Option<( + Address, + Denomination, + MaspDigitPos, + MaspEpoch, + I128Sum, + MerklePath, + )> { + query_conversion(client, asset_type).await + } + + async fn query_block( + client: &C, + ) -> Result, eyre::Report> { + wrap_err!(query_block(client).await.map(|b| b.map(|h| h.height.0))) } - #[cfg_attr(feature = "async-send", async_trait::async_trait)] - #[cfg_attr(not(feature = "async-send"), async_trait::async_trait(?Send))] - impl ShieldedUtils for FsShieldedUtils { - fn local_tx_prover(&self) -> LocalTxProver { - if let Ok(params_dir) = env::var(ENV_VAR_MASP_PARAMS_DIR) { - let params_dir = PathBuf::from(params_dir); - let spend_path = params_dir.join(SPEND_NAME); - let convert_path = params_dir.join(CONVERT_NAME); - let output_path = params_dir.join(OUTPUT_NAME); - LocalTxProver::new(&spend_path, &output_path, &convert_path) - } else { - LocalTxProver::with_default_location() - .expect("unable to load MASP Parameters") - } - } - - /// Try to load the last saved shielded context from the given context - /// directory. If this fails, then leave the current context unchanged. 
- async fn load( - &self, - ctx: &mut ShieldedContext, - force_confirmed: bool, - ) -> std::io::Result<()> { - // Try to load shielded context from file - let file_name = if force_confirmed { - FILE_NAME - } else { - match ctx.sync_status { - ContextSyncStatus::Confirmed => FILE_NAME, - ContextSyncStatus::Speculative => SPECULATIVE_FILE_NAME, - } - }; - let mut ctx_file = File::open(self.context_dir.join(file_name))?; - let mut bytes = Vec::new(); - ctx_file.read_to_end(&mut bytes)?; - // Fill the supplied context with the deserialized object - *ctx = ShieldedContext { - utils: ctx.utils.clone(), - ..ShieldedContext::::deserialize(&mut &bytes[..])? - }; - Ok(()) - } - - /// Save this confirmed shielded context into its associated context - /// directory. At the same time, delete the speculative file if present - async fn save( - &self, - ctx: &ShieldedContext, - ) -> std::io::Result<()> { - let (tmp_file_pref, file_name) = match ctx.sync_status { - ContextSyncStatus::Confirmed => (TMP_FILE_PREFIX, FILE_NAME), - ContextSyncStatus::Speculative => { - (SPECULATIVE_TMP_FILE_PREFIX, SPECULATIVE_FILE_NAME) - } - }; - let tmp_file_name = { - let t = tempfile::Builder::new() - .prefix(tmp_file_pref) - .tempfile()?; - t.path().file_name().unwrap().to_owned() - }; - self.atomic_file_write(tmp_file_name, file_name, ctx)?; - - // Remove the speculative file if present since it's state is - // overruled by the confirmed one we just saved - if let ContextSyncStatus::Confirmed = ctx.sync_status { - let _ = std::fs::remove_file( - self.context_dir.join(SPECULATIVE_FILE_NAME), - ); - } - - Ok(()) - } - - async fn cache_save( - &self, - cache: &DispatcherCache, - ) -> std::io::Result<()> { - let tmp_file_name = { - let t = tempfile::Builder::new() - .prefix(CACHE_FILE_TMP_PREFIX) - .tempfile()?; - t.path().file_name().unwrap().to_owned() - }; - - self.atomic_file_write(tmp_file_name, CACHE_FILE_NAME, cache) - } + async fn query_max_block_time_estimate( + client: &C, + ) -> Result { 
+ wrap_err!(query_max_block_time_estimate(client).await) + } - async fn cache_load(&self) -> std::io::Result { - let file_name = self.context_dir.join(CACHE_FILE_NAME); - let mut file = File::open(file_name)?; - DispatcherCache::try_from_reader(&mut file) - } + async fn query_masp_epoch( + client: &C, + ) -> Result { + wrap_err!(query_masp_epoch(client).await) } } diff --git a/crates/sdk/src/masp/shielded_sync/utils.rs b/crates/sdk/src/masp/utilities.rs similarity index 62% rename from crates/sdk/src/masp/shielded_sync/utils.rs rename to crates/sdk/src/masp/utilities.rs index 7b4387fd4e..0cf563e578 100644 --- a/crates/sdk/src/masp/shielded_sync/utils.rs +++ b/crates/sdk/src/masp/utilities.rs @@ -3,16 +3,17 @@ use std::collections::BTreeMap; use std::sync::Arc; -use borsh::{BorshDeserialize, BorshSerialize}; -use masp_primitives::memo::MemoBytes; +use borsh::BorshDeserialize; use masp_primitives::merkle_tree::{CommitmentTree, IncrementalWitness}; -use masp_primitives::sapling::{Node, Note, PaymentAddress, ViewingKey}; -use masp_primitives::transaction::Transaction; +use masp_primitives::sapling::Node; use namada_core::chain::BlockHeight; use namada_core::collections::HashMap; use namada_core::storage::TxIndex; -use namada_tx::{IndexedTx, IndexedTxRange, Tx}; -#[cfg(not(target_family = "wasm"))] +use namada_io::Client; +use namada_token::masp::utils::{ + IndexedNoteEntry, MaspClient, MaspClientCapabilities, +}; +use namada_tx::{IndexedTx, Tx}; use tokio::sync::Semaphore; use crate::error::{Error, QueryError}; @@ -20,271 +21,18 @@ use crate::masp::{ extract_masp_tx, extract_masp_tx_from_ibc_message, get_indexed_masp_events_at_height, }; -use crate::queries::Client; - -/// Type alias for convenience and profit -pub type IndexedNoteData = BTreeMap>; - -/// Type alias for the entries of [`IndexedNoteData`] iterators -pub type IndexedNoteEntry = (IndexedTx, Vec); - -/// Borrowed version of an [`IndexedNoteEntry`] -pub type IndexedNoteEntryRefs<'a> = (&'a IndexedTx, &'a 
Vec); - -/// Type alias for a successful note decryption. -pub type DecryptedData = (Note, PaymentAddress, MemoBytes); - -/// Cache of decrypted notes. -#[derive(Default, BorshSerialize, BorshDeserialize)] -pub struct TrialDecrypted { - inner: - HashMap>>, -} - -impl TrialDecrypted { - /// Returns the number of successful trial decryptions in cache. - pub fn successful_decryptions(&self) -> usize { - self.inner - .values() - .flat_map(|viewing_keys_to_notes| viewing_keys_to_notes.values()) - .map(|decrypted_notes| decrypted_notes.len()) - .sum::() - } - - /// Get cached notes decrypted with `vk`, indexed at `itx`. - pub fn get( - &self, - itx: &IndexedTx, - vk: &ViewingKey, - ) -> Option<&BTreeMap> { - self.inner.get(itx).and_then(|h| h.get(vk)) - } - - /// Take cached notes decrypted with `vk`, indexed at `itx`. - pub fn take( - &mut self, - itx: &IndexedTx, - vk: &ViewingKey, - ) -> Option> { - let (notes, no_more_notes) = { - let viewing_keys_to_notes = self.inner.get_mut(itx)?; - let notes = viewing_keys_to_notes.swap_remove(vk)?; - (notes, viewing_keys_to_notes.is_empty()) - }; - if no_more_notes { - self.inner.swap_remove(itx); - } - Some(notes) - } - - /// Cache `notes` decrypted with `vk`, indexed at `itx`. - pub fn insert( - &mut self, - itx: IndexedTx, - vk: ViewingKey, - notes: BTreeMap, - ) { - self.inner.entry(itx).or_default().insert(vk, notes); - } - - /// Check if empty - pub fn is_empty(&self) -> bool { - self.inner.is_empty() - } -} - -/// A cache of fetched indexed transactions. -/// -/// An invariant that shielded-sync maintains is that -/// this cache either contains all transactions from -/// a given height, or none. -#[derive(Debug, Default, Clone, BorshSerialize, BorshDeserialize)] -pub struct Fetched { - pub(crate) txs: IndexedNoteData, -} - -impl Fetched { - /// Append elements to the cache from an iterator. 
- pub fn extend(&mut self, items: I) - where - I: IntoIterator, - { - self.txs.extend(items); - } - - /// Iterates over the fetched transactions in the order - /// they appear in blocks. - pub fn iter( - &self, - ) -> impl IntoIterator> + '_ { - &self.txs - } - - /// Iterates over the fetched transactions in the order - /// they appear in blocks, whilst taking ownership of - /// the returned data. - pub fn take(&mut self) -> impl IntoIterator { - std::mem::take(&mut self.txs) - } - - /// Add a single entry to the cache. - pub fn insert(&mut self, (k, v): IndexedNoteEntry) { - self.txs.insert(k, v); - } - - /// Check if this cache has already been populated for a given - /// block height. - pub fn contains_height(&self, height: BlockHeight) -> bool { - self.txs - .range(IndexedTxRange::with_height(height)) - .next() - .is_some() - } - - /// Check if empty - pub fn is_empty(&self) -> bool { - self.txs.is_empty() - } - - /// Check the length of the fetched cache - pub fn len(&self) -> usize { - self.txs.len() - } -} - -impl IntoIterator for Fetched { - type IntoIter = ::IntoIter; - type Item = IndexedNoteEntry; - fn into_iter(self) -> Self::IntoIter { - self.txs.into_iter() - } -} - -/// When retrying to fetch all notes in a -/// loop, this dictates the strategy for -/// how many attempts should be made. -#[derive(Debug, Copy, Clone)] -pub enum RetryStrategy { - /// Always retry - Forever, - /// Limit number of retries to a fixed number - Times(u64), -} - -impl RetryStrategy { - /// Check if retries are exhausted. - pub fn may_retry(&mut self) -> bool { - match self { - RetryStrategy::Forever => true, - RetryStrategy::Times(left) => { - if *left == 0 { - false - } else { - *left -= 1; - true - } - } - } - } -} - -/// Enumerates the capabilities of a [`MaspClient`] implementation. -#[derive(Copy, Clone, Eq, PartialEq, Debug)] -pub enum MaspClientCapabilities { - /// The masp client implementation is only capable of fetching shielded - /// transfers. 
- OnlyTransfers, - /// The masp client implementation is capable of not only fetching shielded - /// transfers, but also of fetching commitment trees, witness maps, and - /// note maps. - AllData, -} - -impl MaspClientCapabilities { - /// Check if the lack of one or more capabilities in the - /// masp client implementation warrants a manual update - /// of the witnesses map. - pub const fn needs_witness_map_update(&self) -> bool { - matches!(self, Self::OnlyTransfers) - } - - /// Check if the masp client is able to fetch a pre-built - /// commitment tree. - pub const fn may_fetch_pre_built_tree(&self) -> bool { - matches!(self, Self::AllData) - } - - /// Check if the masp client is able to fetch a pre-built - /// notes index. - pub const fn may_fetch_pre_built_notes_index(&self) -> bool { - matches!(self, Self::AllData) - } - - /// Check if the masp client is able to fetch a pre-built - /// witness map. - pub const fn may_fetch_pre_built_witness_map(&self) -> bool { - matches!(self, Self::AllData) - } -} - -/// This abstracts away the implementation details -/// of how shielded-sync fetches the necessary data -/// from a remote server. -pub trait MaspClient: Clone { - /// Return the last block height we can retrieve data from. - #[allow(async_fn_in_trait)] - async fn last_block_height(&self) -> Result, Error>; - - /// Fetch shielded transfers from blocks heights in the range `[from, to]`, - /// keeping track of progress through `progress`. The fetched transfers - /// are sent over to a separate worker through `tx_sender`. - #[allow(async_fn_in_trait)] - async fn fetch_shielded_transfers( - &self, - from: BlockHeight, - to: BlockHeight, - ) -> Result, Error>; - - /// Return the capabilities of this client. - fn capabilities(&self) -> MaspClientCapabilities; - - /// Fetch the commitment tree of height `height`. 
- #[allow(async_fn_in_trait)] - async fn fetch_commitment_tree( - &self, - height: BlockHeight, - ) -> Result, Error>; - - /// Fetch the tx notes map of height `height`. - #[allow(async_fn_in_trait)] - async fn fetch_note_index( - &self, - height: BlockHeight, - ) -> Result, Error>; - - /// Fetch the witness map of height `height`. - #[allow(async_fn_in_trait)] - async fn fetch_witness_map( - &self, - height: BlockHeight, - ) -> Result>, Error>; -} - -#[cfg(not(target_family = "wasm"))] struct LedgerMaspClientInner { client: C, semaphore: Semaphore, } /// An inefficient MASP client which simply uses a -/// client to the blockchain to query it directly. -#[cfg(not(target_family = "wasm"))] +/// client to the blockchain to query it pub struct LedgerMaspClient { inner: Arc>, } -#[cfg(not(target_family = "wasm"))] impl Clone for LedgerMaspClient { fn clone(&self) -> Self { Self { @@ -293,7 +41,6 @@ impl Clone for LedgerMaspClient { } } -#[cfg(not(target_family = "wasm"))] impl LedgerMaspClient { /// Create a new [`MaspClient`] given an rpc client. 
#[inline(always)] @@ -307,8 +54,9 @@ impl LedgerMaspClient { } } -#[cfg(not(target_family = "wasm"))] impl MaspClient for LedgerMaspClient { + type Error = Error; + async fn last_block_height(&self) -> Result, Error> { let maybe_block = crate::rpc::query_block(&self.inner.client).await?; Ok(maybe_block.map(|b| b.height)) @@ -366,8 +114,10 @@ impl MaspClient for LedgerMaspClient { .map_err(|e| Error::Other(e.to_string()))?; let mut extracted_masp_txs = vec![]; if let Some(masp_sections_refs) = masp_sections_refs { - extracted_masp_txs - .extend(extract_masp_tx(&tx, &masp_sections_refs)?); + extracted_masp_txs.extend( + extract_masp_tx(&tx, &masp_sections_refs) + .map_err(|e| Error::Other(e.to_string()))?, + ); }; if ibc_tx_data_refs.is_some() { extracted_masp_txs @@ -424,7 +174,6 @@ impl MaspClient for LedgerMaspClient { } #[derive(Debug)] -#[cfg(not(target_family = "wasm"))] struct IndexerMaspClientShared { /// Limits open connections so as not to exhaust /// the connection limit at the OS level. @@ -442,19 +191,16 @@ struct IndexerMaspClientShared { /// [`namada-masp-indexer`]. /// /// [`namada-masp-indexer`]: -#[cfg(not(target_family = "wasm"))] #[derive(Clone, Debug)] pub struct IndexerMaspClient { client: reqwest::Client, shared: Arc, } -#[cfg(not(target_family = "wasm"))] trait RequestBuilderExt { fn keep_alive(self) -> reqwest::RequestBuilder; } -#[cfg(not(target_family = "wasm"))] impl RequestBuilderExt for reqwest::RequestBuilder { #[inline(always)] fn keep_alive(self) -> reqwest::RequestBuilder { @@ -462,7 +208,6 @@ impl RequestBuilderExt for reqwest::RequestBuilder { } } -#[cfg(not(target_family = "wasm"))] impl IndexerMaspClient { /// Create a new [`IndexerMaspClient`]. 
#[inline] @@ -551,105 +296,9 @@ impl IndexerMaspClient { } } -#[derive(Copy, Clone)] -#[allow(clippy::enum_variant_names)] -#[cfg(not(target_family = "wasm"))] -enum BlockIndex { - BelowRange { - from: u64, - to: u64, - }, - InRange { - from: u64, - to: u64, - block_index_height: u64, - }, - AboveRange { - from: u64, - to: u64, - }, -} - -#[cfg(not(target_family = "wasm"))] -impl BlockIndex { - /// Get the sub-range or [`from`, `to`] for which a [`BlockIndex`] - /// built at height `block_index_height` is applicable. - fn check_block_index( - block_index_height: u64, - from: u64, - to: u64, - ) -> BlockIndex { - // applicable to whole range - if block_index_height > to { - return BlockIndex::AboveRange { from, to }; - } - // applicable to none of the range - if block_index_height < from { - return BlockIndex::BelowRange { from, to }; - } - // applicable to range [`from`, `block_index_height`] - BlockIndex::InRange { - from, - to, - block_index_height, - } - } - - /// Narrow the requested range to only those blocks - /// containing MASP notes. 
- fn needs_to_fetch( - self, - block_index: &xorf::BinaryFuse16, - ) -> std::ops::ControlFlow<(), (u64, u64)> { - use std::ops::ControlFlow; - - use xorf::Filter; - - match self { - Self::BelowRange { from, to } => ControlFlow::Continue((from, to)), - Self::InRange { - from, - block_index_height, - to, - } => { - let lowest_height_in_index = (from..=block_index_height) - .find(|height| block_index.contains(height)); - - match lowest_height_in_index { - Some(from_height_in_index) => { - ControlFlow::Continue((from_height_in_index, to)) - } - None if block_index_height == to => ControlFlow::Break(()), - None => ControlFlow::Continue((block_index_height + 1, to)), - } - } - Self::AboveRange { from, to } => { - // drop from the beginning of the range - let lowest_height_in_index = - (from..=to).find(|height| block_index.contains(height)); - - // drop from the end of the range - let maybe_bounds = - lowest_height_in_index.and_then(|lowest_height_in_index| { - let highest_height_in_index = (from..=to) - .rev() - .find(|height| block_index.contains(height))?; - - Some((lowest_height_in_index, highest_height_in_index)) - }); - - if let Some((from, to)) = maybe_bounds { - ControlFlow::Continue((from, to)) - } else { - ControlFlow::Break(()) - } - } - } - } -} - -#[cfg(not(target_family = "wasm"))] impl MaspClient for IndexerMaspClient { + type Error = Error; + async fn last_block_height(&self) -> Result, Error> { use serde::Deserialize; @@ -1046,280 +695,98 @@ impl MaspClient for IndexerMaspClient { ) } } - -/// Given a block height range we wish to request and a cache of fetched block -/// heights, returns the set of sub-ranges we need to request so that all blocks -/// in the inclusive range `[from, to]` get cached. 
-pub fn blocks_left_to_fetch( - from: BlockHeight, - to: BlockHeight, - fetched: &Fetched, -) -> Vec<[BlockHeight; 2]> { - const ZERO: BlockHeight = BlockHeight(0); - - if from > to { - panic!("Empty range passed to `blocks_left_to_fetch`, [{from}, {to}]"); - } - if from == ZERO || to == ZERO { - panic!("Block height values start at 1"); - } - - let mut to_fetch = Vec::with_capacity((to.0 - from.0 + 1) as usize); - let mut current_from = from; - let mut need_to_fetch = true; - - for height in (from.0..=to.0).map(BlockHeight) { - let height_in_cache = fetched.contains_height(height); - - // cross an upper gap boundary - if need_to_fetch && height_in_cache { - if height > current_from { - to_fetch.push([ - current_from, - height.checked_sub(1).expect("Height is greater than zero"), - ]); - } - need_to_fetch = false; - } else if !need_to_fetch && !height_in_cache { - // cross a lower gap boundary - current_from = height; - need_to_fetch = true; - } - } - if need_to_fetch { - to_fetch.push([current_from, to]); - } - to_fetch +#[derive(Copy, Clone)] +#[allow(clippy::enum_variant_names)] +enum BlockIndex { + BelowRange { + from: u64, + to: u64, + }, + InRange { + from: u64, + to: u64, + block_index_height: u64, + }, + AboveRange { + from: u64, + to: u64, + }, } -#[cfg(test)] -mod test_blocks_left_to_fetch { - use proptest::prelude::*; - - use super::*; - - struct ArbRange { - max_from: u64, - max_len: u64, - } - - impl Default for ArbRange { - fn default() -> Self { - Self { - max_from: u64::MAX, - max_len: 1000, - } +impl BlockIndex { + /// Get the sub-range or [`from`, `to`] for which a [`BlockIndex`] + /// built at height `block_index_height` is applicable. 
+ fn check_block_index( + block_index_height: u64, + from: u64, + to: u64, + ) -> BlockIndex { + // applicable to whole range + if block_index_height > to { + return BlockIndex::AboveRange { from, to }; } - } - - fn fetched_cache_with_blocks( - blocks_in_cache: impl IntoIterator, - ) -> Fetched { - let txs = blocks_in_cache - .into_iter() - .map(|height| { - ( - IndexedTx { - height, - index: TxIndex(0), - }, - vec![], - ) - }) - .collect(); - Fetched { txs } - } - - fn blocks_in_range( - from: BlockHeight, - to: BlockHeight, - ) -> impl Iterator { - (from.0..=to.0).map(BlockHeight) - } - - prop_compose! { - fn arb_block_range(ArbRange { max_from, max_len }: ArbRange) - ( - from in 1u64..=max_from, - ) - ( - from in Just(from), - to in from..from.saturating_add(max_len) - ) - -> (BlockHeight, BlockHeight) - { - (BlockHeight(from), BlockHeight(to)) + // applicable to none of the range + if block_index_height < from { + return BlockIndex::BelowRange { from, to }; } - } - - proptest! { - #[test] - fn test_empty_cache_with_singleton_output((from, to) in arb_block_range(ArbRange::default())) { - let empty_cache = fetched_cache_with_blocks([]); - - let &[[returned_from, returned_to]] = blocks_left_to_fetch( - from, - to, - &empty_cache, - ) - .as_slice() else { - return Err(TestCaseError::Fail("Test failed".into())); - }; - - prop_assert_eq!(returned_from, from); - prop_assert_eq!(returned_to, to); + // applicable to range [`from`, `block_index_height`] + BlockIndex::InRange { + from, + to, + block_index_height, } + } - #[test] - fn test_non_empty_cache_with_empty_output((from, to) in arb_block_range(ArbRange::default())) { - let cache = fetched_cache_with_blocks( - blocks_in_range(from, to) - ); + /// Narrow the requested range to only those blocks + /// containing MASP notes. 
+ fn needs_to_fetch( + self, + block_index: &xorf::BinaryFuse16, + ) -> std::ops::ControlFlow<(), (u64, u64)> { + use std::ops::ControlFlow; - let &[] = blocks_left_to_fetch( - from, - to, - &cache, - ) - .as_slice() else { - return Err(TestCaseError::Fail("Test failed".into())); - }; - } + use xorf::Filter; - #[test] - fn test_non_empty_cache_with_singleton_input_and_maybe_singleton_output( - (from, to) in arb_block_range(ArbRange::default()), - block_height in 1u64..1000, - ) { - test_non_empty_cache_with_singleton_input_and_maybe_singleton_output_inner( + match self { + Self::BelowRange { from, to } => ControlFlow::Continue((from, to)), + Self::InRange { from, + block_index_height, to, - BlockHeight(block_height), - )?; - } - - #[test] - fn test_non_empty_cache_with_singleton_hole_and_singleton_output( - (first_from, first_to) in - arb_block_range(ArbRange { - max_from: 1_000_000, - max_len: 1000, - }), - ) { - // [from, to], [to + 2, 2 * to - from + 2] - - let hole = first_to + 1; - let second_from = BlockHeight(first_to.0 + 2); - let second_to = BlockHeight(2 * first_to.0 - first_from.0 + 2); - - let cache = fetched_cache_with_blocks( - blocks_in_range(first_from, first_to) - .chain(blocks_in_range(second_from, second_to)), - ); - - let &[[returned_from, returned_to]] = blocks_left_to_fetch( - first_from, - second_to, - &cache, - ) - .as_slice() else { - return Err(TestCaseError::Fail("Test failed".into())); - }; - - prop_assert_eq!(returned_from, hole); - prop_assert_eq!(returned_to, hole); - } - } - - fn test_non_empty_cache_with_singleton_input_and_maybe_singleton_output_inner( - from: BlockHeight, - to: BlockHeight, - block_height: BlockHeight, - ) -> Result<(), TestCaseError> { - let cache = fetched_cache_with_blocks(blocks_in_range(from, to)); + } => { + let lowest_height_in_index = (from..=block_index_height) + .find(|height| block_index.contains(height)); - if block_height >= from && block_height <= to { - // random height is inside the range of txs in 
cache + match lowest_height_in_index { + Some(from_height_in_index) => { + ControlFlow::Continue((from_height_in_index, to)) + } + None if block_index_height == to => ControlFlow::Break(()), + None => ControlFlow::Continue((block_index_height + 1, to)), + } + } + Self::AboveRange { from, to } => { + // drop from the beginning of the range + let lowest_height_in_index = + (from..=to).find(|height| block_index.contains(height)); - let &[] = blocks_left_to_fetch(block_height, block_height, &cache) - .as_slice() - else { - return Err(TestCaseError::Fail("Test failed".into())); - }; - } else { - // random height is outside the range of txs in cache + // drop from the end of the range + let maybe_bounds = + lowest_height_in_index.and_then(|lowest_height_in_index| { + let highest_height_in_index = (from..=to) + .rev() + .find(|height| block_index.contains(height))?; - let &[[returned_from, returned_to]] = - blocks_left_to_fetch(block_height, block_height, &cache) - .as_slice() - else { - return Err(TestCaseError::Fail("Test failed".into())); - }; + Some((lowest_height_in_index, highest_height_in_index)) + }); - prop_assert_eq!(returned_from, block_height); - prop_assert_eq!(returned_to, block_height); + if let Some((from, to)) = maybe_bounds { + ControlFlow::Continue((from, to)) + } else { + ControlFlow::Break(()) + } + } } - - Ok(()) - } - - #[test] - fn test_happy_flow() { - let cache = fetched_cache_with_blocks([ - BlockHeight(1), - BlockHeight(5), - BlockHeight(6), - BlockHeight(8), - BlockHeight(11), - ]); - - let from = BlockHeight(1); - let to = BlockHeight(10); - - let blocks_to_fetch = blocks_left_to_fetch(from, to, &cache); - assert_eq!( - &blocks_to_fetch, - &[ - [BlockHeight(2), BlockHeight(4)], - [BlockHeight(7), BlockHeight(7)], - [BlockHeight(9), BlockHeight(10)], - ], - ); - } - - #[test] - fn test_endpoint_cases() { - let cache = - fetched_cache_with_blocks(blocks_in_range(2.into(), 4.into())); - let blocks_to_fetch = blocks_left_to_fetch(1.into(), 
3.into(), &cache); - assert_eq!(&blocks_to_fetch, &[[BlockHeight(1), BlockHeight(1)]]); - - // ------------- - - let cache = - fetched_cache_with_blocks(blocks_in_range(1.into(), 3.into())); - let blocks_to_fetch = blocks_left_to_fetch(2.into(), 4.into(), &cache); - assert_eq!(&blocks_to_fetch, &[[BlockHeight(4), BlockHeight(4)]]); - - // ------------- - - let cache = - fetched_cache_with_blocks(blocks_in_range(2.into(), 4.into())); - let blocks_to_fetch = blocks_left_to_fetch(1.into(), 5.into(), &cache); - assert_eq!( - &blocks_to_fetch, - &[ - [BlockHeight(1), BlockHeight(1)], - [BlockHeight(5), BlockHeight(5)], - ], - ); - - // ------------- - - let cache = - fetched_cache_with_blocks(blocks_in_range(1.into(), 5.into())); - let blocks_to_fetch = blocks_left_to_fetch(2.into(), 4.into(), &cache); - assert!(blocks_to_fetch.is_empty()); } } @@ -1329,7 +796,7 @@ mod test_block_index { use proptest::proptest; - use crate::masp::utils::BlockIndex; + use super::BlockIndex; /// An arbitrary filter fn block_filter() -> xorf::BinaryFuse16 { diff --git a/crates/sdk/src/queries/mod.rs b/crates/sdk/src/queries/mod.rs index f7782570ac..202a99280c 100644 --- a/crates/sdk/src/queries/mod.rs +++ b/crates/sdk/src/queries/mod.rs @@ -2,7 +2,6 @@ //! defined via `router!` macro. // Re-export to show in rustdoc! -use namada_core::chain::BlockHeight; use namada_state::{DBIter, StorageHasher, DB}; pub use shell::Shell; use shell::SHELL; @@ -16,7 +15,6 @@ pub use self::shell::eth_bridge::{ Erc20FlowControl, GenBridgePoolProofReq, GenBridgePoolProofRsp, TransferToErcArgs, TransferToEthereumStatus, }; -use crate::MaybeSend; #[macro_use] mod router; @@ -24,8 +22,6 @@ mod shell; mod types; pub mod vp; -const HEIGHT_CAST_ERR: &str = "Failed to cast block height"; - // Most commonly expected patterns should be declared first router! 
{RPC, // Shell provides storage read access, block metadata and can dry-run a tx @@ -99,6 +95,8 @@ pub fn require_no_data(request: &RequestQuery) -> namada_storage::Result<()> { #[cfg(any(test, feature = "testing"))] pub(crate) mod testing { use borsh_ext::BorshSerializeExt; + use namada_core::chain::BlockHeight; + use namada_io::client::Client; use namada_state::testing::TestState; use tendermint_rpc::Response; @@ -195,292 +193,3 @@ pub(crate) mod testing { } } } - -use std::fmt::{Debug, Display}; - -use tendermint_rpc::endpoint::{ - abci_info, block, block_results, blockchain, commit, consensus_params, - consensus_state, health, net_info, status, -}; -use tendermint_rpc::query::Query; -use tendermint_rpc::{Error as RpcError, Order}; - -use crate::tendermint::abci::response::Info; -use crate::tendermint::block::Height; - -/// A client with async request dispatcher method, which can be used to invoke -/// type-safe methods from a root [`Router`], generated -/// via `router!` macro. -#[cfg_attr(feature = "async-send", async_trait::async_trait)] -#[cfg_attr(not(feature = "async-send"), async_trait::async_trait(?Send))] -pub trait Client { - /// `std::io::Error` can happen in decoding with - /// `BorshDeserialize::try_from_slice` - type Error: From + Display + Debug; - - /// Send a simple query request at the given path. For more options, use the - /// `request` method. - async fn simple_request( - &self, - path: String, - ) -> Result, Self::Error> { - self.request(path, None, None, false) - .await - .map(|response| response.data) - } - - /// Send a query request at the given path. - async fn request( - &self, - path: String, - data: Option>, - height: Option, - prove: bool, - ) -> Result; - - /// `/abci_info`: get information about the ABCI application. - async fn abci_info(&self) -> Result { - Ok(self.perform(abci_info::Request).await?.response) - } - - /// `/broadcast_tx_sync`: broadcast a transaction, returning the response - /// from `CheckTx`. 
- async fn broadcast_tx_sync( - &self, - tx: impl Into> + MaybeSend, - ) -> Result - { - self.perform( - tendermint_rpc::endpoint::broadcast::tx_sync::Request::new(tx), - ) - .await - } - - /// `/block`: get the latest block. - async fn latest_block(&self) -> Result { - self.perform(block::Request::default()).await - } - - /// `/block`: get block at a given height. - async fn block(&self, height: H) -> Result - where - H: TryInto + Send, - { - self.perform(block::Request::new( - height - .try_into() - .map_err(|_| RpcError::parse(HEIGHT_CAST_ERR.to_string()))?, - )) - .await - } - - /// `/block_search`: search for blocks by BeginBlock and EndBlock events. - async fn block_search( - &self, - query: Query, - page: u32, - per_page: u8, - order: Order, - ) -> Result - { - self.perform(tendermint_rpc::endpoint::block_search::Request::new( - query, page, per_page, order, - )) - .await - } - - /// `/block_results`: get ABCI results for a block at a particular height. - async fn block_results( - &self, - height: H, - ) -> Result - where - H: TryInto + Send, - { - self.perform(tendermint_rpc::endpoint::block_results::Request::new( - height - .try_into() - .map_err(|_| RpcError::parse(HEIGHT_CAST_ERR.to_string()))?, - )) - .await - } - - /// `/tx_search`: search for transactions with their results. - async fn tx_search( - &self, - query: Query, - prove: bool, - page: u32, - per_page: u8, - order: Order, - ) -> Result { - self.perform(tendermint_rpc::endpoint::tx_search::Request::new( - query, prove, page, per_page, order, - )) - .await - } - - /// `/abci_query`: query the ABCI application - async fn abci_query( - &self, - path: Option, - data: V, - height: Option, - prove: bool, - ) -> Result - where - V: Into> + Send, - { - Ok(self - .perform(tendermint_rpc::endpoint::abci_query::Request::new( - path, data, height, prove, - )) - .await? - .response) - } - - /// `/block_results`: get ABCI results for the latest block. 
- async fn latest_block_results( - &self, - ) -> Result { - self.perform(block_results::Request::default()).await - } - - /// `/blockchain`: get block headers for `min` <= `height` <= `max`. - /// - /// Block headers are returned in descending order (highest first). - /// - /// Returns at most 20 items. - async fn blockchain( - &self, - min: H, - max: H, - ) -> Result - where - H: TryInto + Send, - { - self.perform(blockchain::Request::new( - min.try_into() - .map_err(|_| RpcError::parse(HEIGHT_CAST_ERR.to_string()))?, - max.try_into() - .map_err(|_| RpcError::parse(HEIGHT_CAST_ERR.to_string()))?, - )) - .await - } - - /// `/commit`: get block commit at a given height. - async fn commit(&self, height: H) -> Result - where - H: TryInto + Send, - { - self.perform(commit::Request::new( - height - .try_into() - .map_err(|_| RpcError::parse(HEIGHT_CAST_ERR.to_string()))?, - )) - .await - } - - /// `/consensus_params`: get current consensus parameters at the specified - /// height. - async fn consensus_params( - &self, - height: H, - ) -> Result - where - H: TryInto + Send, - { - self.perform(consensus_params::Request::new(Some( - height - .try_into() - .map_err(|_| RpcError::parse(HEIGHT_CAST_ERR.to_string()))?, - ))) - .await - } - - /// `/consensus_state`: get current consensus state - async fn consensus_state( - &self, - ) -> Result { - self.perform(consensus_state::Request::new()).await - } - - /// `/consensus_params`: get the latest consensus parameters. - async fn latest_consensus_params( - &self, - ) -> Result { - self.perform(consensus_params::Request::new(None)).await - } - - /// `/commit`: get the latest block commit - async fn latest_commit(&self) -> Result { - self.perform(commit::Request::default()).await - } - - /// `/health`: get node health. - /// - /// Returns empty result (200 OK) on success, no response in case of an - /// error. 
- async fn health(&self) -> Result<(), RpcError> { - self.perform(health::Request).await?; - Ok(()) - } - - /// `/net_info`: obtain information about P2P and other network connections. - async fn net_info(&self) -> Result { - self.perform(net_info::Request).await - } - - /// `/status`: get Tendermint status including node info, pubkey, latest - /// block hash, app hash, block height and time. - async fn status(&self) -> Result { - self.perform(status::Request).await - } - - /// Perform a request against the RPC endpoint - async fn perform(&self, request: R) -> Result - where - R: tendermint_rpc::SimpleRequest; -} - -#[cfg_attr(feature = "async-send", async_trait::async_trait)] -#[cfg_attr(not(feature = "async-send"), async_trait::async_trait(?Send))] -impl Client for C { - type Error = Error; - - async fn request( - &self, - path: String, - data: Option>, - height: Option, - prove: bool, - ) -> Result { - use crate::tendermint::abci::Code; - - let data = data.unwrap_or_default(); - let height = height - .map(|height| { - crate::tendermint::block::Height::try_from(height.0) - .map_err(|_err| Error::InvalidHeight(height)) - }) - .transpose()?; - - let response = self.abci_query(Some(path), data, height, prove).await?; - match response.code { - Code::Ok => Ok(EncodedResponseQuery { - data: response.value, - info: response.info, - proof: response.proof, - height: response.height.value().into(), - }), - Code::Err(code) => Err(Error::Query(response.info, code.into())), - } - } - - async fn perform(&self, request: R) -> Result - where - R: tendermint_rpc::SimpleRequest, - { - tendermint_rpc::client::Client::perform(self, request).await - } -} diff --git a/crates/sdk/src/queries/router.rs b/crates/sdk/src/queries/router.rs index bb09b6ec2f..1e8f8ea43e 100644 --- a/crates/sdk/src/queries/router.rs +++ b/crates/sdk/src/queries/router.rs @@ -410,9 +410,9 @@ macro_rules! 
pattern_and_handler_to_method { ) -> std::result::Result< $crate::queries::ResponseQuery>, - ::Error + ::Error > - where CLIENT: $crate::queries::Client + std::marker::Sync { + where CLIENT: namada_io::Client + std::marker::Sync { let path = self.storage_value_path( $( $param ),* ); let $crate::queries::ResponseQuery { @@ -462,9 +462,9 @@ macro_rules! pattern_and_handler_to_method { ) -> std::result::Result< $crate::queries::ResponseQuery<$return_type>, - ::Error + ::Error > - where CLIENT: $crate::queries::Client + std::marker::Sync { + where CLIENT: namada_io::Client + std::marker::Sync { let path = self.[<$handle _path>]( $( $param ),* ); let $crate::queries::ResponseQuery { @@ -513,9 +513,9 @@ macro_rules! pattern_and_handler_to_method { ) -> std::result::Result< $return_type, - ::Error + ::Error > - where CLIENT: $crate::queries::Client + std::marker::Sync { + where CLIENT: namada_io::Client + std::marker::Sync { let path = self.[<$handle _path>]( $( $param ),* ); let data = client.simple_request(path).await?; diff --git a/crates/sdk/src/queries/shell.rs b/crates/sdk/src/queries/shell.rs index 52132f703c..66b55c551a 100644 --- a/crates/sdk/src/queries/shell.rs +++ b/crates/sdk/src/queries/shell.rs @@ -22,6 +22,7 @@ use namada_core::uint::Uint; use namada_ibc::event::IbcEventType; use namada_state::{DBIter, LastBlock, StateRead, StorageHasher, DB}; use namada_storage::{ResultExt, StorageRead}; +use namada_token::masp::MaspTokenRewardData; use namada_token::storage_key::masp_token_map_key; use namada_tx::data::DryRunResult; @@ -31,7 +32,6 @@ use crate::events::Event; use crate::ibc::core::host::types::identifiers::{ ChannelId, ClientId, PortId, Sequence, }; -use crate::masp::MaspTokenRewardData; use crate::queries::types::{RequestCtx, RequestQuery}; use crate::queries::{require_latest_height, EncodedResponseQuery}; use crate::tendermint::merkle::proof::ProofOps; diff --git a/crates/sdk/src/queries/types.rs b/crates/sdk/src/queries/types.rs index 
f5aeb2c298..6cdb533cb6 100644 --- a/crates/sdk/src/queries/types.rs +++ b/crates/sdk/src/queries/types.rs @@ -1,12 +1,10 @@ use std::fmt::Debug; -use namada_core::chain::BlockHeight; +pub use namada_io::client::{EncodedResponseQuery, Error, ResponseQuery}; use namada_state::{DBIter, StorageHasher, WlState, DB}; -use thiserror::Error; use crate::events::log::EventLog; pub use crate::tendermint::abci::request::Query as RequestQuery; -use crate::tendermint::merkle::proof::ProofOps; /// A request context provides read-only access to storage and WASM compilation /// caches to request handlers. #[derive(Debug, Clone)] @@ -24,7 +22,7 @@ where /// Cache of transaction wasm compiled artifacts. pub tx_wasm_cache: TxCache, /// Taken from config `storage_read_past_height_limit`. When set, will - /// limit the how many block heights in the past can the storage be + /// limit how many block heights in the past can the storage be /// queried for reading values. pub storage_read_past_height_limit: Option, } @@ -67,32 +65,3 @@ pub trait Router { D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync; } - -#[allow(missing_docs)] -#[derive(Error, Debug)] -pub enum Error { - #[error("{0}")] - Tendermint(#[from] tendermint_rpc::Error), - #[error("Decoding error: {0}")] - Decoding(#[from] std::io::Error), - #[error("Info log: {0}, error code: {1}")] - Query(String, u32), - #[error("Invalid block height: {0} (overflown i64)")] - InvalidHeight(BlockHeight), -} - -/// Generic response from a query -#[derive(Clone, Debug, Default)] -pub struct ResponseQuery { - /// Response data to be borsh encoded - pub data: T, - /// Non-deterministic log of the request execution - pub info: String, - /// Optional proof - used for storage value reads which request `prove` - pub proof: Option, - /// Block height from which data was derived - pub height: BlockHeight, -} - -/// [`ResponseQuery`] with borsh-encoded `data` field -pub type EncodedResponseQuery = ResponseQuery>; 
diff --git a/crates/sdk/src/queries/vp/pos.rs b/crates/sdk/src/queries/vp/pos.rs index 0c2568f973..946e0c2197 100644 --- a/crates/sdk/src/queries/vp/pos.rs +++ b/crates/sdk/src/queries/vp/pos.rs @@ -708,8 +708,10 @@ where /// Client-only methods for the router type are composed from router functions. pub mod client_only_methods { + use namada_io::Client; + use super::*; - use crate::queries::{Client, RPC}; + use crate::queries::RPC; impl Pos { /// Get bonds and unbonds with all details (slashes and rewards, if any) diff --git a/crates/sdk/src/queries/vp/token.rs b/crates/sdk/src/queries/vp/token.rs index 3904f1cd85..a8af1dc850 100644 --- a/crates/sdk/src/queries/vp/token.rs +++ b/crates/sdk/src/queries/vp/token.rs @@ -55,10 +55,11 @@ pub mod client_only_methods { use borsh::BorshDeserialize; use namada_core::address::Address; use namada_core::token; + use namada_io::Client; use namada_token::storage_key::{balance_key, masp_total_rewards}; use super::Token; - use crate::queries::{Client, RPC}; + use crate::queries::RPC; impl Token { /// Get the balance of the given `token` belonging to the given `owner`. 
diff --git a/crates/sdk/src/rpc.rs b/crates/sdk/src/rpc.rs index 96e82e7686..5a3e19971f 100644 --- a/crates/sdk/src/rpc.rs +++ b/crates/sdk/src/rpc.rs @@ -35,12 +35,14 @@ use namada_governance::utils::{ use namada_ibc::storage::{ ibc_trace_key, ibc_trace_key_prefix, is_ibc_trace_key, }; +use namada_io::{display_line, edisplay_line, Client, Io}; use namada_parameters::{storage as params_storage, EpochDuration}; use namada_proof_of_stake::parameters::PosParams; use namada_proof_of_stake::types::{ BondsAndUnbondsDetails, CommissionPair, ValidatorMetaData, }; use namada_state::LastBlock; +use namada_token::masp::MaspTokenRewardData; use namada_tx::data::{BatchedTxResult, DryRunResult, ResultCode, TxResult}; use namada_tx::event::{Batch as BatchAttr, Code as CodeAttr}; use serde::Serialize; @@ -50,23 +52,21 @@ use crate::control_flow::time; use crate::error::{EncodingError, Error, QueryError, TxSubmitError}; use crate::events::{extend, Event}; use crate::internal_macros::echo_error; -use crate::io::Io; -use crate::masp::MaspTokenRewardData; use crate::queries::vp::pos::{ EnrichedBondsAndUnbondsDetails, ValidatorStateInfo, }; -use crate::queries::{Client, RPC}; +use crate::queries::RPC; use crate::tendermint::block::Height; use crate::tendermint::merkle::proof::ProofOps; use crate::tendermint_rpc::query::Query; -use crate::{display_line, edisplay_line, error, Namada, Tx}; +use crate::{error, Namada, Tx}; /// Query an estimate of the maximum block time. 
-pub async fn query_max_block_time_estimate( - context: &impl Namada, +pub async fn query_max_block_time_estimate( + client: &C, ) -> Result { RPC.shell() - .max_block_time(context.client()) + .max_block_time(client) .await .map_err(|err| Error::from(QueryError::NoResponse(err.to_string()))) } @@ -80,8 +80,8 @@ pub async fn query_tx_status2( deadline: time::Instant, ) -> Result where - C: crate::queries::Client + Sync, - IO: crate::io::Io + crate::MaybeSend + crate::MaybeSync, + C: namada_io::Client + Sync, + IO: Io + crate::MaybeSend + crate::MaybeSync, { time::Sleep { strategy: time::LinearBackoff { @@ -141,21 +141,21 @@ pub async fn query_tx_status( } /// Query the epoch of the last committed block -pub async fn query_epoch( +pub async fn query_epoch( client: &C, ) -> Result { convert_response::(RPC.shell().epoch(client).await) } /// Query the masp epoch of the last committed block -pub async fn query_masp_epoch( +pub async fn query_masp_epoch( client: &C, ) -> Result { convert_response::(RPC.shell().masp_epoch(client).await) } /// Query the address of the native token -pub async fn query_native_token( +pub async fn query_native_token( client: &C, ) -> Result { convert_response::(RPC.shell().native_token(client).await) @@ -164,7 +164,7 @@ pub async fn query_native_token( /// Query the epoch of the given block height, if it exists. /// Will return none if the input block height is greater than /// the latest committed block height. -pub async fn query_epoch_at_height( +pub async fn query_epoch_at_height( client: &C, height: BlockHeight, ) -> Result, error::Error> { @@ -172,7 +172,7 @@ pub async fn query_epoch_at_height( } /// Query the last committed block, if any. -pub async fn query_block( +pub async fn query_block( client: &C, ) -> Result, error::Error> { // NOTE: We're not using `client.latest_block()` because it may return an @@ -181,7 +181,7 @@ pub async fn query_block( } /// A helper to unwrap client's response. Will shut down process on error. 
-fn unwrap_client_response( +fn unwrap_client_response( response: Result, ) -> T { response.unwrap_or_else(|err| { @@ -192,21 +192,21 @@ fn unwrap_client_response( /// A helper to turn client's response into an error type that can be used with /// ? The exact error type is a `QueryError::NoResponse`, and thus should be /// seen as getting no response back from a query. -fn convert_response( +fn convert_response( response: Result, ) -> Result { response.map_err(|err| Error::from(QueryError::NoResponse(err.to_string()))) } /// Query the results of the last committed block -pub async fn query_results( +pub async fn query_results( client: &C, ) -> Result, Error> { convert_response::(RPC.shell().read_results(client).await) } /// Query token amount of owner. -pub async fn get_token_balance( +pub async fn get_token_balance( client: &C, token: &Address, owner: &Address, @@ -217,7 +217,7 @@ pub async fn get_token_balance( } /// Query token total supply. -pub async fn get_token_total_supply( +pub async fn get_token_total_supply( client: &C, token: &Address, ) -> Result { @@ -225,7 +225,7 @@ pub async fn get_token_total_supply( } /// Query the effective total supply of the native token -pub async fn get_effective_native_supply( +pub async fn get_effective_native_supply( client: &C, ) -> Result { convert_response::( @@ -234,7 +234,7 @@ pub async fn get_effective_native_supply( } /// Check if the given address is a known validator. -pub async fn is_validator( +pub async fn is_validator( client: &C, address: &Address, ) -> Result { @@ -242,7 +242,7 @@ pub async fn is_validator( } /// Check if the given address is a pgf steward. 
-pub async fn is_steward( +pub async fn is_steward( client: &C, address: &Address, ) -> bool { @@ -252,7 +252,7 @@ pub async fn is_steward( } /// Check if a given address is a known delegator -pub async fn is_delegator( +pub async fn is_delegator( client: &C, address: &Address, ) -> Result { @@ -262,7 +262,7 @@ pub async fn is_delegator( } /// Check if a given address is a known delegator at the given epoch -pub async fn is_delegator_at( +pub async fn is_delegator_at( client: &C, address: &Address, epoch: Epoch, @@ -276,7 +276,7 @@ pub async fn is_delegator_at( } /// Find if the given source address has any bonds. -pub async fn has_bonds( +pub async fn has_bonds( client: &C, source: &Address, ) -> Result { @@ -284,7 +284,7 @@ pub async fn has_bonds( } /// Get the set of pgf stewards -pub async fn query_pgf_stewards( +pub async fn query_pgf_stewards( client: &C, ) -> Result, error::Error> { convert_response::>( @@ -293,9 +293,7 @@ pub async fn query_pgf_stewards( } /// Query the consensus key by validator address -pub async fn query_validator_consensus_keys< - C: crate::queries::Client + Sync, ->( +pub async fn query_validator_consensus_keys( client: &C, address: &Address, ) -> Result, error::Error> { @@ -305,7 +303,7 @@ pub async fn query_validator_consensus_keys< } /// Get the set of consensus keys registered in the network -pub async fn get_consensus_keys( +pub async fn get_consensus_keys( client: &C, ) -> Result, error::Error> { convert_response::>( @@ -316,7 +314,7 @@ pub async fn get_consensus_keys( /// Check if the address exists on chain. Established address exists if it has a /// stored validity predicate. Implicit and internal addresses always return /// true. -pub async fn known_address( +pub async fn known_address( client: &C, address: &Address, ) -> Result { @@ -331,7 +329,7 @@ pub async fn known_address( } /// Query a conversion. 
-pub async fn query_conversion( +pub async fn query_conversion( client: &C, asset_type: AssetType, ) -> Option<( @@ -348,7 +346,7 @@ pub async fn query_conversion( } /// Query conversions -pub async fn query_conversions( +pub async fn query_conversions( client: &C, ) -> Result< BTreeMap< @@ -367,14 +365,14 @@ pub async fn query_conversions( } /// Query the total rewards minted by MASP -pub async fn query_masp_total_rewards( +pub async fn query_masp_total_rewards( client: &C, ) -> Result { convert_response::(RPC.vp().token().masp_total_rewards(client).await) } /// Query to read the tokens that earn masp rewards. -pub async fn query_masp_reward_tokens( +pub async fn query_masp_reward_tokens( client: &C, ) -> Result, Error> { convert_response::(RPC.shell().masp_reward_tokens(client).await) @@ -412,7 +410,7 @@ pub async fn query_storage_value( ) -> Result where T: BorshDeserialize, - C: crate::queries::Client + Sync, + C: namada_io::Client + Sync, { // In case `T` is a unit (only thing that encodes to 0 bytes), we have to // use `storage_has_key` instead of `storage_value`, because `storage_value` @@ -441,7 +439,7 @@ where } /// Query a storage value and the proof without decoding. -pub async fn query_storage_value_bytes( +pub async fn query_storage_value_bytes( client: &C, key: &storage::Key, height: Option, @@ -498,7 +496,7 @@ where } /// Query to check if the given storage key exists. -pub async fn query_has_storage_key( +pub async fn query_has_storage_key( client: &C, key: &storage::Key, ) -> Result { @@ -541,10 +539,10 @@ impl<'a> From> for Query { /// Call the corresponding `tx_event_query` RPC method, to fetch /// the current status of a transaction. 
-pub async fn query_tx_events( +pub async fn query_tx_events( client: &C, tx_event_query: TxEventQuery<'_>, -) -> std::result::Result, ::Error> { +) -> std::result::Result, ::Error> { let tx_hash: Hash = tx_event_query.tx_hash().try_into().unwrap(); match tx_event_query { TxEventQuery::Applied(_) => RPC.shell().applied(client, &tx_hash).await, /* .wrap_err_with(|| { @@ -723,14 +721,14 @@ impl TxResponse { } /// Get the PoS parameters -pub async fn get_pos_params( +pub async fn get_pos_params( client: &C, ) -> Result { convert_response::(RPC.vp().pos().pos_params(client).await) } /// Get all validators in the given epoch -pub async fn get_all_validators( +pub async fn get_all_validators( client: &C, epoch: Epoch, ) -> Result, error::Error> { @@ -743,7 +741,7 @@ pub async fn get_all_validators( } /// Get the total staked tokens in the given epoch -pub async fn get_total_staked_tokens( +pub async fn get_total_staked_tokens( client: &C, epoch: Epoch, ) -> Result { @@ -753,7 +751,7 @@ pub async fn get_total_staked_tokens( } /// Get the total active voting power in the given epoch -pub async fn get_total_active_voting_power( +pub async fn get_total_active_voting_power( client: &C, epoch: Epoch, ) -> Result { @@ -766,7 +764,7 @@ pub async fn get_total_active_voting_power( } /// Get the given validator's stake at the given epoch -pub async fn get_validator_stake( +pub async fn get_validator_stake( client: &C, epoch: Epoch, validator: &Address, @@ -781,7 +779,7 @@ pub async fn get_validator_stake( } /// Query and return a validator's state -pub async fn get_validator_state( +pub async fn get_validator_state( client: &C, validator: &Address, epoch: Option, @@ -795,7 +793,7 @@ pub async fn get_validator_state( } /// Get the validators to which a delegator is bonded at a certain epoch -pub async fn get_delegation_validators( +pub async fn get_delegation_validators( client: &C, address: &Address, epoch: Epoch, @@ -810,9 +808,7 @@ pub async fn get_delegation_validators( /// Get 
the delegations of a delegator at some epoch, including the validator /// and bond amount -pub async fn get_delegations_of_delegator_at< - C: crate::queries::Client + Sync, ->( +pub async fn get_delegations_of_delegator_at( client: &C, address: &Address, epoch: Epoch, @@ -826,7 +822,7 @@ pub async fn get_delegations_of_delegator_at< } /// Query proposal by Id -pub async fn query_proposal_by_id( +pub async fn query_proposal_by_id( client: &C, proposal_id: u64, ) -> Result, Error> { @@ -837,7 +833,7 @@ pub async fn query_proposal_by_id( /// Query and return validator's commission rate and max commission rate change /// per epoch -pub async fn query_commission_rate( +pub async fn query_commission_rate( client: &C, validator: &Address, epoch: Option, @@ -852,7 +848,7 @@ pub async fn query_commission_rate( /// Query and return validator's metadata, including the commission rate and max /// commission rate change -pub async fn query_metadata( +pub async fn query_metadata( client: &C, validator: &Address, epoch: Option, @@ -871,7 +867,7 @@ pub async fn query_metadata( /// Query and return the incoming redelegation epoch for a given pair of source /// validator and delegator, if there is any. 
-pub async fn query_incoming_redelegations( +pub async fn query_incoming_redelegations( client: &C, src_validator: &Address, delegator: &Address, @@ -885,7 +881,7 @@ pub async fn query_incoming_redelegations( } /// Query a validator's bonds for a given epoch -pub async fn query_bond( +pub async fn query_bond( client: &C, source: &Address, validator: &Address, @@ -897,7 +893,7 @@ pub async fn query_bond( } /// Query a validator's bonds for a given epoch -pub async fn query_last_infraction_epoch( +pub async fn query_last_infraction_epoch( client: &C, validator: &Address, ) -> Result, error::Error> { @@ -910,7 +906,7 @@ pub async fn query_last_infraction_epoch( } /// Query the accunt substorage space of an address -pub async fn get_account_info( +pub async fn get_account_info( client: &C, owner: &Address, ) -> Result, error::Error> { @@ -920,7 +916,7 @@ pub async fn get_account_info( } /// Query if the public_key is revealed -pub async fn is_public_key_revealed( +pub async fn is_public_key_revealed( client: &C, owner: &Address, ) -> Result { @@ -928,7 +924,7 @@ pub async fn is_public_key_revealed( } /// Query an account substorage at a specific index -pub async fn get_public_key_at( +pub async fn get_public_key_at( client: &C, owner: &Address, index: u8, @@ -944,7 +940,7 @@ pub async fn get_public_key_at( } /// Query the proposal result -pub async fn query_proposal_result( +pub async fn query_proposal_result( client: &C, proposal_id: u64, ) -> Result, Error> { @@ -1075,7 +1071,7 @@ pub async fn query_and_print_unbonds( } /// Query withdrawable tokens in a validator account for a given epoch -pub async fn query_withdrawable_tokens( +pub async fn query_withdrawable_tokens( client: &C, bond_source: &Address, validator: &Address, @@ -1090,7 +1086,7 @@ pub async fn query_withdrawable_tokens( } /// Query all unbonds for a validator, applying slashes -pub async fn query_unbond_with_slashing( +pub async fn query_unbond_with_slashing( client: &C, source: &Address, validator: 
&Address, @@ -1104,21 +1100,21 @@ pub async fn query_unbond_with_slashing( } /// Get the governance parameters -pub async fn query_governance_parameters( +pub async fn query_governance_parameters( client: &C, ) -> GovernanceParameters { unwrap_client_response::(RPC.vp().gov().parameters(client).await) } /// Get the public good fundings parameters -pub async fn query_pgf_parameters( +pub async fn query_pgf_parameters( client: &C, ) -> PgfParameters { unwrap_client_response::(RPC.vp().pgf().parameters(client).await) } /// Get all the votes of a proposal -pub async fn query_proposal_votes( +pub async fn query_proposal_votes( client: &C, proposal_id: u64, ) -> Result, error::Error> { @@ -1128,7 +1124,7 @@ pub async fn query_proposal_votes( } /// Query the information to estimate next epoch start -pub async fn query_next_epoch_info( +pub async fn query_next_epoch_info( client: &C, ) -> Result<(BlockHeight, EpochDuration), error::Error> { let this_epoch_first_height = convert_response::( @@ -1145,7 +1141,7 @@ pub async fn query_next_epoch_info( } /// Get the bond amount at the given epoch -pub async fn get_bond_amount_at( +pub async fn get_bond_amount_at( client: &C, delegator: &Address, validator: &Address, @@ -1162,7 +1158,7 @@ pub async fn get_bond_amount_at( /// Get bonds and unbonds with all details (slashes and rewards, if any) /// grouped by their bond IDs. -pub async fn bonds_and_unbonds( +pub async fn bonds_and_unbonds( client: &C, source: &Option
, validator: &Option
, @@ -1178,7 +1174,7 @@ pub async fn bonds_and_unbonds( /// Get bonds and unbonds with all details (slashes and rewards, if any) /// grouped by their bond IDs, enriched with extra information calculated from /// the data. -pub async fn enriched_bonds_and_unbonds( +pub async fn enriched_bonds_and_unbonds( client: &C, current_epoch: Epoch, source: &Option
, @@ -1198,7 +1194,7 @@ pub async fn enriched_bonds_and_unbonds( } /// Query the denomination of the given token -pub async fn query_denom( +pub async fn query_denom( client: &C, token: &Address, ) -> Option { @@ -1270,7 +1266,7 @@ pub async fn validate_amount( /// Wait for a first block and node to be synced. pub async fn wait_until_node_is_synched( - client: &(impl Client + Sync), + client: &(impl namada_io::Client + Sync), io: &impl Io, ) -> Result<(), Error> { let height_one = Height::try_from(1_u64).unwrap(); @@ -1328,7 +1324,7 @@ pub async fn wait_until_node_is_synched( /// Look up the denomination of a token in order to make a correctly denominated /// amount. -pub async fn denominate_amount( +pub async fn denominate_amount( client: &C, io: &impl Io, token: &Address, @@ -1355,7 +1351,7 @@ pub async fn denominate_amount( /// Look up the denomination of a token in order to format it /// correctly as a string. pub async fn format_denominated_amount( - client: &(impl Client + Sync), + client: &(impl namada_io::Client + Sync), io: &impl Io, token: &Address, amount: token::Amount, diff --git a/crates/sdk/src/signing.rs b/crates/sdk/src/signing.rs index 01b9f819e3..1eab0842de 100644 --- a/crates/sdk/src/signing.rs +++ b/crates/sdk/src/signing.rs @@ -24,6 +24,7 @@ use namada_governance::storage::proposal::{ }; use namada_governance::storage::vote::ProposalVote; use namada_ibc::{MsgNftTransfer, MsgTransfer}; +use namada_io::*; use namada_parameters::storage as parameter_storage; use namada_token as token; use namada_token::storage_key::balance_key; @@ -39,7 +40,6 @@ use crate::args::SdkTypes; use crate::error::{EncodingError, Error, TxSubmitError}; use crate::eth_bridge_pool::PendingTransfer; use crate::governance::storage::proposal::{AddRemove, PGFAction, PGFTarget}; -use crate::io::*; use crate::rpc::validate_amount; use crate::token::Account; use crate::tx::{ @@ -55,7 +55,7 @@ use crate::tx::{ }; pub use crate::wallet::store::AddressVpType; use 
crate::wallet::{Wallet, WalletIo}; -use crate::{args, display_line, rpc, MaybeSend, Namada}; +use crate::{args, rpc, Namada}; /// A structure holding the signing data to craft a transaction #[derive(Clone, PartialEq)] diff --git a/crates/sdk/src/tx.rs b/crates/sdk/src/tx.rs index 75430bbfbd..ce5758d959 100644 --- a/crates/sdk/src/tx.rs +++ b/crates/sdk/src/tx.rs @@ -52,11 +52,17 @@ use namada_governance::storage::vote::ProposalVote; use namada_ibc::storage::channel_key; use namada_ibc::trace::is_nft_trace; use namada_ibc::{MsgNftTransfer, MsgTransfer}; +use namada_io::{display_line, edisplay_line, Client, Io}; use namada_proof_of_stake::parameters::{ PosParams, MAX_VALIDATOR_METADATA_LEN, }; use namada_proof_of_stake::types::{CommissionPair, ValidatorState}; use namada_token as token; +use namada_token::masp::shielded_wallet::ShieldedApi; +use namada_token::masp::TransferErr::Build; +use namada_token::masp::{ + MaspDataLog, MaspFeeData, MaspTransferData, ShieldedTransfer, +}; use namada_token::storage_key::balance_key; use namada_token::DenominatedAmount; use namada_tx::data::pgf::UpdateStewardCommission; @@ -74,13 +80,6 @@ use crate::args::{ }; use crate::control_flow::time; use crate::error::{EncodingError, Error, QueryError, Result, TxSubmitError}; -use crate::io::Io; -use crate::masp::TransferErr::Build; -use crate::masp::{ - MaspDataLog, MaspFeeData, MaspTransferData, ShieldedContext, - ShieldedTransfer, -}; -use crate::queries::Client; use crate::rpc::{ self, get_validator_stake, query_wasm_code_hash, validate_amount, InnerTxResult, TxBroadcastData, TxResponse, @@ -91,7 +90,7 @@ use crate::signing::{ use crate::tendermint_rpc::endpoint::broadcast::tx_sync::Response; use crate::tendermint_rpc::error::Error as RpcError; use crate::wallet::WalletIo; -use crate::{args, display_line, edisplay_line, Namada}; +use crate::{args, Namada}; /// Initialize account transaction WASM pub const TX_INIT_ACCOUNT_WASM: &str = "tx_init_account.wasm"; @@ -286,7 +285,7 @@ pub 
async fn process_tx( } /// Check if a reveal public key transaction is needed -pub async fn is_reveal_pk_needed( +pub async fn is_reveal_pk_needed( client: &C, address: &Address, ) -> Result { @@ -295,7 +294,7 @@ pub async fn is_reveal_pk_needed( } /// Check if the public key for the given address has been revealed -pub async fn has_revealed_pk( +pub async fn has_revealed_pk( client: &C, address: &Address, ) -> Result { @@ -3414,16 +3413,20 @@ async fn construct_shielded_parts( // Precompute asset types to increase chances of success in decoding let token_map = context.wallet().await.get_addresses(); let tokens = token_map.values().collect(); - let _ = context - .shielded_mut() - .await - .precompute_asset_types(context.client(), tokens) - .await; - let stx_result = - ShieldedContext::::gen_shielded_transfer( - context, data, fee_data, update_ctx, - ) - .await; + + let stx_result = { + let expiration = context.tx_builder().expiration.to_datetime(); + let mut shielded = context.shielded_mut().await; + _ = shielded + .precompute_asset_types(context.client(), tokens) + .await; + + shielded + .gen_shielded_transfer( + context, data, fee_data, expiration, update_ctx, + ) + .await + }; let shielded_parts = match stx_result { Ok(Some(stx)) => stx, @@ -3771,16 +3774,21 @@ pub async fn gen_ibc_shielding_transfer( token: token.clone(), amount: validated_amount, }; - let shielded_transfer = - ShieldedContext::::gen_shielded_transfer( - context, - vec![masp_transfer_data], - // Fees are paid from the transparent balance of the relayer - None, - true, - ) - .await - .map_err(|err| TxSubmitError::MaspError(err.to_string()))?; + let shielded_transfer = { + let expiration = context.tx_builder().expiration.to_datetime(); + let mut shielded = context.shielded_mut().await; + shielded + .gen_shielded_transfer( + context, + vec![masp_transfer_data], + // Fees are paid from the transparent balance of the relayer + None, + expiration, + true, + ) + .await + .map_err(|err| 
TxSubmitError::MaspError(err.to_string()))? + }; Ok(shielded_transfer.map(|st| st.masp_tx)) } diff --git a/crates/shielded_token/Cargo.toml b/crates/shielded_token/Cargo.toml index b60853ea2f..5b62b4a034 100644 --- a/crates/shielded_token/Cargo.toml +++ b/crates/shielded_token/Cargo.toml @@ -14,36 +14,67 @@ version.workspace = true [features] default = [] +std = ["download-params", "multicore"] +async-send = ["namada_io/async-send"] +mainnet = [] +migrations = ["namada_migrations", "linkme"] multicore = ["dep:rayon"] testing = [ "multicore", "namada_core/testing", "masp_primitives/test-dependencies", + "proptest", + "std" ] download-params = ["masp_proofs/download-params"] +masp = [ + "namada_io", + "namada_wallet", + "namada_core/control_flow", + "namada_core/task_env", + "flume", +] [dependencies] namada_account = { path = "../account" } namada_controller = { path = "../controller" } -namada_core = { path = "../core" } +namada_core = { path = "../core"} +namada_events = {path = "../events" } namada_gas = { path = "../gas" } +namada_io = { path = "../io", optional = true } +namada_macros = {path = "../macros" } +namada_migrations = { path = "../migrations", optional = true } namada_state = { path = "../state" } namada_systems = { path = "../systems" } namada_tx = { path = "../tx" } namada_vp = { path = "../vp" } +namada_wallet = { path = "../wallet", optional = true } +async-trait.workspace = true borsh.workspace = true +eyre.workspace = true +futures.workspace = true +flume = { workspace = true, optional = true } +itertools.workspace = true lazy_static.workspace = true +linkme = {workspace = true, optional = true} masp_primitives.workspace = true -masp_proofs = { workspace = true } +masp_proofs.workspace = true +proptest = {workspace = true, optional = true} +rand.workspace = true rand_core.workspace = true rayon = { workspace = true, optional = true } ripemd.workspace = true serde.workspace = true +serde_json.workspace = true sha2.workspace = true 
smooth-operator.workspace = true +tempfile.workspace = true thiserror.workspace = true tracing.workspace = true +typed-builder.workspace = true +xorf.workspace = true + [dev-dependencies] namada_gas = { path = "../gas" } @@ -52,8 +83,10 @@ namada_state = { path = "../state", features = ["testing"] } namada_trans_token = { path = "../trans_token" } lazy_static.workspace = true +masp_primitives = { workspace = true, features = ["test-dependencies"] } masp_proofs = { workspace = true, features = ["download-params"] } proptest.workspace = true rand_core.workspace = true rayon.workspace = true test-log.workspace = true +tokio.workspace = true diff --git a/crates/shielded_token/src/lib.rs b/crates/shielded_token/src/lib.rs index 2e35149be9..e337d7faad 100644 --- a/crates/shielded_token/src/lib.rs +++ b/crates/shielded_token/src/lib.rs @@ -18,6 +18,9 @@ )] pub mod conversion; + +#[cfg(feature = "masp")] +pub mod masp; mod storage; pub mod storage_key; pub mod utils; @@ -37,6 +40,9 @@ pub use namada_state::{ use serde::{Deserialize, Serialize}; pub use storage::*; +#[cfg(feature = "masp")] +pub use crate::masp::shielded_wallet::ShieldedWallet; + /// Token parameters for each kind of asset held on chain #[derive( Clone, diff --git a/crates/shielded_token/src/masp.rs b/crates/shielded_token/src/masp.rs new file mode 100644 index 0000000000..d2cc14a60f --- /dev/null +++ b/crates/shielded_token/src/masp.rs @@ -0,0 +1,1220 @@ +//! MASP verification wrappers. 
+#![allow(clippy::arithmetic_side_effects)] +#![allow(clippy::cast_possible_truncation)] +#![allow(clippy::cast_sign_loss)] +mod shielded_sync; +pub mod shielded_wallet; +#[cfg(test)] +mod test_utils; + +use std::collections::BTreeMap; +use std::fmt::Debug; + +use borsh::{BorshDeserialize, BorshSerialize}; +use itertools::Itertools; +use masp_primitives::asset_type::AssetType; +#[cfg(feature = "mainnet")] +use masp_primitives::consensus::MainNetwork as Network; +#[cfg(not(feature = "mainnet"))] +use masp_primitives::consensus::TestNetwork as Network; +use masp_primitives::convert::AllowedConversion; +use masp_primitives::merkle_tree::{IncrementalWitness, MerklePath}; +use masp_primitives::sapling::keys::FullViewingKey; +use masp_primitives::sapling::{Diversifier, Node, ViewingKey}; +use masp_primitives::transaction::builder::{self, *}; +use masp_primitives::transaction::components::sapling::builder::SaplingMetadata; +use masp_primitives::transaction::components::{I128Sum, ValueSum}; +use masp_primitives::transaction::Transaction; +use masp_primitives::zip32::{ + ExtendedFullViewingKey, ExtendedSpendingKey as MaspExtendedSpendingKey, +}; +use masp_proofs::prover::LocalTxProver; +use namada_core::address::Address; +use namada_core::collections::{HashMap, HashSet}; +use namada_core::dec::Dec; +use namada_core::masp::*; +use namada_core::token; +use namada_core::token::Denomination; +use namada_core::uint::Uint; +use namada_io::{MaybeSend, MaybeSync}; +use namada_macros::BorshDeserializer; +#[cfg(feature = "migrations")] +use namada_migrations::*; +use namada_tx::IndexedTx; +use rand_core::{CryptoRng, RngCore}; +pub use shielded_wallet::ShieldedWallet; +use smooth_operator::checked; +use thiserror::Error; + +pub use crate::masp::shielded_sync::dispatcher::{Dispatcher, DispatcherCache}; +#[cfg(not(target_family = "wasm"))] +pub use crate::masp::shielded_sync::MaspLocalTaskEnv; +pub use crate::masp::shielded_sync::{ + utils, ShieldedSyncConfig, ShieldedSyncConfigBuilder, 
+}; +pub use crate::validation::{ + partial_deauthorize, preload_verifying_keys, PVKs, CONVERT_NAME, + ENV_VAR_MASP_PARAMS_DIR, OUTPUT_NAME, SPEND_NAME, +}; + +/// Randomness seed for MASP integration tests to build proofs with +/// deterministic rng. +pub const ENV_VAR_MASP_TEST_SEED: &str = "NAMADA_MASP_TEST_SEED"; + +/// The network to use for MASP +pub const NETWORK: Network = Network; + +/// Shielded transfer +#[derive(Clone, Debug, BorshSerialize, BorshDeserialize, BorshDeserializer)] +pub struct ShieldedTransfer { + /// Shielded transfer builder + pub builder: Builder<(), ExtendedFullViewingKey, ()>, + /// MASP transaction + pub masp_tx: Transaction, + /// Metadata + pub metadata: SaplingMetadata, + /// Epoch in which the transaction was created + pub epoch: MaspEpoch, +} + +/// The data for a masp fee payment +#[allow(missing_docs)] +#[derive(Debug)] +pub struct MaspFeeData { + pub sources: Vec, + pub target: Address, + pub token: Address, + pub amount: token::DenominatedAmount, +} + +/// The data for a single masp transfer +#[allow(missing_docs)] +#[derive(Debug)] +pub struct MaspTransferData { + pub source: TransferSource, + pub target: TransferTarget, + pub token: Address, + pub amount: token::DenominatedAmount, +} + +/// The data for a masp transfer relative to a given source +#[derive(Hash, Eq, PartialEq)] +pub struct MaspSourceTransferData { + source: TransferSource, + token: Address, +} + +/// The data for a masp transfer relative to a given target +#[derive(Hash, Eq, PartialEq)] +struct MaspTargetTransferData { + source: TransferSource, + target: TransferTarget, + token: Address, +} + +/// Data to log masp transactions' errors +#[allow(missing_docs)] +#[derive(Debug)] +pub struct MaspDataLog { + pub source: Option, + pub token: Address, + pub amount: token::DenominatedAmount, +} + +#[allow(missing_docs)] +pub struct MaspTxReorderedData { + source_data: HashMap, + target_data: HashMap, + denoms: HashMap, +} + +/// Data about the unspent amounts for 
any given shielded source coming from the +/// spent notes in their posses that have been added to the builder. Can be used +/// to either pay fees or to return a change +pub type Changes = HashMap; + +/// Shielded pool data for a token +#[allow(missing_docs)] +#[derive(Debug, BorshSerialize, BorshDeserialize, BorshDeserializer)] +pub struct MaspTokenRewardData { + pub name: String, + pub address: Address, + pub max_reward_rate: Dec, + pub kp_gain: Dec, + pub kd_gain: Dec, + pub locked_amount_target: Uint, +} + +/// A return type for gen_shielded_transfer +#[allow(clippy::large_enum_variant)] +#[derive(Error, Debug)] +pub enum TransferErr { + /// Build error for masp errors + #[error("{error}")] + Build { + /// The error + error: builder::Error, + /// The optional associated transfer data for logging purposes + data: Option, + }, + /// errors + #[error("{0}")] + General(String), +} + +/// Freeze a Builder into the format necessary for inclusion in a Tx. This is +/// the format used by hardware wallets to validate a MASP Transaction. +pub struct WalletMap; + +impl + masp_primitives::transaction::components::sapling::builder::MapBuilder< + P1, + MaspExtendedSpendingKey, + (), + ExtendedFullViewingKey, + > for WalletMap +{ + fn map_params(&self, _s: P1) {} + + fn map_key(&self, s: MaspExtendedSpendingKey) -> ExtendedFullViewingKey { + (&s).into() + } +} + +impl + MapBuilder + for WalletMap +{ + fn map_notifier(&self, _s: N1) {} +} + +/// Abstracts platform specific details away from the logic of shielded pool +/// operations. 
+#[cfg_attr(feature = "async-send", async_trait::async_trait)] +#[cfg_attr(not(feature = "async-send"), async_trait::async_trait(?Send))] +pub trait ShieldedUtils: + Sized + BorshDeserialize + BorshSerialize + Default + Clone +{ + /// Get a MASP transaction prover + fn local_tx_prover(&self) -> LocalTxProver; + + /// Load up the currently saved ShieldedContext + async fn load( + &self, + ctx: &mut ShieldedWallet, + force_confirmed: bool, + ) -> std::io::Result<()>; + + /// Save the given ShieldedContext for future loads + async fn save( + &self, + ctx: &ShieldedWallet, + ) -> std::io::Result<()>; + + /// Save a cache of data as part of shielded sync if that + /// process gets interrupted. + async fn cache_save(&self, _cache: &DispatcherCache) + -> std::io::Result<()>; + + /// Load a cache of data as part of shielded sync if that + /// process gets interrupted. + async fn cache_load(&self) -> std::io::Result; +} + +/// Make a ViewingKey that can view notes encrypted by given ExtendedSpendingKey +pub fn to_viewing_key(esk: &MaspExtendedSpendingKey) -> FullViewingKey { + ExtendedFullViewingKey::from(esk).fvk +} + +/// Generate a valid diversifier, i.e. one that has a diversified base. Return +/// also this diversified base. +pub fn find_valid_diversifier( + rng: &mut R, +) -> (Diversifier, masp_primitives::jubjub::SubgroupPoint) { + let mut diversifier; + let g_d; + // Keep generating random diversifiers until one has a diversified base + loop { + let mut d = [0; 11]; + rng.fill_bytes(&mut d); + diversifier = Diversifier(d); + if let Some(val) = diversifier.g_d() { + g_d = val; + break; + } + } + (diversifier, g_d) +} + +/// Determine if using the current note would actually bring us closer to our +/// target. 
Returns the unused amounts (change) of delta if any +pub fn is_amount_required( + src: I128Sum, + dest: I128Sum, + normed_delta: I128Sum, + opt_delta: Option, +) -> Option { + let mut changes = None; + let gap = dest.clone() - src; + + for (asset_type, value) in gap.components() { + if *value > 0 && normed_delta[asset_type] > 0 { + let signed_change_amt = + checked!(normed_delta[asset_type] - *value).unwrap_or_default(); + let unsigned_change_amt = if signed_change_amt > 0 { + signed_change_amt + } else { + // Even if there's no change we still need to set the return + // value of this function to be Some so that the caller sees + // that this note should be used + 0 + }; + + let change_amt = I128Sum::from_nonnegative( + asset_type.to_owned(), + unsigned_change_amt, + ) + .expect("Change is guaranteed to be non-negative"); + changes = changes + .map(|prev| prev + change_amt.clone()) + .or(Some(change_amt)); + } + } + + // Because of the way conversions are computed, we need an extra step here + // if the token is not the native one + if let Some(delta) = opt_delta { + // Only if this note is going to be used, handle the assets in delta + // (not normalized) that are not part of dest + changes = changes.map(|mut chngs| { + for (delta_asset_type, delta_amt) in delta.components() { + if !dest.asset_types().contains(delta_asset_type) { + let rmng = I128Sum::from_nonnegative( + delta_asset_type.to_owned(), + *delta_amt, + ) + .expect("Change is guaranteed to be non-negative"); + chngs += rmng; + } + } + + chngs + }); + } + + changes +} + +/// a masp change +#[derive(BorshSerialize, BorshDeserialize, BorshDeserializer, Debug, Clone)] +pub struct MaspChange { + /// the token address + pub asset: Address, + /// the change in the token + pub change: token::Change, +} + +/// a masp amount +pub type MaspAmount = ValueSum<(Option, Address), token::Change>; + +/// A type tracking the notes used to construct a shielded transfer. 
Used to +/// avoid reusing the same notes multiple times which would lead to an invalid +/// transaction +pub type SpentNotesTracker = HashMap>; + +/// An extension of Option's cloned method for pair types +fn cloned_pair((a, b): (&T, &U)) -> (T, U) { + (a.clone(), b.clone()) +} + +/// Represents the amount used of different conversions +pub type Conversions = + BTreeMap, i128)>; + +/// Represents the changes that were made to a list of transparent accounts +pub type TransferDelta = HashMap; + +/// Represents the changes that were made to a list of shielded accounts +pub type TransactionDelta = HashMap; + +/// Maps a shielded tx to the index of its first output note. +pub type NoteIndex = BTreeMap; + +/// Maps the note index (in the commitment tree) to a witness +pub type WitnessMap = HashMap>; + +#[derive(BorshSerialize, BorshDeserialize, Debug)] +/// The possible sync states of the shielded context +pub enum ContextSyncStatus { + /// The context contains only data that has been confirmed by the protocol + Confirmed, + /// The context contains that that has not yet been confirmed by the + /// protocol and could end up being invalid + Speculative, +} + +#[cfg(test)] +mod tests { + use masp_proofs::bls12_381::Bls12; + + use super::*; + + /// quick and dirty test. 
will fail on size check + #[test] + #[should_panic(expected = "parameter file size is not correct")] + fn test_wrong_masp_params() { + use std::io::Write; + + let tempdir = tempfile::tempdir() + .expect("expected a temp dir") + .into_path(); + let fake_params_paths = + [SPEND_NAME, OUTPUT_NAME, CONVERT_NAME].map(|p| tempdir.join(p)); + for path in &fake_params_paths { + let mut f = + std::fs::File::create(path).expect("expected a temp file"); + f.write_all(b"fake params") + .expect("expected a writable temp file"); + f.sync_all() + .expect("expected a writable temp file (on sync)"); + } + + std::env::set_var(ENV_VAR_MASP_PARAMS_DIR, tempdir.as_os_str()); + // should panic here + masp_proofs::load_parameters( + &fake_params_paths[0], + &fake_params_paths[1], + &fake_params_paths[2], + ); + } + + /// a more involved test, using dummy parameters with the right + /// size but the wrong hash. + #[test] + #[should_panic(expected = "parameter file is not correct")] + fn test_wrong_masp_params_hash() { + use masp_primitives::ff::PrimeField; + use masp_proofs::bellman::groth16::{ + generate_random_parameters, Parameters, + }; + use masp_proofs::bellman::{Circuit, ConstraintSystem, SynthesisError}; + use masp_proofs::bls12_381::Scalar; + + struct FakeCircuit { + x: E, + } + + impl Circuit for FakeCircuit { + fn synthesize>( + self, + cs: &mut CS, + ) -> Result<(), SynthesisError> { + let x = cs.alloc(|| "x", || Ok(self.x)).unwrap(); + cs.enforce( + || { + "this is an extra long constraint name so that rustfmt \ + is ok with wrapping the params of enforce()" + }, + |lc| lc + x, + |lc| lc + x, + |lc| lc + x, + ); + Ok(()) + } + } + + let dummy_circuit = FakeCircuit { x: Scalar::zero() }; + let mut rng = rand::thread_rng(); + let fake_params: Parameters = + generate_random_parameters(dummy_circuit, &mut rng) + .expect("expected to generate fake params"); + + let tempdir = tempfile::tempdir() + .expect("expected a temp dir") + .into_path(); + // TODO: get masp to export these 
consts + let fake_params_paths = [ + (SPEND_NAME, 49848572u64), + (OUTPUT_NAME, 16398620u64), + (CONVERT_NAME, 22570940u64), + ] + .map(|(p, s)| (tempdir.join(p), s)); + for (path, size) in &fake_params_paths { + let mut f = + std::fs::File::create(path).expect("expected a temp file"); + fake_params + .write(&mut f) + .expect("expected a writable temp file"); + // the dummy circuit has one constraint, and therefore its + // params should always be smaller than the large masp + // circuit params. so this truncate extends the file, and + // extra bytes at the end do not make it invalid. + f.set_len(*size) + .expect("expected to truncate the temp file"); + f.sync_all() + .expect("expected a writable temp file (on sync)"); + } + + std::env::set_var(ENV_VAR_MASP_PARAMS_DIR, tempdir.as_os_str()); + // should panic here + masp_proofs::load_parameters( + &fake_params_paths[0].0, + &fake_params_paths[1].0, + &fake_params_paths[2].0, + ); + } +} + +#[cfg(any(test, feature = "testing"))] +/// Tests and strategies for transactions +pub mod testing { + use std::ops::AddAssign; + use std::sync::Mutex; + + use masp_primitives::consensus::testing::arb_height; + use masp_primitives::consensus::BranchId; + use masp_primitives::constants::{ + SPENDING_KEY_GENERATOR, VALUE_COMMITMENT_RANDOMNESS_GENERATOR, + }; + use masp_primitives::ff::PrimeField; + use masp_primitives::group::GroupEncoding; + use masp_primitives::jubjub; + use masp_primitives::keys::OutgoingViewingKey; + use masp_primitives::memo::MemoBytes; + use masp_primitives::sapling::note_encryption::{ + try_sapling_note_decryption, PreparedIncomingViewingKey, + }; + use masp_primitives::sapling::prover::TxProver; + use masp_primitives::sapling::redjubjub::{ + PrivateKey, PublicKey, Signature, + }; + use masp_primitives::sapling::{Note, ProofGenerationKey, Rseed}; + use masp_primitives::transaction::components::sapling::builder::RngBuildParams; + use 
masp_primitives::transaction::components::transparent::testing::arb_transparent_address; + use masp_primitives::transaction::components::{ + OutputDescription, TxOut, U64Sum, GROTH_PROOF_SIZE, + }; + use masp_primitives::transaction::fees::fixed::FeeRule; + use masp_primitives::transaction::{ + Authorization, Authorized, TransparentAddress, + }; + use masp_proofs::bellman::groth16::Proof; + use masp_proofs::bls12_381; + use masp_proofs::bls12_381::{Bls12, G1Affine, G2Affine}; + use namada_core::address::testing::arb_address; + use namada_core::token::testing::arb_denomination; + use namada_core::token::MaspDigitPos; + use proptest::prelude::*; + use proptest::test_runner::TestRng; + use proptest::{collection, option, prop_compose}; + + use super::*; + + /// This function computes `value` in the exponent of the value commitment + /// base + fn masp_compute_value_balance( + asset_type: AssetType, + value: i128, + ) -> Option { + // Compute the absolute value (failing if -i128::MAX is + // the value) + let abs = match value.checked_abs() { + Some(a) => a as u128, + None => return None, + }; + + // Is it negative? We'll have to negate later if so. + let is_negative = value.is_negative(); + + // Compute it in the exponent + let mut abs_bytes = [0u8; 32]; + abs_bytes[0..16].copy_from_slice(&abs.to_le_bytes()); + let mut value_balance = asset_type.value_commitment_generator() + * jubjub::Fr::from_bytes(&abs_bytes).unwrap(); + + // Negate if necessary + if is_negative { + value_balance = -value_balance; + } + + // Convert to unknown order point + Some(value_balance.into()) + } + + /// A context object for creating the Sapling components of a Zcash + /// transaction. + pub struct SaplingProvingContext { + bsk: jubjub::Fr, + // (sum of the Spend value commitments) - (sum of the Output value + // commitments) + cv_sum: jubjub::ExtendedPoint, + } + + /// An implementation of TxProver that does everything except generating + /// valid zero-knowledge proofs. 
Uses the supplied source of randomness to + /// carry out its operations. + pub struct MockTxProver(pub Mutex); + + impl TxProver for MockTxProver { + type SaplingProvingContext = SaplingProvingContext; + + fn new_sapling_proving_context(&self) -> Self::SaplingProvingContext { + SaplingProvingContext { + bsk: jubjub::Fr::zero(), + cv_sum: jubjub::ExtendedPoint::identity(), + } + } + + fn spend_proof( + &self, + ctx: &mut Self::SaplingProvingContext, + proof_generation_key: ProofGenerationKey, + _diversifier: Diversifier, + _rseed: Rseed, + ar: jubjub::Fr, + asset_type: AssetType, + value: u64, + _anchor: bls12_381::Scalar, + _merkle_path: MerklePath, + rcv: jubjub::Fr, + ) -> Result< + ([u8; GROTH_PROOF_SIZE], jubjub::ExtendedPoint, PublicKey), + (), + > { + // Accumulate the value commitment randomness in the context + { + let mut tmp = rcv; + tmp.add_assign(&ctx.bsk); + + // Update the context + ctx.bsk = tmp; + } + + // Construct the value commitment + let value_commitment = asset_type.value_commitment(value, rcv); + + // This is the result of the re-randomization, we compute it for the + // caller + let rk = PublicKey(proof_generation_key.ak.into()) + .randomize(ar, SPENDING_KEY_GENERATOR); + + // Compute value commitment + let value_commitment: jubjub::ExtendedPoint = + value_commitment.commitment().into(); + + // Accumulate the value commitment in the context + ctx.cv_sum += value_commitment; + + let mut zkproof = [0u8; GROTH_PROOF_SIZE]; + let proof = Proof:: { + a: G1Affine::generator(), + b: G2Affine::generator(), + c: G1Affine::generator(), + }; + proof + .write(&mut zkproof[..]) + .expect("should be able to serialize a proof"); + Ok((zkproof, value_commitment, rk)) + } + + fn output_proof( + &self, + ctx: &mut Self::SaplingProvingContext, + _esk: jubjub::Fr, + _payment_address: masp_primitives::sapling::PaymentAddress, + _rcm: jubjub::Fr, + asset_type: AssetType, + value: u64, + rcv: jubjub::Fr, + ) -> ([u8; GROTH_PROOF_SIZE], jubjub::ExtendedPoint) { + 
// Accumulate the value commitment randomness in the context + { + let mut tmp = rcv.neg(); // Outputs subtract from the total. + tmp.add_assign(&ctx.bsk); + + // Update the context + ctx.bsk = tmp; + } + + // Construct the value commitment for the proof instance + let value_commitment = asset_type.value_commitment(value, rcv); + + // Compute the actual value commitment + let value_commitment_point: jubjub::ExtendedPoint = + value_commitment.commitment().into(); + + // Accumulate the value commitment in the context. We do this to + // check internal consistency. + ctx.cv_sum -= value_commitment_point; // Outputs subtract from the total. + + let mut zkproof = [0u8; GROTH_PROOF_SIZE]; + let proof = Proof:: { + a: G1Affine::generator(), + b: G2Affine::generator(), + c: G1Affine::generator(), + }; + proof + .write(&mut zkproof[..]) + .expect("should be able to serialize a proof"); + + (zkproof, value_commitment_point) + } + + fn convert_proof( + &self, + ctx: &mut Self::SaplingProvingContext, + allowed_conversion: AllowedConversion, + value: u64, + _anchor: bls12_381::Scalar, + _merkle_path: MerklePath, + rcv: jubjub::Fr, + ) -> Result<([u8; GROTH_PROOF_SIZE], jubjub::ExtendedPoint), ()> + { + // Accumulate the value commitment randomness in the context + { + let mut tmp = rcv; + tmp.add_assign(&ctx.bsk); + + // Update the context + ctx.bsk = tmp; + } + + // Construct the value commitment + let value_commitment = + allowed_conversion.value_commitment(value, rcv); + + // Compute value commitment + let value_commitment: jubjub::ExtendedPoint = + value_commitment.commitment().into(); + + // Accumulate the value commitment in the context + ctx.cv_sum += value_commitment; + + let mut zkproof = [0u8; GROTH_PROOF_SIZE]; + let proof = Proof:: { + a: G1Affine::generator(), + b: G2Affine::generator(), + c: G1Affine::generator(), + }; + proof + .write(&mut zkproof[..]) + .expect("should be able to serialize a proof"); + + Ok((zkproof, value_commitment)) + } + + fn binding_sig( + 
&self, + ctx: &mut Self::SaplingProvingContext, + assets_and_values: &I128Sum, + sighash: &[u8; 32], + ) -> Result { + // Initialize secure RNG + let mut rng = self.0.lock().unwrap(); + + // Grab the current `bsk` from the context + let bsk = PrivateKey(ctx.bsk); + + // Grab the `bvk` using DerivePublic. + let bvk = PublicKey::from_private( + &bsk, + VALUE_COMMITMENT_RANDOMNESS_GENERATOR, + ); + + // In order to check internal consistency, let's use the accumulated + // value commitments (as the verifier would) and apply + // value_balance to compare against our derived bvk. + { + let final_bvk = assets_and_values + .components() + .map(|(asset_type, value_balance)| { + // Compute value balance for each asset + // Error for bad value balances (-INT128_MAX value) + masp_compute_value_balance(*asset_type, *value_balance) + }) + .try_fold(ctx.cv_sum, |tmp, value_balance| { + // Compute cv_sum minus sum of all value balances + Result::<_, ()>::Ok(tmp - value_balance.ok_or(())?) + })?; + + // The result should be the same, unless the provided + // valueBalance is wrong. + if bvk.0 != final_bvk { + return Err(()); + } + } + + // Construct signature message + let mut data_to_be_signed = [0u8; 64]; + data_to_be_signed[0..32].copy_from_slice(&bvk.0.to_bytes()); + data_to_be_signed[32..64].copy_from_slice(&sighash[..]); + + // Sign + Ok(bsk.sign( + &data_to_be_signed, + &mut *rng, + VALUE_COMMITMENT_RANDOMNESS_GENERATOR, + )) + } + } + + #[derive(Debug, Clone)] + /// Adapts a CSPRNG from a PRNG for proptesting + pub struct TestCsprng(pub R); + + impl CryptoRng for TestCsprng {} + + impl RngCore for TestCsprng { + fn next_u32(&mut self) -> u32 { + self.0.next_u32() + } + + fn next_u64(&mut self) -> u64 { + self.0.next_u64() + } + + fn fill_bytes(&mut self, dest: &mut [u8]) { + self.0.fill_bytes(dest) + } + + fn try_fill_bytes( + &mut self, + dest: &mut [u8], + ) -> Result<(), rand::Error> { + self.0.try_fill_bytes(dest) + } + } + + prop_compose! 
{ + /// Expose a random number generator + pub fn arb_rng()(rng in Just(()).prop_perturb(|(), rng| rng)) -> TestRng { + rng + } + } + + prop_compose! { + /// Generate an arbitrary output description with the given value + pub fn arb_output_description( + asset_type: AssetType, + value: u64, + )( + mut rng in arb_rng().prop_map(TestCsprng), + ) -> (Option, masp_primitives::sapling::PaymentAddress, AssetType, u64, MemoBytes) { + let mut spending_key_seed = [0; 32]; + rng.fill_bytes(&mut spending_key_seed); + let spending_key = MaspExtendedSpendingKey::master(spending_key_seed.as_ref()); + + let viewing_key = ExtendedFullViewingKey::from(&spending_key).fvk.vk; + let (div, _g_d) = find_valid_diversifier(&mut rng); + let payment_addr = viewing_key + .to_payment_address(div) + .expect("a PaymentAddress"); + + (None, payment_addr, asset_type, value, MemoBytes::empty()) + } + } + + prop_compose! { + /// Generate an arbitrary spend description with the given value + pub fn arb_spend_description( + asset_type: AssetType, + value: u64, + )( + address in arb_transparent_address(), + expiration_height in arb_height(BranchId::MASP, &Network), + mut rng in arb_rng().prop_map(TestCsprng), + bparams_rng in arb_rng().prop_map(TestCsprng), + prover_rng in arb_rng().prop_map(TestCsprng), + ) -> (MaspExtendedSpendingKey, Diversifier, Note, Node) { + let mut spending_key_seed = [0; 32]; + rng.fill_bytes(&mut spending_key_seed); + let spending_key = MaspExtendedSpendingKey::master(spending_key_seed.as_ref()); + + let viewing_key = ExtendedFullViewingKey::from(&spending_key).fvk.vk; + let (div, _g_d) = find_valid_diversifier(&mut rng); + let payment_addr = viewing_key + .to_payment_address(div) + .expect("a PaymentAddress"); + + let mut builder = Builder::::new( + NETWORK, + // NOTE: this is going to add 20 more blocks to the actual + // expiration but there's no other exposed function that we could + // use from the masp crate to specify the expiration better + 
expiration_height.unwrap(), + ); + // Add a transparent input to support our desired shielded output + builder.add_transparent_input(TxOut { asset_type, value, address }).unwrap(); + // Finally add the shielded output that we need + builder.add_sapling_output(None, payment_addr, asset_type, value, MemoBytes::empty()).unwrap(); + // Build a transaction in order to get its shielded outputs + let (transaction, metadata) = builder.build( + &MockTxProver(Mutex::new(prover_rng)), + &FeeRule::non_standard(U64Sum::zero()), + &mut rng, + &mut RngBuildParams::new(bparams_rng), + ).unwrap(); + // Extract the shielded output from the transaction + let shielded_output = &transaction + .sapling_bundle() + .unwrap() + .shielded_outputs[metadata.output_index(0).unwrap()]; + + // Let's now decrypt the constructed notes + let (note, pa, _memo) = try_sapling_note_decryption::<_, OutputDescription<<::SaplingAuth as masp_primitives::transaction::components::sapling::Authorization>::Proof>>( + &NETWORK, + 1.into(), + &PreparedIncomingViewingKey::new(&viewing_key.ivk()), + shielded_output, + ).unwrap(); + assert_eq!(payment_addr, pa); + // Make a path to out new note + let node = Node::new(shielded_output.cmu.to_repr()); + (spending_key, div, note, node) + } + } + + prop_compose! { + /// Generate an arbitrary MASP denomination + pub fn arb_masp_digit_pos()(denom in 0..4u8) -> MaspDigitPos { + MaspDigitPos::from(denom) + } + } + + prop_compose! { + /// Arbitrarily partition the given vector of integers into sets and sum + /// them + pub fn arb_partition(values: Vec)(buckets in usize::from(!values.is_empty())..=values.len())( + values in Just(values.clone()), + assigns in collection::vec(0..buckets, values.len()), + buckets in Just(buckets), + ) -> Vec { + let mut buckets = vec![0; buckets]; + for (bucket, value) in assigns.iter().zip(values) { + buckets[*bucket] += value; + } + buckets + } + } + + prop_compose! 
{ + /// Generate arbitrary spend descriptions with the given asset type + /// partitioning the given values + pub fn arb_spend_descriptions( + asset: AssetData, + values: Vec, + )(partition in arb_partition(values))( + spend_description in partition + .iter() + .map(|value| arb_spend_description( + encode_asset_type( + asset.token.clone(), + asset.denom, + asset.position, + asset.epoch, + ).unwrap(), + *value, + )).collect::>() + ) -> Vec<(MaspExtendedSpendingKey, Diversifier, Note, Node)> { + spend_description + } + } + + prop_compose! { + /// Generate arbitrary output descriptions with the given asset type + /// partitioning the given values + pub fn arb_output_descriptions( + asset: AssetData, + values: Vec, + )(partition in arb_partition(values))( + output_description in partition + .iter() + .map(|value| arb_output_description( + encode_asset_type( + asset.token.clone(), + asset.denom, + asset.position, + asset.epoch, + ).unwrap(), + *value, + )).collect::>() + ) -> Vec<(Option, masp_primitives::sapling::PaymentAddress, AssetType, u64, MemoBytes)> { + output_description + } + } + + prop_compose! { + /// Generate arbitrary spend descriptions with the given asset type + /// partitioning the given values + pub fn arb_txouts( + asset: AssetData, + values: Vec, + address: TransparentAddress, + )( + partition in arb_partition(values), + ) -> Vec { + partition + .iter() + .map(|value| TxOut { + asset_type: encode_asset_type( + asset.token.clone(), + asset.denom, + asset.position, + asset.epoch, + ).unwrap(), + value: *value, + address, + }).collect::>() + } + } + + prop_compose! { + /// Generate an arbitrary masp epoch + pub fn arb_masp_epoch()(epoch: u64) -> MaspEpoch{ + MaspEpoch::new(epoch) + } + } + + prop_compose! 
{ + /// Generate an arbitrary pre-asset type + pub fn arb_pre_asset_type()( + token in arb_address(), + denom in arb_denomination(), + position in arb_masp_digit_pos(), + epoch in option::of(arb_masp_epoch()), + ) -> AssetData { + AssetData { + token, + denom, + position, + epoch, + } + } + } +} + +#[cfg(feature = "std")] +/// Implementation of MASP functionality depending on a standard filesystem +pub mod fs { + use std::env; + use std::fs::{File, OpenOptions}; + use std::io::{Read, Write}; + use std::path::PathBuf; + + use super::*; + use crate::validation::{ + get_params_dir, CONVERT_NAME, ENV_VAR_MASP_PARAMS_DIR, OUTPUT_NAME, + SPEND_NAME, + }; + + /// Shielded context file name + const FILE_NAME: &str = "shielded.dat"; + const TMP_FILE_PREFIX: &str = "shielded.tmp"; + const SPECULATIVE_FILE_NAME: &str = "speculative_shielded.dat"; + const SPECULATIVE_TMP_FILE_PREFIX: &str = "speculative_shielded.tmp"; + const CACHE_FILE_NAME: &str = "shielded_sync.cache"; + const CACHE_FILE_TMP_PREFIX: &str = "shielded_sync.cache.tmp"; + + #[derive(Debug, BorshSerialize, BorshDeserialize, Clone)] + /// An implementation of ShieldedUtils for standard filesystems + pub struct FsShieldedUtils { + #[borsh(skip)] + pub(crate) context_dir: PathBuf, + } + + impl FsShieldedUtils { + /// Initialize a shielded transaction context that identifies notes + /// decryptable by any viewing key in the given set + pub fn new(context_dir: PathBuf) -> ShieldedWallet { + // Make sure that MASP parameters are downloaded to enable MASP + // transaction building and verification later on + let params_dir = get_params_dir(); + let spend_path = params_dir.join(SPEND_NAME); + let convert_path = params_dir.join(CONVERT_NAME); + let output_path = params_dir.join(OUTPUT_NAME); + if !(spend_path.exists() + && convert_path.exists() + && output_path.exists()) + { + #[allow(clippy::print_stdout)] + { + println!("MASP parameters not present, downloading..."); + } + masp_proofs::download_masp_parameters(None) + 
.expect("MASP parameters not present or downloadable"); + #[allow(clippy::print_stdout)] + { + println!( + "MASP parameter download complete, resuming \ + execution..." + ); + } + } + // Finally initialize a shielded context with the supplied directory + + let sync_status = + if std::fs::read(context_dir.join(SPECULATIVE_FILE_NAME)) + .is_ok() + { + // Load speculative state + ContextSyncStatus::Speculative + } else { + ContextSyncStatus::Confirmed + }; + + let utils = Self { context_dir }; + ShieldedWallet { + utils, + sync_status, + ..Default::default() + } + } + + /// Write to a file ensuring that all contents of the file + /// were written by a single process (in case of multiple + /// concurrent write attempts). + /// + /// N.B. This is not the same as a file lock. If multiple + /// concurrent writes take place, this code ensures that + /// the result of exactly one will be persisted. + /// + /// N.B. This only truly works if each process uses + /// to a *unique* tmp file name. + fn atomic_file_write( + &self, + tmp_file_name: impl AsRef, + file_name: impl AsRef, + data: impl BorshSerialize, + ) -> std::io::Result<()> { + let tmp_path = self.context_dir.join(&tmp_file_name); + { + // First serialize the shielded context into a temporary file. + // Inability to create this file implies a simultaneuous write + // is in progress. In this case, immediately + // fail. This is unproblematic because the data + // intended to be stored can always be re-fetched + // from the blockchain. + let mut ctx_file = OpenOptions::new() + .write(true) + .create_new(true) + .open(tmp_path.clone())?; + let mut bytes = Vec::new(); + data.serialize(&mut bytes).unwrap_or_else(|e| { + panic!( + "cannot serialize data to {} with error: {}", + file_name.as_ref().to_string_lossy(), + e, + ) + }); + ctx_file.write_all(&bytes[..])?; + } + // Atomically update the old shielded context file with new data. 
+ // Atomicity is required to prevent other client instances from + // reading corrupt data. + std::fs::rename(tmp_path, self.context_dir.join(file_name)) + } + } + + impl Default for FsShieldedUtils { + fn default() -> Self { + Self { + context_dir: PathBuf::from(FILE_NAME), + } + } + } + + #[cfg_attr(feature = "async-send", async_trait::async_trait)] + #[cfg_attr(not(feature = "async-send"), async_trait::async_trait(?Send))] + impl ShieldedUtils for FsShieldedUtils { + fn local_tx_prover(&self) -> LocalTxProver { + if let Ok(params_dir) = env::var(ENV_VAR_MASP_PARAMS_DIR) { + let params_dir = PathBuf::from(params_dir); + let spend_path = params_dir.join(SPEND_NAME); + let convert_path = params_dir.join(CONVERT_NAME); + let output_path = params_dir.join(OUTPUT_NAME); + LocalTxProver::new(&spend_path, &output_path, &convert_path) + } else { + LocalTxProver::with_default_location() + .expect("unable to load MASP Parameters") + } + } + + /// Try to load the last saved shielded context from the given context + /// directory. If this fails, then leave the current context unchanged. + async fn load( + &self, + ctx: &mut ShieldedWallet, + force_confirmed: bool, + ) -> std::io::Result<()> { + // Try to load shielded context from file + let file_name = if force_confirmed { + FILE_NAME + } else { + match ctx.sync_status { + ContextSyncStatus::Confirmed => FILE_NAME, + ContextSyncStatus::Speculative => SPECULATIVE_FILE_NAME, + } + }; + let mut ctx_file = File::open(self.context_dir.join(file_name))?; + let mut bytes = Vec::new(); + ctx_file.read_to_end(&mut bytes)?; + // Fill the supplied context with the deserialized object + *ctx = ShieldedWallet { + utils: ctx.utils.clone(), + ..ShieldedWallet::::deserialize(&mut &bytes[..])? + }; + Ok(()) + } + + /// Save this confirmed shielded context into its associated context + /// directory. 
At the same time, delete the speculative file if present + async fn save( + &self, + ctx: &ShieldedWallet, + ) -> std::io::Result<()> { + let (tmp_file_pref, file_name) = match ctx.sync_status { + ContextSyncStatus::Confirmed => (TMP_FILE_PREFIX, FILE_NAME), + ContextSyncStatus::Speculative => { + (SPECULATIVE_TMP_FILE_PREFIX, SPECULATIVE_FILE_NAME) + } + }; + let tmp_file_name = { + let t = tempfile::Builder::new() + .prefix(tmp_file_pref) + .tempfile()?; + t.path().file_name().unwrap().to_owned() + }; + self.atomic_file_write(tmp_file_name, file_name, ctx)?; + + // Remove the speculative file if present since it's state is + // overruled by the confirmed one we just saved + if let ContextSyncStatus::Confirmed = ctx.sync_status { + let _ = std::fs::remove_file( + self.context_dir.join(SPECULATIVE_FILE_NAME), + ); + } + + Ok(()) + } + + async fn cache_save( + &self, + cache: &DispatcherCache, + ) -> std::io::Result<()> { + let tmp_file_name = { + let t = tempfile::Builder::new() + .prefix(CACHE_FILE_TMP_PREFIX) + .tempfile()?; + t.path().file_name().unwrap().to_owned() + }; + + self.atomic_file_write(tmp_file_name, CACHE_FILE_NAME, cache) + } + + async fn cache_load(&self) -> std::io::Result { + let file_name = self.context_dir.join(CACHE_FILE_NAME); + let mut file = File::open(file_name)?; + DispatcherCache::try_from_reader(&mut file) + } + } +} diff --git a/crates/sdk/src/masp/shielded_sync/dispatcher.rs b/crates/shielded_token/src/masp/shielded_sync/dispatcher.rs similarity index 94% rename from crates/sdk/src/masp/shielded_sync/dispatcher.rs rename to crates/shielded_token/src/masp/shielded_sync/dispatcher.rs index ac60c011d9..3f5a3ec416 100644 --- a/crates/sdk/src/masp/shielded_sync/dispatcher.rs +++ b/crates/shielded_token/src/masp/shielded_sync/dispatcher.rs @@ -8,6 +8,7 @@ use std::sync::Arc; use std::task::{Context, Poll}; use borsh::{BorshDeserialize, BorshSerialize}; +use eyre::{eyre, WrapErr}; use futures::future::{select, Either}; use 
futures::task::AtomicWaker; use masp_primitives::merkle_tree::{CommitmentTree, IncrementalWitness}; @@ -16,24 +17,22 @@ use masp_primitives::transaction::Transaction; use namada_core::chain::BlockHeight; use namada_core::collections::HashMap; use namada_core::control_flow::time::{Duration, LinearBackoff, Sleep}; +use namada_core::control_flow::ShutdownSignal; use namada_core::hints; +use namada_core::task_env::TaskSpawner; +use namada_io::{MaybeSend, MaybeSync, ProgressBar}; use namada_tx::IndexedTx; +use namada_wallet::{DatedKeypair, DatedSpendingKey}; use super::utils::{IndexedNoteEntry, MaspClient}; -use crate::control_flow::ShutdownSignal; -use crate::error::Error; -use crate::io::ProgressBar; use crate::masp::shielded_sync::trial_decrypt; use crate::masp::utils::{ blocks_left_to_fetch, DecryptedData, Fetched, RetryStrategy, TrialDecrypted, }; use crate::masp::{ - to_viewing_key, MaspExtendedSpendingKey, NoteIndex, ShieldedContext, - ShieldedUtils, WitnessMap, + to_viewing_key, MaspExtendedSpendingKey, NoteIndex, ShieldedUtils, + ShieldedWallet, WitnessMap, }; -use crate::task_env::TaskSpawner; -use crate::wallet::{DatedKeypair, DatedSpendingKey}; -use crate::{MaybeSend, MaybeSync}; struct AsyncCounterInner { waker: AtomicWaker, @@ -147,7 +146,7 @@ impl Drop for PanicFlag { } struct TaskError { - error: Error, + error: eyre::Error, context: C, } @@ -218,7 +217,7 @@ pub struct DispatcherCache { enum DispatcherState { Normal, Interrupted, - Errored(Error), + Errored(eyre::Error), } #[derive(Default, Debug)] @@ -247,7 +246,7 @@ where client: M, state: DispatcherState, tasks: DispatcherTasks, - ctx: ShieldedContext, + ctx: ShieldedWallet, config: Config, cache: DispatcherCache, /// We are syncing up to this height @@ -269,7 +268,7 @@ where U: ShieldedUtils + MaybeSend + MaybeSync, { let ctx = { - let mut ctx = ShieldedContext { + let mut ctx = ShieldedWallet { utils: utils.clone(), ..Default::default() }; @@ -321,7 +320,7 @@ where last_query_height: Option, sks: 
&[DatedSpendingKey], fvks: &[DatedKeypair], - ) -> Result>, Error> { + ) -> Result>, eyre::Error> { let initial_state = self .perform_initial_setup( start_query_height, @@ -353,9 +352,7 @@ where self.apply_cache_to_shielded_context(&initial_state)?; self.finish_progress_bars(); self.ctx.save().await.map_err(|err| { - Error::Other(format!( - "Failed to save the shielded context: {err}" - )) + eyre!("Failed to save the shielded context: {err}") })?; self.save_cache().await; Ok(Some(self.ctx)) @@ -390,7 +387,7 @@ where last_query_height, .. }: &InitialState, - ) -> Result<(), Error> { + ) -> Result<(), eyre::Error> { if let Some((_, cmt)) = self.cache.commitment_tree.take() { self.ctx.tree = cmt; } @@ -457,13 +454,13 @@ where last_query_height: Option, sks: &[DatedSpendingKey], fvks: &[DatedKeypair], - ) -> Result { + ) -> Result { if start_query_height > last_query_height { - return Err(Error::Other(format!( + return Err(eyre!( "The start height {start_query_height:?} cannot be higher \ than the ending height {last_query_height:?} in the shielded \ sync" - ))); + )); } for vk in sks @@ -502,22 +499,25 @@ where if self.config.wait_for_last_query_height && shutdown_signal.borrow_mut().received() { - return ControlFlow::Break(Err(Error::Other( - "Interrupted while waiting for last query height" - .to_string(), + return ControlFlow::Break(Err(eyre!( + "Interrupted while waiting for last query height", ))); } // Query for the last produced block height - let last_block_height = match self.client.last_block_height().await + let last_block_height = match self + .client + .last_block_height() + .await + .wrap_err("Failed to fetch last block height") { Ok(Some(last_block_height)) => last_block_height, Ok(None) => { return if self.config.wait_for_last_query_height { ControlFlow::Continue(()) } else { - ControlFlow::Break(Err(Error::Other( - "No block has been committed yet".to_string(), + ControlFlow::Break(Err(eyre!( + "No block has been committed yet", ))) }; } @@ -568,8 
+568,8 @@ where fn check_exit_conditions(&mut self) { if hints::unlikely(self.tasks.panic_flag.panicked()) { - self.state = DispatcherState::Errored(Error::Other( - "A worker thread panicked during the shielded sync".into(), + self.state = DispatcherState::Errored(eyre!( + "A worker thread panicked during the shielded sync".to_string(), )); } if matches!( @@ -580,7 +580,7 @@ where } if self.config.shutdown_signal.received() { self.config.fetched_tracker.message( - "Interrupt received, shutting down shielded sync".into(), + "Interrupt received, shutting down shielded sync".to_string(), ); self.state = DispatcherState::Interrupted; self.interrupt_flag.set(); @@ -692,7 +692,7 @@ where } /// Check if we can launch a new fetch task retry. - fn can_launch_new_fetch_retry(&mut self, error: Error) -> bool { + fn can_launch_new_fetch_retry(&mut self, error: eyre::Error) -> bool { if matches!( self.state, DispatcherState::Errored(_) | DispatcherState::Interrupted @@ -717,12 +717,14 @@ where let client = self.client.clone(); self.spawn_async(Box::pin(async move { Message::UpdateWitnessMap( - client.fetch_witness_map(height).await.map_err(|error| { - TaskError { + client + .fetch_witness_map(height) + .await + .wrap_err("Failed to fetch witness map") + .map_err(|error| TaskError { error, context: height, - } - }), + }), ) })); } @@ -734,12 +736,14 @@ where let client = self.client.clone(); self.spawn_async(Box::pin(async move { Message::UpdateCommitmentTree( - client.fetch_commitment_tree(height).await.map_err(|error| { - TaskError { + client + .fetch_commitment_tree(height) + .await + .wrap_err("Failed to fetch commitment tree") + .map_err(|error| TaskError { error, context: height, - } - }), + }), ) })); } @@ -751,12 +755,14 @@ where let client = self.client.clone(); self.spawn_async(Box::pin(async move { Message::UpdateNotesMap( - client.fetch_note_index(height).await.map_err(|error| { - TaskError { + client + .fetch_note_index(height) + .await + .wrap_err("Failed to fetch 
note index") + .map_err(|error| TaskError { error, context: height, - } - }), + }), ) })); } @@ -772,8 +778,9 @@ where client .fetch_shielded_transfers(from, to) .await + .wrap_err("Failed to fetch shielded transfers") .map_err(|error| TaskError { - error, + error: eyre!("{error}"), context: [from, to], }) .map(|batch| (from, to, batch)), @@ -863,21 +870,21 @@ mod dispatcher_tests { use futures::join; use namada_core::chain::BlockHeight; + use namada_core::control_flow::testing::shutdown_signal; use namada_core::storage::TxIndex; + use namada_core::task_env::TaskEnvironment; + use namada_io::DevNullProgressBar; use namada_tx::IndexedTx; + use namada_wallet::StoredKeypair; use tempfile::tempdir; use super::*; - use crate::control_flow::testing::shutdown_signal; - use crate::io::DevNullProgressBar; use crate::masp::fs::FsShieldedUtils; use crate::masp::test_utils::{ arbitrary_masp_tx, arbitrary_masp_tx_with_fee_unshielding, arbitrary_vk, dated_arbitrary_vk, TestingMaspClient, }; use crate::masp::{MaspLocalTaskEnv, ShieldedSyncConfig}; - use crate::task_env::TaskEnvironment; - use crate::wallet::StoredKeypair; #[tokio::test] async fn test_applying_cache_drains_decrypted_data() { @@ -1015,7 +1022,7 @@ mod dispatcher_tests { dispatcher.spawn_async(Box::pin(async move { barrier.wait().await; Message::UpdateWitnessMap(Err(TaskError { - error: Error::Other("Test".to_string()), + error: eyre!("Test"), context: BlockHeight::first(), })) })); @@ -1074,12 +1081,10 @@ mod dispatcher_tests { fut, }; - let Err(Error::Other(ref msg)) = res else { - panic!("Test failed") - }; + let Err(msg) = res else { panic!("Test failed") }; assert_eq!( - msg, + msg.to_string(), "A worker thread panicked during the shielded sync", ); }) @@ -1168,9 +1173,9 @@ mod dispatcher_tests { let result = dispatcher.run(None, None, &[], &[vk]).await; match result { - Err(Error::Other(msg)) => assert_eq!( - msg.as_str(), - "After retrying, could not fetch all MASP txs." 
+ Err(msg) => assert_eq!( + msg.to_string(), + "Failed to fetch shielded transfers" ), other => { panic!("{:?} does not match Error::Other(_)", other) @@ -1283,9 +1288,9 @@ mod dispatcher_tests { masp_tx_sender.send(None).expect("Test failed"); let result = dispatcher.run(None, None, &[], &[vk]).await; match result { - Err(Error::Other(msg)) => assert_eq!( - msg.as_str(), - "After retrying, could not fetch all MASP txs." + Err(msg) => assert_eq!( + msg.to_string(), + "Failed to fetch shielded transfers" ), other => { panic!("{:?} does not match Error::Other(_)", other) diff --git a/crates/sdk/src/masp/shielded_sync/mod.rs b/crates/shielded_token/src/masp/shielded_sync/mod.rs similarity index 93% rename from crates/sdk/src/masp/shielded_sync/mod.rs rename to crates/shielded_token/src/masp/shielded_sync/mod.rs index b431f3666e..993488665c 100644 --- a/crates/sdk/src/masp/shielded_sync/mod.rs +++ b/crates/shielded_token/src/masp/shielded_sync/mod.rs @@ -1,25 +1,27 @@ use std::collections::BTreeMap; +#[cfg(not(target_family = "wasm"))] use std::future::Future; use std::ops::ControlFlow; +#[cfg(not(target_family = "wasm"))] +use eyre::eyre; use masp_primitives::sapling::note_encryption::{ try_sapling_note_decryption, PreparedIncomingViewingKey, }; use masp_primitives::sapling::ViewingKey; use masp_primitives::transaction::components::OutputDescription; use masp_primitives::transaction::{Authorization, Authorized, Transaction}; +#[cfg(not(target_family = "wasm"))] +use namada_core::task_env::{ + LocalSetSpawner, LocalSetTaskEnvironment, TaskEnvironment, +}; +use namada_io::{MaybeSend, MaybeSync}; use typed_builder::TypedBuilder; use super::shielded_sync::utils::{MaspClient, RetryStrategy}; -use crate::error::Error; use crate::masp::shielded_sync::dispatcher::Dispatcher; use crate::masp::utils::DecryptedData; use crate::masp::{ShieldedUtils, NETWORK}; -#[cfg(not(target_family = "wasm"))] -use crate::task_env::{ - LocalSetSpawner, LocalSetTaskEnvironment, TaskEnvironment, 
-}; -use crate::{MaybeSend, MaybeSync}; pub mod dispatcher; pub mod utils; @@ -52,16 +54,15 @@ pub struct ShieldedSyncConfig { pub struct MaspLocalTaskEnv(LocalSetTaskEnvironment); #[cfg(not(target_family = "wasm"))] +#[cfg(feature = "std")] impl MaspLocalTaskEnv { /// create a new [`MaspLocalTaskEnv`] - pub fn new(num_threads: usize) -> Result { + pub fn new(num_threads: usize) -> Result { let pool = rayon::ThreadPoolBuilder::new() .num_threads(num_threads) .panic_handler(|_| {}) .build() - .map_err(|err| { - Error::Other(format!("Failed to create thread pool: {err}")) - })?; + .map_err(|err| eyre!("Failed to create thread pool: {err}"))?; Ok(Self(LocalSetTaskEnvironment::new(pool))) } } diff --git a/crates/shielded_token/src/masp/shielded_sync/utils.rs b/crates/shielded_token/src/masp/shielded_sync/utils.rs new file mode 100644 index 0000000000..138700da6b --- /dev/null +++ b/crates/shielded_token/src/masp/shielded_sync/utils.rs @@ -0,0 +1,542 @@ +//! Helper functions and types +use std::collections::BTreeMap; + +use borsh::{BorshDeserialize, BorshSerialize}; +use masp_primitives::memo::MemoBytes; +use masp_primitives::merkle_tree::{CommitmentTree, IncrementalWitness}; +use masp_primitives::sapling::{Node, Note, PaymentAddress, ViewingKey}; +use masp_primitives::transaction::Transaction; +use namada_core::chain::BlockHeight; +use namada_core::collections::HashMap; +use namada_tx::{IndexedTx, IndexedTxRange}; + +/// Type alias for convenience and profit +pub type IndexedNoteData = BTreeMap>; + +/// Type alias for the entries of [`IndexedNoteData`] iterators +pub type IndexedNoteEntry = (IndexedTx, Vec); + +/// Borrowed version of an [`IndexedNoteEntry`] +pub type IndexedNoteEntryRefs<'a> = (&'a IndexedTx, &'a Vec); + +/// Type alias for a successful note decryption. +pub type DecryptedData = (Note, PaymentAddress, MemoBytes); + +/// Cache of decrypted notes. 
+#[derive(Default, BorshSerialize, BorshDeserialize)] +pub struct TrialDecrypted { + inner: + HashMap>>, +} + +impl TrialDecrypted { + /// Returns the number of successful trial decryptions in cache. + pub fn successful_decryptions(&self) -> usize { + self.inner + .values() + .flat_map(|viewing_keys_to_notes| viewing_keys_to_notes.values()) + .map(|decrypted_notes| decrypted_notes.len()) + .sum::() + } + + /// Get cached notes decrypted with `vk`, indexed at `itx`. + pub fn get( + &self, + itx: &IndexedTx, + vk: &ViewingKey, + ) -> Option<&BTreeMap> { + self.inner.get(itx).and_then(|h| h.get(vk)) + } + + /// Take cached notes decrypted with `vk`, indexed at `itx`. + pub fn take( + &mut self, + itx: &IndexedTx, + vk: &ViewingKey, + ) -> Option> { + let (notes, no_more_notes) = { + let viewing_keys_to_notes = self.inner.get_mut(itx)?; + let notes = viewing_keys_to_notes.swap_remove(vk)?; + (notes, viewing_keys_to_notes.is_empty()) + }; + if no_more_notes { + self.inner.swap_remove(itx); + } + Some(notes) + } + + /// Cache `notes` decrypted with `vk`, indexed at `itx`. + pub fn insert( + &mut self, + itx: IndexedTx, + vk: ViewingKey, + notes: BTreeMap, + ) { + self.inner.entry(itx).or_default().insert(vk, notes); + } + + /// Check if empty + pub fn is_empty(&self) -> bool { + self.inner.is_empty() + } +} + +/// A cache of fetched indexed transactions. +/// +/// An invariant that shielded-sync maintains is that +/// this cache either contains all transactions from +/// a given height, or none. +#[derive(Debug, Default, Clone, BorshSerialize, BorshDeserialize)] +pub struct Fetched { + pub(crate) txs: IndexedNoteData, +} + +impl Fetched { + /// Append elements to the cache from an iterator. + pub fn extend(&mut self, items: I) + where + I: IntoIterator, + { + self.txs.extend(items); + } + + /// Iterates over the fetched transactions in the order + /// they appear in blocks. 
+ pub fn iter( + &self, + ) -> impl IntoIterator> + '_ { + &self.txs + } + + /// Iterates over the fetched transactions in the order + /// they appear in blocks, whilst taking ownership of + /// the returned data. + pub fn take(&mut self) -> impl IntoIterator { + std::mem::take(&mut self.txs) + } + + /// Add a single entry to the cache. + pub fn insert(&mut self, (k, v): IndexedNoteEntry) { + self.txs.insert(k, v); + } + + /// Check if this cache has already been populated for a given + /// block height. + pub fn contains_height(&self, height: BlockHeight) -> bool { + self.txs + .range(IndexedTxRange::with_height(height)) + .next() + .is_some() + } + + /// Check if empty + pub fn is_empty(&self) -> bool { + self.txs.is_empty() + } + + /// Check the length of the fetched cache + pub fn len(&self) -> usize { + self.txs.len() + } +} + +impl IntoIterator for Fetched { + type IntoIter = ::IntoIter; + type Item = IndexedNoteEntry; + + fn into_iter(self) -> Self::IntoIter { + self.txs.into_iter() + } +} + +/// When retrying to fetch all notes in a +/// loop, this dictates the strategy for +/// how many attempts should be made. +#[derive(Debug, Copy, Clone)] +pub enum RetryStrategy { + /// Always retry + Forever, + /// Limit number of retries to a fixed number + Times(u64), +} + +impl RetryStrategy { + /// Check if retries are exhausted. + pub fn may_retry(&mut self) -> bool { + match self { + RetryStrategy::Forever => true, + RetryStrategy::Times(left) => { + if *left == 0 { + false + } else { + *left -= 1; + true + } + } + } + } +} + +/// Enumerates the capabilities of a [`MaspClient`] implementation. +#[derive(Copy, Clone, Eq, PartialEq, Debug)] +pub enum MaspClientCapabilities { + /// The masp client implementation is only capable of fetching shielded + /// transfers. + OnlyTransfers, + /// The masp client implementation is capable of not only fetching shielded + /// transfers, but also of fetching commitment trees, witness maps, and + /// note maps. 
+ AllData, +} + +impl MaspClientCapabilities { + /// Check if the lack of one or more capabilities in the + /// masp client implementation warrants a manual update + /// of the witnesses map. + pub const fn needs_witness_map_update(&self) -> bool { + matches!(self, Self::OnlyTransfers) + } + + /// Check if the masp client is able to fetch a pre-built + /// commitment tree. + pub const fn may_fetch_pre_built_tree(&self) -> bool { + matches!(self, Self::AllData) + } + + /// Check if the masp client is able to fetch a pre-built + /// notes index. + pub const fn may_fetch_pre_built_notes_index(&self) -> bool { + matches!(self, Self::AllData) + } + + /// Check if the masp client is able to fetch a pre-built + /// witness map. + pub const fn may_fetch_pre_built_witness_map(&self) -> bool { + matches!(self, Self::AllData) + } +} + +/// This abstracts away the implementation details +/// of how shielded-sync fetches the necessary data +/// from a remote server. +pub trait MaspClient: Clone { + /// Error type returned by the methods of this trait + type Error: std::error::Error + Send + Sync + 'static; + + /// Return the last block height we can retrieve data from. + #[allow(async_fn_in_trait)] + async fn last_block_height( + &self, + ) -> Result, Self::Error>; + + /// Fetch shielded transfers from blocks heights in the range `[from, to]`, + /// keeping track of progress through `progress`. The fetched transfers + /// are sent over to a separate worker through `tx_sender`. + #[allow(async_fn_in_trait)] + async fn fetch_shielded_transfers( + &self, + from: BlockHeight, + to: BlockHeight, + ) -> Result, Self::Error>; + + /// Return the capabilities of this client. + fn capabilities(&self) -> MaspClientCapabilities; + + /// Fetch the commitment tree of height `height`. + #[allow(async_fn_in_trait)] + async fn fetch_commitment_tree( + &self, + height: BlockHeight, + ) -> Result, Self::Error>; + + /// Fetch the tx notes map of height `height`. 
+ #[allow(async_fn_in_trait)] + async fn fetch_note_index( + &self, + height: BlockHeight, + ) -> Result, Self::Error>; + + /// Fetch the witness map of height `height`. + #[allow(async_fn_in_trait)] + async fn fetch_witness_map( + &self, + height: BlockHeight, + ) -> Result>, Self::Error>; +} + +/// Given a block height range we wish to request and a cache of fetched block +/// heights, returns the set of sub-ranges we need to request so that all blocks +/// in the inclusive range `[from, to]` get cached. +pub fn blocks_left_to_fetch( + from: BlockHeight, + to: BlockHeight, + fetched: &Fetched, +) -> Vec<[BlockHeight; 2]> { + const ZERO: BlockHeight = BlockHeight(0); + + if from > to { + panic!("Empty range passed to `blocks_left_to_fetch`, [{from}, {to}]"); + } + if from == ZERO || to == ZERO { + panic!("Block height values start at 1"); + } + + let mut to_fetch = Vec::with_capacity((to.0 - from.0 + 1) as usize); + let mut current_from = from; + let mut need_to_fetch = true; + + for height in (from.0..=to.0).map(BlockHeight) { + let height_in_cache = fetched.contains_height(height); + + // cross an upper gap boundary + if need_to_fetch && height_in_cache { + if height > current_from { + to_fetch.push([ + current_from, + height.checked_sub(1).expect("Height is greater than zero"), + ]); + } + need_to_fetch = false; + } else if !need_to_fetch && !height_in_cache { + // cross a lower gap boundary + current_from = height; + need_to_fetch = true; + } + } + if need_to_fetch { + to_fetch.push([current_from, to]); + } + to_fetch +} + +#[cfg(test)] +mod test_blocks_left_to_fetch { + use namada_state::TxIndex; + use proptest::prelude::*; + + use super::*; + + struct ArbRange { + max_from: u64, + max_len: u64, + } + + impl Default for ArbRange { + fn default() -> Self { + Self { + max_from: u64::MAX, + max_len: 1000, + } + } + } + + fn fetched_cache_with_blocks( + blocks_in_cache: impl IntoIterator, + ) -> Fetched { + let txs = blocks_in_cache + .into_iter() + .map(|height| 
{ + ( + IndexedTx { + height, + index: TxIndex(0), + }, + vec![], + ) + }) + .collect(); + Fetched { txs } + } + + fn blocks_in_range( + from: BlockHeight, + to: BlockHeight, + ) -> impl Iterator { + (from.0..=to.0).map(BlockHeight) + } + + prop_compose! { + fn arb_block_range(ArbRange { max_from, max_len }: ArbRange) + ( + from in 1u64..=max_from, + ) + ( + from in Just(from), + to in from..from.saturating_add(max_len) + ) + -> (BlockHeight, BlockHeight) + { + (BlockHeight(from), BlockHeight(to)) + } + } + + proptest! { + #[test] + fn test_empty_cache_with_singleton_output((from, to) in arb_block_range(ArbRange::default())) { + let empty_cache = fetched_cache_with_blocks([]); + + let &[[returned_from, returned_to]] = blocks_left_to_fetch( + from, + to, + &empty_cache, + ) + .as_slice() else { + return Err(TestCaseError::Fail("Test failed".into())); + }; + + prop_assert_eq!(returned_from, from); + prop_assert_eq!(returned_to, to); + } + + #[test] + fn test_non_empty_cache_with_empty_output((from, to) in arb_block_range(ArbRange::default())) { + let cache = fetched_cache_with_blocks( + blocks_in_range(from, to) + ); + + let &[] = blocks_left_to_fetch( + from, + to, + &cache, + ) + .as_slice() else { + return Err(TestCaseError::Fail("Test failed".into())); + }; + } + + #[test] + fn test_non_empty_cache_with_singleton_input_and_maybe_singleton_output( + (from, to) in arb_block_range(ArbRange::default()), + block_height in 1u64..1000, + ) { + test_non_empty_cache_with_singleton_input_and_maybe_singleton_output_inner( + from, + to, + BlockHeight(block_height), + )?; + } + + #[test] + fn test_non_empty_cache_with_singleton_hole_and_singleton_output( + (first_from, first_to) in + arb_block_range(ArbRange { + max_from: 1_000_000, + max_len: 1000, + }), + ) { + // [from, to], [to + 2, 2 * to - from + 2] + + let hole = first_to + 1; + let second_from = BlockHeight(first_to.0 + 2); + let second_to = BlockHeight(2 * first_to.0 - first_from.0 + 2); + + let cache = 
fetched_cache_with_blocks( + blocks_in_range(first_from, first_to) + .chain(blocks_in_range(second_from, second_to)), + ); + + let &[[returned_from, returned_to]] = blocks_left_to_fetch( + first_from, + second_to, + &cache, + ) + .as_slice() else { + return Err(TestCaseError::Fail("Test failed".into())); + }; + + prop_assert_eq!(returned_from, hole); + prop_assert_eq!(returned_to, hole); + } + } + + fn test_non_empty_cache_with_singleton_input_and_maybe_singleton_output_inner( + from: BlockHeight, + to: BlockHeight, + block_height: BlockHeight, + ) -> Result<(), TestCaseError> { + let cache = fetched_cache_with_blocks(blocks_in_range(from, to)); + + if block_height >= from && block_height <= to { + // random height is inside the range of txs in cache + + let &[] = blocks_left_to_fetch(block_height, block_height, &cache) + .as_slice() + else { + return Err(TestCaseError::Fail("Test failed".into())); + }; + } else { + // random height is outside the range of txs in cache + + let &[[returned_from, returned_to]] = + blocks_left_to_fetch(block_height, block_height, &cache) + .as_slice() + else { + return Err(TestCaseError::Fail("Test failed".into())); + }; + + prop_assert_eq!(returned_from, block_height); + prop_assert_eq!(returned_to, block_height); + } + + Ok(()) + } + + #[test] + fn test_happy_flow() { + let cache = fetched_cache_with_blocks([ + BlockHeight(1), + BlockHeight(5), + BlockHeight(6), + BlockHeight(8), + BlockHeight(11), + ]); + + let from = BlockHeight(1); + let to = BlockHeight(10); + + let blocks_to_fetch = blocks_left_to_fetch(from, to, &cache); + assert_eq!( + &blocks_to_fetch, + &[ + [BlockHeight(2), BlockHeight(4)], + [BlockHeight(7), BlockHeight(7)], + [BlockHeight(9), BlockHeight(10)], + ], + ); + } + + #[test] + fn test_endpoint_cases() { + let cache = + fetched_cache_with_blocks(blocks_in_range(2.into(), 4.into())); + let blocks_to_fetch = blocks_left_to_fetch(1.into(), 3.into(), &cache); + assert_eq!(&blocks_to_fetch, &[[BlockHeight(1), 
BlockHeight(1)]]); + + // ------------- + + let cache = + fetched_cache_with_blocks(blocks_in_range(1.into(), 3.into())); + let blocks_to_fetch = blocks_left_to_fetch(2.into(), 4.into(), &cache); + assert_eq!(&blocks_to_fetch, &[[BlockHeight(4), BlockHeight(4)]]); + + // ------------- + + let cache = + fetched_cache_with_blocks(blocks_in_range(2.into(), 4.into())); + let blocks_to_fetch = blocks_left_to_fetch(1.into(), 5.into(), &cache); + assert_eq!( + &blocks_to_fetch, + &[ + [BlockHeight(1), BlockHeight(1)], + [BlockHeight(5), BlockHeight(5)], + ], + ); + + // ------------- + + let cache = + fetched_cache_with_blocks(blocks_in_range(1.into(), 5.into())); + let blocks_to_fetch = blocks_left_to_fetch(2.into(), 4.into(), &cache); + assert!(blocks_to_fetch.is_empty()); + } +} diff --git a/crates/shielded_token/src/masp/shielded_wallet.rs b/crates/shielded_token/src/masp/shielded_wallet.rs new file mode 100644 index 0000000000..b51fb652de --- /dev/null +++ b/crates/shielded_token/src/masp/shielded_wallet.rs @@ -0,0 +1,1822 @@ +//! 
The shielded wallet implementation +use std::cmp::Ordering; +use std::collections::{btree_map, BTreeMap, BTreeSet}; + +use eyre::eyre; +use masp_primitives::asset_type::AssetType; +#[cfg(feature = "mainnet")] +use masp_primitives::consensus::MainNetwork as Network; +#[cfg(not(feature = "mainnet"))] +use masp_primitives::consensus::TestNetwork as Network; +use masp_primitives::convert::AllowedConversion; +use masp_primitives::ff::PrimeField; +use masp_primitives::memo::MemoBytes; +use masp_primitives::merkle_tree::{ + CommitmentTree, IncrementalWitness, MerklePath, +}; +use masp_primitives::sapling::{ + Diversifier, Node, Note, Nullifier, ViewingKey, +}; +use masp_primitives::transaction::builder::Builder; +use masp_primitives::transaction::components::sapling::builder::RngBuildParams; +use masp_primitives::transaction::components::{ + I128Sum, TxOut, U64Sum, ValueSum, +}; +use masp_primitives::transaction::fees::fixed::FeeRule; +use masp_primitives::transaction::{builder, Transaction}; +use masp_primitives::zip32::ExtendedSpendingKey as MaspExtendedSpendingKey; +use namada_core::address::Address; +use namada_core::arith::checked; +use namada_core::borsh::{BorshDeserialize, BorshSerialize}; +use namada_core::chain::BlockHeight; +use namada_core::collections::{HashMap, HashSet}; +use namada_core::control_flow; +use namada_core::masp::{ + encode_asset_type, AssetData, MaspEpoch, TransferSource, TransferTarget, +}; +use namada_core::task_env::TaskEnvironment; +use namada_core::time::{DateTimeUtc, DurationSecs}; +use namada_core::token::{ + Amount, Change, DenominatedAmount, Denomination, MaspDigitPos, +}; +use namada_io::client::Client; +use namada_io::{ + display_line, edisplay_line, Io, MaybeSend, MaybeSync, NamadaIo, + ProgressBar, +}; +use namada_tx::IndexedTx; +use namada_wallet::{DatedKeypair, DatedSpendingKey}; +use rand::prelude::StdRng; +use rand_core::{OsRng, SeedableRng}; + +use crate::masp::utils::MaspClient; +use crate::masp::{ + cloned_pair, 
is_amount_required, to_viewing_key, Changes, + ContextSyncStatus, Conversions, MaspAmount, MaspDataLog, MaspFeeData, + MaspSourceTransferData, MaspTargetTransferData, MaspTransferData, + MaspTxReorderedData, NoteIndex, ShieldedSyncConfig, ShieldedTransfer, + ShieldedUtils, SpentNotesTracker, TransferErr, WalletMap, WitnessMap, + NETWORK, +}; +#[cfg(any(test, feature = "testing"))] +use crate::masp::{testing, ENV_VAR_MASP_TEST_SEED}; + +/// Represents the current state of the shielded pool from the perspective of +/// the chosen viewing keys. +#[derive(BorshSerialize, BorshDeserialize, Debug)] +pub struct ShieldedWallet { + /// Location where this shielded context is saved + #[borsh(skip)] + pub utils: U, + /// The commitment tree produced by scanning all transactions up to tx_pos + pub tree: CommitmentTree, + /// Maps viewing keys to the block height to which they are synced. + /// In particular, the height given by the value *has been scanned*. + pub vk_heights: BTreeMap>, + /// Maps viewing keys to applicable note positions + pub pos_map: HashMap>, + /// Maps a nullifier to the note position to which it applies + pub nf_map: HashMap, + /// Maps note positions to their corresponding notes + pub note_map: HashMap, + /// Maps note positions to their corresponding memos + pub memo_map: HashMap, + /// Maps note positions to the diversifier of their payment address + pub div_map: HashMap, + /// Maps note positions to their witness (used to make merkle paths) + pub witness_map: WitnessMap, + /// The set of note positions that have been spent + pub spents: HashSet, + /// Maps asset types to their decodings + pub asset_types: HashMap, + /// Maps note positions to their corresponding viewing keys + pub vk_map: HashMap, + /// Maps a shielded tx to the index of its first output note. + pub note_index: NoteIndex, + /// The sync state of the context + pub sync_status: ContextSyncStatus, +} + +/// Default implementation to ease construction of TxContexts. 
Derive cannot be +/// used here due to CommitmentTree not implementing Default. +impl Default for ShieldedWallet { + fn default() -> ShieldedWallet { + ShieldedWallet:: { + utils: U::default(), + vk_heights: BTreeMap::new(), + note_index: BTreeMap::default(), + tree: CommitmentTree::empty(), + pos_map: HashMap::default(), + nf_map: HashMap::default(), + note_map: HashMap::default(), + memo_map: HashMap::default(), + div_map: HashMap::default(), + witness_map: HashMap::default(), + spents: HashSet::default(), + asset_types: HashMap::default(), + vk_map: HashMap::default(), + sync_status: ContextSyncStatus::Confirmed, + } + } +} + +impl ShieldedWallet { + /// Try to load the last saved shielded context from the given context + /// directory. If this fails, then leave the current context unchanged. + pub async fn load(&mut self) -> std::io::Result<()> { + self.utils.clone().load(self, false).await + } + + /// Try to load the last saved confirmed shielded context from the given + /// context directory. If this fails, then leave the current context + /// unchanged. + pub async fn load_confirmed(&mut self) -> std::io::Result<()> { + self.utils.clone().load(self, true).await?; + + Ok(()) + } + + /// Save this shielded context into its associated context directory. If the + /// state to be saved is confirmed than also delete the speculative one (if + /// available) + pub async fn save(&self) -> std::io::Result<()> { + self.utils.save(self).await + } + + /// Update the merkle tree of witnesses the first time we + /// scan new MASP transactions. 
+ pub(crate) fn update_witness_map( + &mut self, + indexed_tx: IndexedTx, + shielded: &[Transaction], + ) -> Result<(), eyre::Error> { + let mut note_pos = self.tree.size(); + self.note_index.insert(indexed_tx, note_pos); + + for tx in shielded { + for so in + tx.sapling_bundle().map_or(&vec![], |x| &x.shielded_outputs) + { + // Create merkle tree leaf node from note commitment + let node = Node::new(so.cmu.to_repr()); + // Update each merkle tree in the witness map with the latest + // addition + for (_, witness) in self.witness_map.iter_mut() { + witness.append(node).map_err(|()| { + eyre!("note commitment tree is full".to_string()) + })?; + } + self.tree.append(node).map_err(|()| { + eyre!("note commitment tree is full".to_string()) + })?; + // Finally, make it easier to construct merkle paths to this new + // note + let witness = IncrementalWitness::::from_tree(&self.tree); + self.witness_map.insert(note_pos, witness); + note_pos = checked!(note_pos + 1).unwrap(); + } + } + Ok(()) + } + + /// Sync the current state of the multi-asset shielded pool in a + /// ShieldedContext with the state on-chain. + pub async fn sync( + &mut self, + env: impl TaskEnvironment, + config: ShieldedSyncConfig, + last_query_height: Option, + sks: &[DatedSpendingKey], + fvks: &[DatedKeypair], + ) -> Result<(), eyre::Error> + where + M: MaspClient + Send + Sync + Unpin + 'static, + T: ProgressBar, + I: control_flow::ShutdownSignal, + { + env.run(|spawner| async move { + let dispatcher = config.dispatcher(spawner, &self.utils).await; + + if let Some(updated_ctx) = + dispatcher.run(None, last_query_height, sks, fvks).await? 
+ { + *self = updated_ctx; + } + + Ok(()) + }) + .await + } + + pub(crate) fn min_height_to_sync_from( + &self, + ) -> Result { + let Some(maybe_least_synced_vk_height) = + self.vk_heights.values().min().cloned() + else { + return Err(eyre!( + "No viewing keys are available in the shielded context to \ + decrypt notes with" + .to_string(), + )); + }; + Ok(maybe_least_synced_vk_height + .map_or_else(BlockHeight::first, |itx| itx.height)) + } + + #[allow(missing_docs)] + pub fn save_decrypted_shielded_outputs( + &mut self, + vk: &ViewingKey, + note_pos: usize, + note: Note, + pa: masp_primitives::sapling::PaymentAddress, + memo: MemoBytes, + ) -> Result<(), eyre::Error> { + // Add this note to list of notes decrypted by this + // viewing key + self.pos_map.entry(*vk).or_default().insert(note_pos); + // Compute the nullifier now to quickly recognize when + // spent + let nf = note.nf( + &vk.nk, + note_pos + .try_into() + .map_err(|_| eyre!("Can not get nullifier".to_string()))?, + ); + self.note_map.insert(note_pos, note); + self.memo_map.insert(note_pos, memo); + // The payment address' diversifier is required to spend + // note + self.div_map.insert(note_pos, *pa.diversifier()); + self.nf_map.insert(nf, note_pos); + self.vk_map.insert(note_pos, *vk); + Ok(()) + } + + #[allow(missing_docs)] + pub fn save_shielded_spends(&mut self, transactions: &[Transaction]) { + for stx in transactions { + for ss in + stx.sapling_bundle().map_or(&vec![], |x| &x.shielded_spends) + { + // If the shielded spend's nullifier is in our map, then target + // note is rendered unusable + if let Some(note_pos) = self.nf_map.get(&ss.nullifier) { + self.spents.insert(*note_pos); + } + } + } + } + + /// Compute the total unspent notes associated with the viewing key in the + /// context. If the key is not in the context, then we do not know the + /// balance and hence we return None. 
+ pub async fn compute_shielded_balance( + &mut self, + vk: &ViewingKey, + ) -> Result, eyre::Error> { + // Cannot query the balance of a key that's not in the map + if !self.pos_map.contains_key(vk) { + return Ok(None); + } + let mut val_acc = I128Sum::zero(); + // Retrieve the notes that can be spent by this key + if let Some(avail_notes) = self.pos_map.get(vk) { + for note_idx in avail_notes { + // Spent notes cannot contribute a new transaction's pool + if self.spents.contains(note_idx) { + continue; + } + // Get note associated with this ID + let note = self + .note_map + .get(note_idx) + .ok_or_else(|| eyre!("Unable to get note {note_idx}"))?; + // Finally add value to multi-asset accumulator + val_acc += I128Sum::from_nonnegative( + note.asset_type, + i128::from(note.value), + ) + .map_err(|()| { + eyre!("found note with invalid value or asset type") + })? + } + } + Ok(Some(val_acc)) + } + + /// Try to convert as much of the given asset type-value pair using the + /// given allowed conversion. usage is incremented by the amount of the + /// conversion used, the conversions are applied to the given input, and + /// the trace amount that could not be converted is moved from input to + /// output. 
+    #[allow(clippy::too_many_arguments)]
+    async fn apply_conversion(
+        &mut self,
+        io: &impl Io,
+        conv: AllowedConversion,
+        asset_type: AssetType,
+        value: i128,
+        usage: &mut i128,
+        input: &mut I128Sum,
+        output: &mut I128Sum,
+        normed_asset_type: AssetType,
+        normed_output: &mut I128Sum,
+    ) -> Result<(), eyre::Error> {
+        // we do not need to convert negative values
+        if value <= 0 {
+            return Ok(());
+        }
+        // If conversion is possible, accumulate the exchanged amount
+        let conv: I128Sum = I128Sum::from_sum(conv.into());
+        // The amount required of current asset to qualify for conversion
+        let threshold = -conv[&asset_type];
+        if threshold == 0 {
+            edisplay_line!(
+                io,
+                "Asset threshold of selected conversion for asset type {} is \
+                 0, this is a bug, please report it.",
+                asset_type
+            );
+        }
+        // We should use an amount of the AllowedConversion that almost
+        // cancels the original amount
+        let required = value / threshold;
+        // Forget about the trace amount left over because we cannot
+        // realize its value
+        let trace = I128Sum::from_pair(asset_type, value % threshold);
+        let normed_trace =
+            I128Sum::from_pair(normed_asset_type, value % threshold);
+        // Record how much more of the given conversion has been used
+        *usage += required;
+        // Apply the conversions to input and move the trace amount to output
+        *input += conv * required - trace.clone();
+        *output += trace;
+        *normed_output += normed_trace;
+        Ok(())
+    }
+
+    /// Updates the internal state with the data of the newly generated
More specifically invalidate the spent notes, but do not + /// cache the newly produced output descriptions and therefore the merkle + /// tree + async fn pre_cache_transaction( + &mut self, + masp_txs: &[Transaction], + ) -> Result<(), eyre::Error> { + self.save_shielded_spends(masp_txs); + + // Save the speculative state for future usage + self.sync_status = ContextSyncStatus::Speculative; + self.save().await.map_err(|e| eyre!(e.to_string()))?; + + Ok(()) + } +} + +/// A trait that allows downstream types specify how a shielded wallet +/// should interact / query a node. +pub trait ShieldedQueries: + std::ops::Deref> + std::ops::DerefMut +{ + /// Get the address of the native token + #[allow(async_fn_in_trait)] + async fn query_native_token( + client: &C, + ) -> Result; + + /// Query the denomination of a token type + #[allow(async_fn_in_trait)] + async fn query_denom( + client: &C, + token: &Address, + ) -> Option; + + /// Query for converting assets across epochs + #[allow(async_fn_in_trait)] + async fn query_conversion( + client: &C, + asset_type: AssetType, + ) -> Option<( + Address, + Denomination, + MaspDigitPos, + MaspEpoch, + I128Sum, + MerklePath, + )>; + + /// Get the last block height + #[allow(async_fn_in_trait)] + async fn query_block( + client: &C, + ) -> Result, eyre::Error>; + + /// Get the upper limit on the time to make a new block + #[allow(async_fn_in_trait)] + async fn query_max_block_time_estimate( + client: &C, + ) -> Result; + + /// Query the MASP epoch + #[allow(async_fn_in_trait)] + async fn query_masp_epoch( + client: &C, + ) -> Result; +} + +/// The methods of the shielded wallet that depend on the [`ShieldedQueries`] +/// trait. These cannot be overridden downstream. +pub trait ShieldedApi: + ShieldedQueries +{ + /// Use the addresses already stored in the wallet to precompute as many + /// asset types as possible. 
+ #[allow(async_fn_in_trait)] + async fn precompute_asset_types( + &mut self, + client: &C, + tokens: Vec<&Address>, + ) -> Result<(), eyre::Error> { + // To facilitate lookups of human-readable token names + for token in tokens { + let Some(denom) = Self::query_denom(client, token).await else { + return Err(eyre!("denomination for token {token}")); + }; + for position in MaspDigitPos::iter() { + let asset_type = + encode_asset_type(token.clone(), denom, position, None) + .map_err(|_| eyre!("unable to create asset type",))?; + self.asset_types.insert( + asset_type, + AssetData { + token: token.clone(), + denom, + position, + epoch: None, + }, + ); + } + } + Ok(()) + } + + /// Query the ledger for the decoding of the given asset type and cache it + /// if it is found. + #[allow(async_fn_in_trait)] + async fn decode_asset_type( + &mut self, + client: &C, + asset_type: AssetType, + ) -> Option { + // Try to find the decoding in the cache + if let decoded @ Some(_) = self.asset_types.get(&asset_type) { + return decoded.cloned(); + } + // Query for the ID of the last accepted transaction + let (token, denom, position, ep, _conv, _path): ( + Address, + Denomination, + MaspDigitPos, + _, + I128Sum, + MerklePath, + ) = Self::query_conversion(client, asset_type).await?; + let pre_asset_type = AssetData { + token, + denom, + position, + epoch: Some(ep), + }; + self.asset_types.insert(asset_type, pre_asset_type.clone()); + Some(pre_asset_type) + } + + /// Query the ledger for the conversion that is allowed for the given asset + /// type and cache it. 
+ #[allow(async_fn_in_trait)] + async fn query_allowed_conversion<'a, C: Client + Sync>( + &'a mut self, + client: &C, + asset_type: AssetType, + conversions: &'a mut Conversions, + ) { + if let btree_map::Entry::Vacant(conv_entry) = + conversions.entry(asset_type) + { + // Query for the ID of the last accepted transaction + let Some((token, denom, position, ep, conv, path)) = + Self::query_conversion(client, asset_type).await + else { + return; + }; + self.asset_types.insert( + asset_type, + AssetData { + token, + denom, + position, + epoch: Some(ep), + }, + ); + // If the conversion is 0, then we just have a pure decoding + if !conv.is_zero() { + conv_entry.insert((conv.into(), path, 0)); + } + } + } + + /// Convert the given amount into the latest asset types whilst making a + /// note of the conversions that were used. Note that this function does + /// not assume that allowed conversions from the ledger are expressed in + /// terms of the latest asset types. + #[allow(async_fn_in_trait)] + async fn compute_exchanged_amount( + &mut self, + client: &(impl Client + Sync), + io: &impl Io, + mut input: I128Sum, + target_epoch: MaspEpoch, + mut conversions: Conversions, + ) -> Result<(I128Sum, I128Sum, Conversions), eyre::Error> { + // Where we will store our exchanged value + let mut output = I128Sum::zero(); + // Where we will store our normed exchanged value + let mut normed_output = I128Sum::zero(); + // Repeatedly exchange assets until it is no longer possible + while let Some((asset_type, value)) = + input.components().next().map(cloned_pair) + { + // Get the equivalent to the current asset in the target epoch and + // note whether this equivalent chronologically comes after the + // current asset + let (target_asset_type, forward_conversion) = self + .decode_asset_type(client, asset_type) + .await + .map(|mut pre_asset_type| { + let old_epoch = pre_asset_type.redate(target_epoch); + pre_asset_type + .encode() + .map(|asset_type| { + ( + asset_type, + 
old_epoch.map_or(false, |epoch| { + target_epoch >= epoch + }), + ) + }) + .map_err(|_| eyre!("unable to create asset type",)) + }) + .transpose()? + .unwrap_or((asset_type, false)); + let at_target_asset_type = target_asset_type == asset_type; + let trace_asset_type = if forward_conversion { + // If we are doing a forward conversion, then we can assume that + // the trace left over in the older epoch has at least a 1-to-1 + // conversion to the newer epoch. + target_asset_type + } else { + // If we are not doing a forward conversion, then we cannot + // lower bound what the asset type will be worth in the target + // asset type. So leave the asset type fixed. + asset_type + }; + // Fetch and store the required conversions + self.query_allowed_conversion( + client, + target_asset_type, + &mut conversions, + ) + .await; + self.query_allowed_conversion(client, asset_type, &mut conversions) + .await; + if let (Some((conv, _wit, usage)), false) = + (conversions.get_mut(&asset_type), at_target_asset_type) + { + display_line!( + io, + "converting current asset type to latest asset type..." + ); + // Not at the target asset type, not at the latest asset + // type. Apply conversion to get from + // current asset type to the latest + // asset type. + self.apply_conversion( + io, + conv.clone(), + asset_type, + value, + usage, + &mut input, + &mut output, + trace_asset_type, + &mut normed_output, + ) + .await?; + } else if let (Some((conv, _wit, usage)), false) = ( + conversions.get_mut(&target_asset_type), + at_target_asset_type, + ) { + display_line!( + io, + "converting latest asset type to target asset type..." + ); + // Not at the target asset type, yet at the latest asset + // type. Apply inverse conversion to get + // from latest asset type to the target + // asset type. 
+ self.apply_conversion( + io, + conv.clone(), + asset_type, + value, + usage, + &mut input, + &mut output, + trace_asset_type, + &mut normed_output, + ) + .await?; + } else { + // At the target asset type. Then move component over to + // output. + let comp = input.project(asset_type); + output += comp.clone(); + normed_output += comp.clone(); + input -= comp; + } + } + Ok((output, normed_output, conversions)) + } + + /// Compute the total unspent notes associated with the viewing key in the + /// context and express that value in terms of the currently timestamped + /// asset types. If the key is not in the context, then we do not know the + /// balance and hence we return None. + #[allow(async_fn_in_trait)] + async fn compute_exchanged_balance( + &mut self, + client: &(impl Client + Sync), + io: &impl Io, + vk: &ViewingKey, + target_epoch: MaspEpoch, + ) -> Result, eyre::Error> { + // First get the unexchanged balance + if let Some(balance) = self.compute_shielded_balance(vk).await? { + let exchanged_amount = self + .compute_exchanged_amount( + client, + io, + balance, + target_epoch, + BTreeMap::new(), + ) + .await? + .0; + // And then exchange balance into current asset types + Ok(Some(exchanged_amount)) + } else { + Ok(None) + } + } + + /// Collect enough unspent notes in this context to exceed the given amount + /// of the specified asset type. Return the total value accumulated plus + /// notes and the corresponding diversifiers/merkle paths that were used to + /// achieve the total value. Updates the changes map. 
+ #[allow(clippy::too_many_arguments)] + #[allow(async_fn_in_trait)] + async fn collect_unspent_notes( + &mut self, + context: &impl NamadaIo, + spent_notes: &mut SpentNotesTracker, + sk: namada_core::masp::ExtendedSpendingKey, + is_native_token: bool, + target: I128Sum, + target_epoch: MaspEpoch, + changes: &mut Changes, + ) -> Result< + ( + I128Sum, + Vec<(Diversifier, Note, MerklePath)>, + Conversions, + ), + eyre::Error, + > { + let vk = &to_viewing_key(&sk.into()).vk; + // TODO: we should try to use the smallest notes possible to fund the + // transaction to allow people to fetch less often + // Establish connection with which to do exchange rate queries + let mut conversions = BTreeMap::new(); + let mut val_acc = I128Sum::zero(); + let mut normed_val_acc = I128Sum::zero(); + let mut notes = Vec::new(); + + // Retrieve the notes that can be spent by this key + if let Some(avail_notes) = self.pos_map.get(vk).cloned() { + for note_idx in &avail_notes { + // Skip spend notes already used in this transaction + if spent_notes + .get(vk) + .is_some_and(|set| set.contains(note_idx)) + { + continue; + } + // No more transaction inputs are required once we have met + // the target amount + if normed_val_acc >= target { + break; + } + // Spent notes from the shielded context (i.e. 
from previous + // transactions) cannot contribute a new transaction's pool + if self.spents.contains(note_idx) { + continue; + } + // Get note, merkle path, diversifier associated with this ID + let note = *self + .note_map + .get(note_idx) + .ok_or_else(|| eyre!("Unable to get note {note_idx}"))?; + + // The amount contributed by this note before conversion + let pre_contr = + I128Sum::from_pair(note.asset_type, i128::from(note.value)); + let (contr, normed_contr, proposed_convs) = self + .compute_exchanged_amount( + context.client(), + context.io(), + pre_contr, + target_epoch, + conversions.clone(), + ) + .await?; + + let opt_delta = if is_native_token { + None + } else { + Some(contr.clone()) + }; + // Use this note only if it brings us closer to our target + if let Some(change) = is_amount_required( + normed_val_acc.clone(), + target.clone(), + normed_contr.clone(), + opt_delta, + ) { + // Be sure to record the conversions used in computing + // accumulated value + val_acc += contr; + normed_val_acc += normed_contr; + + // Update the changes + changes + .entry(sk) + .and_modify(|amt| *amt += &change) + .or_insert(change); + + // Commit the conversions that were used to exchange + conversions = proposed_convs; + let merkle_path = self + .witness_map + .get(note_idx) + .ok_or_else(|| eyre!("Unable to get note {note_idx}"))? + .path() + .ok_or_else(|| { + eyre!("Unable to get path: {}", line!()) + })?; + let diversifier = + self.div_map.get(note_idx).ok_or_else(|| { + eyre!("Unable to get note {note_idx}") + })?; + // Commit this note to our transaction + notes.push((*diversifier, note, merkle_path)); + // Append the note the list of used ones + spent_notes + .entry(vk.to_owned()) + .and_modify(|set| { + set.insert(*note_idx); + }) + .or_insert([*note_idx].into_iter().collect()); + } + } + } + Ok((val_acc, notes, conversions)) + } + + /// Convert an amount whose units are AssetTypes to one whose units are + /// Addresses that they decode to. 
All asset types not corresponding to + /// the given epoch are ignored. + #[allow(async_fn_in_trait)] + async fn decode_combine_sum_to_epoch( + &mut self, + client: &C, + amt: I128Sum, + target_epoch: MaspEpoch, + ) -> (ValueSum, I128Sum) { + let mut res = ValueSum::zero(); + let mut undecoded = ValueSum::zero(); + for (asset_type, val) in amt.components() { + // Decode the asset type + let decoded = self.decode_asset_type(client, *asset_type).await; + // Only assets with the target timestamp count + match decoded { + Some(pre_asset_type) + if pre_asset_type + .epoch + .map_or(true, |epoch| epoch <= target_epoch) => + { + let decoded_change = Change::from_masp_denominated( + *val, + pre_asset_type.position, + ) + .expect("expected this to fit"); + res += ValueSum::from_pair( + pre_asset_type.token, + decoded_change, + ); + } + None => { + undecoded += ValueSum::from_pair(*asset_type, *val); + } + _ => {} + } + } + (res, undecoded) + } + + /// Convert an amount whose units are AssetTypes to one whose units are + /// Addresses that they decode to and combine the denominations. + #[allow(async_fn_in_trait)] + async fn decode_combine_sum( + &mut self, + client: &C, + amt: I128Sum, + ) -> (MaspAmount, I128Sum) { + let mut res = MaspAmount::zero(); + let mut undecoded = ValueSum::zero(); + for (asset_type, val) in amt.components() { + // Decode the asset type + if let Some(decoded) = + self.decode_asset_type(client, *asset_type).await + { + let decoded_change = + Change::from_masp_denominated(*val, decoded.position) + .expect("expected this to fit"); + res += MaspAmount::from_pair( + (decoded.epoch, decoded.token), + decoded_change, + ); + } else { + undecoded += ValueSum::from_pair(*asset_type, *val); + } + } + (res, undecoded) + } + + /// Convert an amount whose units are AssetTypes to one whose units are + /// Addresses that they decode to. 
+ #[allow(async_fn_in_trait)] + async fn decode_sum( + &mut self, + client: &C, + amt: I128Sum, + ) -> ValueSum<(AssetType, AssetData), i128> { + let mut res = ValueSum::zero(); + for (asset_type, val) in amt.components() { + // Decode the asset type + if let Some(decoded) = + self.decode_asset_type(client, *asset_type).await + { + res += ValueSum::from_pair((*asset_type, decoded), *val); + } + } + res + } + + /// Make shielded components to embed within a Transfer object. If no + /// shielded payment address nor spending key is specified, then no + /// shielded components are produced. Otherwise, a transaction containing + /// nullifiers and/or note commitments are produced. Dummy transparent + /// UTXOs are sometimes used to make transactions balanced, but it is + /// understood that transparent account changes are effected only by the + /// amounts and signatures specified by the containing Transfer object. + #[allow(async_fn_in_trait)] + async fn gen_shielded_transfer( + &mut self, + context: &impl NamadaIo, + data: Vec, + fee_data: Option, + expiration: Option, + update_ctx: bool, + ) -> Result, TransferErr> { + let last_block_height = Self::query_block(context.client()) + .await + .map_err(|e| TransferErr::General(e.to_string()))? + .unwrap_or(1); + let max_block_time = + Self::query_max_block_time_estimate(context.client()) + .await + .map_err(|e| TransferErr::General(e.to_string()))?; + // Determine epoch in which to submit potential shielded transaction + let epoch = Self::query_masp_epoch(context.client()) + .await + .map_err(|e| TransferErr::General(e.to_string()))?; + let native_token = Self::query_native_token(context.client()) + .await + .map_err(|e| TransferErr::General(e.to_string()))?; + // Try to get a seed from env var, if any. 
+ #[allow(unused_mut)] + let mut rng = StdRng::from_rng(OsRng).unwrap(); + #[cfg(feature = "testing")] + let mut rng = if let Ok(seed) = std::env::var(ENV_VAR_MASP_TEST_SEED) + .map_err(|e| TransferErr::General(e.to_string())) + .and_then(|seed| { + let exp_str = + format!("Env var {ENV_VAR_MASP_TEST_SEED} must be a u64."); + let parsed_seed: u64 = + seed.parse().map_err(|_| TransferErr::General(exp_str))?; + Ok(parsed_seed) + }) { + tracing::warn!( + "UNSAFE: Using a seed from {ENV_VAR_MASP_TEST_SEED} env var \ + to build proofs." + ); + StdRng::seed_from_u64(seed) + } else { + rng + }; + + // TODO: if the user requested the default expiration, there might be a + // small discrepancy between the datetime we calculate here and the one + // we set for the transaction. This should be small enough to not cause + // any issue, in case refactor this function to request the precise + // datetime to the caller + let expiration_height: u32 = match expiration { + Some(expiration) => { + // Try to match a DateTime expiration with a plausible + // corresponding block height + #[allow(clippy::disallowed_methods)] + let current_time = DateTimeUtc::now(); + let delta_time = + expiration.0.signed_duration_since(current_time.0); + + let delta_blocks = u32::try_from( + delta_time.num_seconds() + / i64::try_from(max_block_time.0).unwrap(), + ) + .map_err(|e| TransferErr::General(e.to_string()))?; + u32::try_from(last_block_height) + .map_err(|e| TransferErr::General(e.to_string()))? + + delta_blocks + } + None => { + // NOTE: The masp library doesn't support optional + // expiration so we set the max to mimic + // a never-expiring tx. 
We also need to + // remove 20 which is going to be added back by the builder + u32::MAX - 20 + } + }; + let mut builder = Builder::::new( + NETWORK, + // NOTE: this is going to add 20 more blocks to the actual + // expiration but there's no other exposed function that we could + // use from the masp crate to specify the expiration better + expiration_height.into(), + ); + + let mut notes_tracker = SpentNotesTracker::new(); + { + // Load the current shielded context given + // the spending key we possess + let _ = self.load().await; + } + + let Some(MaspTxReorderedData { + source_data, + target_data, + mut denoms, + }) = Self::reorder_data_for_masp_transfer(context, data).await? + else { + // No shielded components are needed when neither source nor + // destination are shielded + return Ok(None); + }; + let mut changes = Changes::default(); + + for (MaspSourceTransferData { source, token }, amount) in &source_data { + self.add_inputs( + context, + &mut builder, + source, + token, + amount, + epoch, + &denoms, + &mut notes_tracker, + &mut changes, + *token == native_token, + ) + .await?; + } + + for ( + MaspTargetTransferData { + source, + target, + token, + }, + amount, + ) in target_data + { + self.add_outputs( + context, + &mut builder, + source, + &target, + token, + amount, + epoch, + &denoms, + ) + .await?; + } + + // Collect the fees if needed + if let Some(MaspFeeData { + sources, + target, + token, + amount, + }) = fee_data + { + self.add_fees( + context, + &mut builder, + &source_data, + sources, + &target, + &token, + &amount, + epoch, + &mut denoms, + &mut notes_tracker, + &mut changes, + ) + .await?; + } + + // Finally, add outputs representing the change from this payment. 
+ Self::add_changes(&mut builder, changes)?; + + let builder_clone = builder.clone().map_builder(WalletMap); + // Build and return the constructed transaction + #[cfg(not(feature = "testing"))] + let prover = self.utils.local_tx_prover(); + #[cfg(feature = "testing")] + let prover = testing::MockTxProver(std::sync::Mutex::new(OsRng)); + let (masp_tx, metadata) = builder + .build( + &prover, + &FeeRule::non_standard(U64Sum::zero()), + &mut rng, + &mut RngBuildParams::new(OsRng), + ) + .map_err(|error| TransferErr::Build { error, data: None })?; + + if update_ctx { + self.pre_cache_transaction(std::slice::from_ref(&masp_tx)) + .await + .map_err(|e| TransferErr::General(e.to_string()))?; + } + + Ok(Some(ShieldedTransfer { + builder: builder_clone, + masp_tx, + metadata, + epoch, + })) + } + + /// Group all the information for every source/token and target/token + /// couple, and extract the denominations for all the tokens involved + /// (expect the one involved in the fees if needed). This step is + /// required so that we can collect the amount required for every couple + /// and pass it to the appropriate function so that notes can be + /// collected based on the correct amount. 
+ #[allow(async_fn_in_trait)] + async fn reorder_data_for_masp_transfer( + context: &impl NamadaIo, + data: Vec, + ) -> Result, TransferErr> { + let mut source_data = + HashMap::::new(); + let mut target_data = + HashMap::::new(); + let mut denoms = HashMap::new(); + + for MaspTransferData { + source, + target, + token, + amount, + } in data + { + let spending_key = source.spending_key(); + let payment_address = target.payment_address(); + // No shielded components are needed when neither source nor + // destination are shielded + if spending_key.is_none() && payment_address.is_none() { + return Ok(None); + } + + if denoms.get(&token).is_none() { + if let Some(denom) = + Self::query_denom(context.client(), &token).await + { + denoms.insert(token.clone(), denom); + } else { + return Err(TransferErr::General(format!( + "denomination for token {token}" + ))); + }; + } + + let key = MaspSourceTransferData { + source: source.clone(), + token: token.clone(), + }; + match source_data.get_mut(&key) { + Some(prev_amount) => { + *prev_amount = checked!(prev_amount.to_owned() + amount) + .map_err(|e| TransferErr::General(e.to_string()))?; + } + None => { + source_data.insert(key, amount); + } + } + + let key = MaspTargetTransferData { + source, + target, + token, + }; + match target_data.get_mut(&key) { + Some(prev_amount) => { + *prev_amount = checked!(prev_amount.to_owned() + amount) + .map_err(|e| TransferErr::General(e.to_string()))?; + } + None => { + target_data.insert(key, amount); + } + } + } + + Ok(Some(MaspTxReorderedData { + source_data, + target_data, + denoms, + })) + } + + /// Add the necessary transaction inputs to the builder. 
+ #[allow(async_fn_in_trait)] + #[allow(clippy::too_many_arguments)] + async fn add_inputs( + &mut self, + context: &impl NamadaIo, + builder: &mut Builder, + source: &TransferSource, + token: &Address, + amount: &DenominatedAmount, + epoch: MaspEpoch, + denoms: &HashMap, + notes_tracker: &mut SpentNotesTracker, + changes: &mut Changes, + is_native_token: bool, + ) -> Result, TransferErr> { + // We want to fund our transaction solely from supplied spending key + let spending_key = source.spending_key(); + + // Now we build up the transaction within this object + + // Convert transaction amount into MASP types + // Ok to unwrap because we've already seen the token before, the + // denomination must be there + let denom = denoms.get(token).unwrap(); + let (asset_types, masp_amount) = { + // Do the actual conversion to an asset type + let amount = self + .convert_namada_amount_to_masp( + context.client(), + epoch, + token, + denom.to_owned(), + amount.amount(), + ) + .await + .map_err(|e| TransferErr::General(e.to_string()))?; + // Make sure to save any decodings of the asset types used so + // that balance queries involving them are + // successful + let _ = self.save().await; + amount + }; + + // If there are shielded inputs + let added_amt = if let Some(sk) = spending_key { + // Locate unspent notes that can help us meet the transaction + // amount + let (added_amount, unspent_notes, used_convs) = self + .collect_unspent_notes( + context, + notes_tracker, + sk, + is_native_token, + I128Sum::from_sum(masp_amount), + epoch, + changes, + ) + .await + .map_err(|e| TransferErr::General(e.to_string()))?; + // Commit the notes found to our transaction + for (diversifier, note, merkle_path) in unspent_notes { + builder + .add_sapling_spend( + sk.into(), + diversifier, + note, + merkle_path, + ) + .map_err(|e| TransferErr::Build { + error: builder::Error::SaplingBuild(e), + data: None, + })?; + } + // Commit the conversion notes used during summation + for (conv, wit, 
value) in used_convs.values() { + if value.is_positive() { + builder + .add_sapling_convert( + conv.clone(), + *value as u64, + wit.clone(), + ) + .map_err(|e| TransferErr::Build { + error: builder::Error::SaplingBuild(e), + data: None, + })?; + } + } + + Some(added_amount) + } else { + // We add a dummy UTXO to our transaction, but only the source + // of the parent Transfer object is used to + // validate fund availability + let script = source + .t_addr_data() + .ok_or_else(|| { + TransferErr::General( + "source address should be transparent".into(), + ) + })? + .taddress(); + + for (digit, asset_type) in + MaspDigitPos::iter().zip(asset_types.iter()) + { + let amount_part = digit.denominate(&amount.amount()); + // Skip adding an input if its value is 0 + if amount_part != 0 { + builder + .add_transparent_input(TxOut { + asset_type: *asset_type, + value: amount_part, + address: script, + }) + .map_err(|e| TransferErr::Build { + error: builder::Error::TransparentBuild(e), + data: None, + })?; + } + } + + None + }; + + Ok(added_amt) + } + + /// Add the necessary transaction outputs to the builder + #[allow(clippy::too_many_arguments)] + #[allow(async_fn_in_trait)] + async fn add_outputs( + &mut self, + context: &impl NamadaIo, + builder: &mut Builder, + source: TransferSource, + target: &TransferTarget, + token: Address, + amount: DenominatedAmount, + epoch: MaspEpoch, + denoms: &HashMap, + ) -> Result<(), TransferErr> { + // Anotate the asset type in the value balance with its decoding in + // order to facilitate cross-epoch computations + let value_balance = self + .decode_sum(context.client(), builder.value_balance()) + .await; + + let payment_address = target.payment_address(); + + // This indicates how many more assets need to be sent to the + // receiver in order to satisfy the requested transfer + // amount. 
+ let mut rem_amount = amount.amount().raw_amount().0; + + // Ok to unwrap cause we've already seen the token before, the + // denomination must be there + let denom = denoms.get(&token).unwrap(); + + // Now handle the outputs of this transaction + // Loop through the value balance components and see which + // ones can be given to the receiver + for ((asset_type, decoded), val) in value_balance.components() { + let rem_amount = &mut rem_amount[decoded.position as usize]; + // Only asset types with the correct token can contribute. But + // there must be a demonstrated need for it. + if decoded.token == token + && &decoded.denom == denom + && decoded.epoch.map_or(true, |vbal_epoch| vbal_epoch <= epoch) + && *rem_amount > 0 + { + let val = u128::try_from(*val).expect( + "value balance in absence of output descriptors should be \ + non-negative", + ); + // We want to take at most the remaining quota for the + // current denomination to the receiver + let contr = std::cmp::min(*rem_amount as u128, val) as u64; + // If we are sending to a shielded address, we need the outgoing + // viewing key in the following computations. + let ovk_opt = source + .spending_key() + .map(|x| MaspExtendedSpendingKey::from(x).expsk.ovk); + // Make transaction output tied to the current token, + // denomination, and epoch. 
+ if let Some(pa) = payment_address { + // If there is a shielded output + builder + .add_sapling_output( + ovk_opt, + pa.into(), + *asset_type, + contr, + MemoBytes::empty(), + ) + .map_err(|e| TransferErr::Build { + error: builder::Error::SaplingBuild(e), + data: None, + })?; + } else if let Some(t_addr_data) = target.t_addr_data() { + // If there is a transparent output + builder + .add_transparent_output( + &t_addr_data.taddress(), + *asset_type, + contr, + ) + .map_err(|e| TransferErr::Build { + error: builder::Error::TransparentBuild(e), + data: None, + })?; + } else { + return Result::Err(TransferErr::General( + "transaction target must be a payment address or \ + Namada address or IBC address" + .to_string(), + )); + } + // Lower what is required of the remaining contribution + *rem_amount -= contr; + } + } + + // Nothing must remain to be included in output + if rem_amount != [0; 4] { + let (asset_types, _) = { + // Do the actual conversion to an asset type + let amount = self + .convert_namada_amount_to_masp( + context.client(), + epoch, + &token, + denom.to_owned(), + amount.amount(), + ) + .await + .map_err(|e| TransferErr::General(e.to_string()))?; + // Make sure to save any decodings of the asset types used so + // that balance queries involving them are + // successful + let _ = self.save().await; + amount + }; + + // Convert the shortfall into a I128Sum + let mut shortfall = I128Sum::zero(); + for (asset_type, val) in asset_types.iter().zip(rem_amount) { + shortfall += I128Sum::from_pair(*asset_type, val.into()); + } + // Return an insufficient funds error + return Result::Err(TransferErr::Build { + error: builder::Error::InsufficientFunds(shortfall), + data: Some(MaspDataLog { + source: Some(source), + token, + amount, + }), + }); + } + + Ok(()) + } + + /// Add the necessary note to include a masp fee payment in the transaction. + /// Funds are gathered in the following order: + /// + /// 1. 
From the residual values of the already included spend notes (i.e. + /// changes) + /// 2. From new spend notes of the transaction's sources + /// 3. From new spend notes of the optional gas spending keys + #[allow(clippy::too_many_arguments)] + #[allow(async_fn_in_trait)] + async fn add_fees( + &mut self, + context: &impl NamadaIo, + builder: &mut Builder, + source_data: &HashMap, + sources: Vec, + target: &Address, + token: &Address, + amount: &DenominatedAmount, + epoch: MaspEpoch, + denoms: &mut HashMap, + notes_tracker: &mut SpentNotesTracker, + changes: &mut Changes, + ) -> Result<(), TransferErr> { + if denoms.get(token).is_none() { + if let Some(denom) = + Self::query_denom(context.client(), token).await + { + denoms.insert(token.to_owned(), denom); + } else { + return Err(TransferErr::General(format!( + "denomination for token {token}" + ))); + }; + } + let native_token = Self::query_native_token(context.client()) + .await + .map_err(|e| TransferErr::General(e.to_string()))?; + let raw_amount = amount.amount().raw_amount().0; + let (asset_types, _) = { + // Do the actual conversion to an asset type + let (asset_types, amount) = self + .convert_namada_amount_to_masp( + context.client(), + epoch, + token, + // Safe to unwrap + denoms.get(token).unwrap().to_owned(), + amount.amount(), + ) + .await + .map_err(|e| TransferErr::General(e.to_string()))?; + // Make sure to save any decodings of the asset types used so + // that balance queries involving them are + // successful + let _ = self.save().await; + (asset_types, amount) + }; + + let mut fees = I128Sum::zero(); + // Convert the shortfall into a I128Sum + for (asset_type, val) in asset_types.iter().zip(raw_amount) { + fees += I128Sum::from_nonnegative(*asset_type, val.into()) + .map_err(|()| { + TransferErr::General( + "Fee amount is expected expected to be non-negative" + .to_string(), + ) + })?; + } + + // 1. 
Try to use the change to pay fees + let mut temp_changes = Changes::default(); + + for (sp, changes) in changes.iter() { + for (asset_type, change) in changes.components() { + for (_, fee_amt) in fees + .clone() + .components() + .filter(|(axt, _)| *axt == asset_type) + { + // Get the minimum between the available change and + // the due fee + let output_amt = I128Sum::from_nonnegative( + asset_type.to_owned(), + *change.min(fee_amt), + ) + .map_err(|()| { + TransferErr::General( + "Fee amount is expected to be non-negative" + .to_string(), + ) + })?; + let denominated_output_amt = self + .convert_masp_amount_to_namada( + context.client(), + // Safe to unwrap + denoms.get(token).unwrap().to_owned(), + output_amt.clone(), + ) + .await + .map_err(|e| TransferErr::General(e.to_string()))?; + + self.add_outputs( + context, + builder, + TransferSource::ExtendedSpendingKey(sp.to_owned()), + &TransferTarget::Address(target.clone()), + token.clone(), + denominated_output_amt, + epoch, + denoms, + ) + .await?; + + fees -= &output_amt; + // Update the changes + temp_changes + .entry(*sp) + .and_modify(|amt| *amt += &output_amt) + .or_insert(output_amt); + } + } + + if fees.is_zero() { + break; + } + } + + // Decrease the changes by the amounts used for fee payment + for (sp, temp_changes) in temp_changes.iter() { + for (asset_type, temp_change) in temp_changes.components() { + let output_amt = I128Sum::from_nonnegative( + asset_type.to_owned(), + *temp_change, + ) + .map_err(|()| { + TransferErr::General( + "Fee amount is expected expected to be non-negative" + .to_string(), + ) + })?; + + // Entry is guaranteed to be in the map + changes.entry(*sp).and_modify(|amt| *amt -= &output_amt); + } + } + + if !fees.is_zero() { + // 2. 
Look for unused spent notes of the sources and the optional + // gas spending keys (sources first) + for fee_source in + source_data.iter().map(|(src, _)| src.source.clone()).chain( + sources + .into_iter() + .map(TransferSource::ExtendedSpendingKey), + ) + { + for (asset_type, fee_amt) in fees.clone().components() { + let input_amt = I128Sum::from_nonnegative( + asset_type.to_owned(), + *fee_amt, + ) + .map_err(|()| { + TransferErr::General( + "Fee amount is expected expected to be \ + non-negative" + .to_string(), + ) + })?; + let denominated_fee = self + .convert_masp_amount_to_namada( + context.client(), + // Safe to unwrap + denoms.get(token).unwrap().to_owned(), + input_amt.clone(), + ) + .await + .map_err(|e| TransferErr::General(e.to_string()))?; + + let Some(found_amt) = self + .add_inputs( + context, + builder, + &fee_source, + token, + &denominated_fee, + epoch, + denoms, + notes_tracker, + changes, + *token == native_token, + ) + .await + .map_err(|e| TransferErr::General(e.to_string()))? 
+ else { + continue; + }; + // Pick the minimum between the due fee and the amount found + let output_amt = match found_amt.partial_cmp(&input_amt) { + None | Some(Ordering::Less) => found_amt, + _ => input_amt.clone(), + }; + let denom_amt = self + .convert_masp_amount_to_namada( + context.client(), + // Safe to unwrap + denoms.get(token).unwrap().to_owned(), + output_amt.clone(), + ) + .await + .map_err(|e| TransferErr::General(e.to_string()))?; + + self.add_outputs( + context, + builder, + fee_source.clone(), + &TransferTarget::Address(target.clone()), + token.clone(), + denom_amt, + epoch, + denoms, + ) + .await + .map_err(|e| TransferErr::General(e.to_string()))?; + + fees -= &output_amt; + } + + if fees.is_zero() { + break; + } + } + } + + if !fees.is_zero() { + return Result::Err(TransferErr::Build { + error: builder::Error::InsufficientFunds(fees), + data: Some(MaspDataLog { + source: None, + token: token.to_owned(), + amount: *amount, + }), + }); + } + + Ok(()) + } + + /// Consumes the changes and adds them back to the original sources to + /// balance the transaction. 
This function has to be called after + /// `add_fees` because we might have some change coming from there too + #[allow(clippy::result_large_err)] + #[allow(async_fn_in_trait)] + fn add_changes( + builder: &mut Builder, + changes: Changes, + ) -> Result<(), TransferErr> { + for (sp, changes) in changes.into_iter() { + for (asset_type, amt) in changes.components() { + if let Ordering::Greater = amt.cmp(&0) { + let sk = MaspExtendedSpendingKey::from(sp.to_owned()); + // Send the change in this asset type back to the sender + builder + .add_sapling_output( + Some(sk.expsk.ovk), + sk.default_address().1, + *asset_type, + *amt as u64, + MemoBytes::empty(), + ) + .map_err(|e| TransferErr::Build { + error: builder::Error::SaplingBuild(e), + data: None, + })?; + } + } + } + + // Final safety check on the value balance to verify that the + // transaction is balanced + let value_balance = builder.value_balance(); + if !value_balance.is_zero() { + return Result::Err(TransferErr::Build { + error: builder::Error::InsufficientFunds(value_balance), + data: None, + }); + } + + Ok(()) + } + + /// Get the asset type with the given epoch, token, and denomination. If it + /// does not exist in the protocol, then remove the timestamp. Make sure to + /// store the derived AssetType so that future decoding is possible. 
+ #[allow(async_fn_in_trait)] + async fn get_asset_type( + &mut self, + client: &C, + decoded: &mut AssetData, + ) -> Result { + let mut asset_type = decoded + .encode() + .map_err(|_| eyre!("unable to create asset type"))?; + if self.decode_asset_type(client, asset_type).await.is_none() { + // If we fail to decode the epoched asset type, then remove the + // epoch + decoded.undate(); + asset_type = decoded + .encode() + .map_err(|_| eyre!("unable to create asset type"))?; + self.asset_types.insert(asset_type, decoded.clone()); + } + Ok(asset_type) + } + + /// Convert Namada amount and token type to MASP equivalents + #[allow(async_fn_in_trait)] + async fn convert_namada_amount_to_masp( + &mut self, + client: &C, + epoch: MaspEpoch, + token: &Address, + denom: Denomination, + val: Amount, + ) -> Result<([AssetType; 4], U64Sum), eyre::Error> { + let mut amount = U64Sum::zero(); + let mut asset_types = Vec::new(); + for position in MaspDigitPos::iter() { + let mut pre_asset_type = AssetData { + epoch: Some(epoch), + token: token.clone(), + denom, + position, + }; + let asset_type = + self.get_asset_type(client, &mut pre_asset_type).await?; + // Combine the value and unit into one amount + amount += + U64Sum::from_nonnegative(asset_type, position.denominate(&val)) + .map_err(|_| eyre!("invalid value for amount"))?; + asset_types.push(asset_type); + } + Ok(( + asset_types + .try_into() + .expect("there must be exactly 4 denominations"), + amount, + )) + } + + /// Convert MASP amount to Namada equivalent + #[allow(async_fn_in_trait)] + async fn convert_masp_amount_to_namada( + &mut self, + client: &C, + denom: Denomination, + amt: I128Sum, + ) -> Result { + let mut amount = Amount::zero(); + let value_sum = self.decode_sum(client, amt).await; + + for ((_, decoded), val) in value_sum.components() { + let positioned_amt = + Amount::from_masp_denominated_i128(*val, decoded.position) + .unwrap_or_default(); + amount = checked!(amount + positioned_amt)?; + } + + 
Ok(DenominatedAmount::new(amount, denom)) + } +} + +impl> + ShieldedApi for T +{ +} diff --git a/crates/sdk/src/masp/test_utils.rs b/crates/shielded_token/src/masp/test_utils.rs similarity index 97% rename from crates/sdk/src/masp/test_utils.rs rename to crates/shielded_token/src/masp/test_utils.rs index ede69e923d..a98f68ca46 100644 --- a/crates/sdk/src/masp/test_utils.rs +++ b/crates/shielded_token/src/masp/test_utils.rs @@ -10,12 +10,12 @@ use namada_core::chain::BlockHeight; use namada_core::collections::HashMap; use namada_core::masp::ExtendedViewingKey; use namada_tx::IndexedTx; +use namada_wallet::DatedKeypair; +use thiserror::Error; -use crate::error::Error; use crate::masp::utils::{ IndexedNoteEntry, MaspClient, MaspClientCapabilities, }; -use crate::wallet::DatedKeypair; /// A viewing key derived from A_SPENDING_KEY pub const AA_VIEWING_KEY: &str = "zvknam1qqqqqqqqqqqqqq9v0sls5r5de7njx8ehu49pqgmqr9ygelg87l5x8y4s9r0pjlvu6x74w9gjpw856zcu826qesdre628y6tjc26uhgj6d9zqur9l5u3p99d9ggc74ald6s8y3sdtka74qmheyqvdrasqpwyv2fsmxlz57lj4grm2pthzj3sflxc0jx0edrakx3vdcngrfjmru8ywkguru8mxss2uuqxdlglaz6undx5h8w7g70t2es850g48xzdkqay5qs0yw06rtxcpjdve6"; @@ -350,8 +350,19 @@ impl TestingMaspClient { } } +#[derive(Error, Debug)] +pub enum TestError { + /// Key Retrieval Errors + #[error("After retrying, could not fetch all MASP txs.")] + FetchFailure, +} + impl MaspClient for TestingMaspClient { - async fn last_block_height(&self) -> Result, Error> { + type Error = TestError; + + async fn last_block_height( + &self, + ) -> Result, Self::Error> { Ok(Some(self.last_height)) } @@ -359,16 +370,14 @@ impl MaspClient for TestingMaspClient { &self, from: BlockHeight, to: BlockHeight, - ) -> Result, Error> { + ) -> Result, Self::Error> { let mut txs = vec![]; for _height in from.0..=to.0 { if let Some(tx) = self.tx_recv.recv_async().await.unwrap() { txs.push(tx); } else { - return Err(Error::Other( - "After retrying, could not fetch all MASP txs.".into(), - )); + return 
Err(TestError::FetchFailure); } } @@ -383,7 +392,7 @@ impl MaspClient for TestingMaspClient { async fn fetch_commitment_tree( &self, _: BlockHeight, - ) -> Result, Error> { + ) -> Result, Self::Error> { unimplemented!( "Commitment tree fetching is not implemented by this client" ) @@ -392,7 +401,7 @@ impl MaspClient for TestingMaspClient { async fn fetch_note_index( &self, _: BlockHeight, - ) -> Result, Error> { + ) -> Result, Self::Error> { unimplemented!( "Transaction notes map fetching is not implemented by this client" ) @@ -401,7 +410,7 @@ impl MaspClient for TestingMaspClient { async fn fetch_witness_map( &self, _: BlockHeight, - ) -> Result>, Error> { + ) -> Result>, Self::Error> { unimplemented!("Witness map fetching is not implemented by this client") } } diff --git a/crates/tests/Cargo.toml b/crates/tests/Cargo.toml index 6f0134b207..92d86abe47 100644 --- a/crates/tests/Cargo.toml +++ b/crates/tests/Cargo.toml @@ -13,7 +13,7 @@ repository.workspace = true version.workspace = true [features] -default = [] +default = ["namada_sdk/std", "namada_sdk/masp"] mainnet = [ "namada_sdk/mainnet", ] @@ -28,7 +28,7 @@ namada-eth-bridge = [ [dependencies] namada_core = {path = "../core", features = ["testing"]} -namada_sdk = {path = "../sdk", features = ["download-params", "testing", "wasm-runtime"]} +namada_sdk = {path = "../sdk", default-features=false, features = ["download-params", "testing", "wasm-runtime"]} namada_test_utils = {path = "../test_utils"} namada_tx_env = {path = "../tx_env"} namada_tx_prelude = {path = "../tx_prelude"} diff --git a/crates/tests/src/e2e/ibc_tests.rs b/crates/tests/src/e2e/ibc_tests.rs index 6c14698c13..a99506e160 100644 --- a/crates/tests/src/e2e/ibc_tests.rs +++ b/crates/tests/src/e2e/ibc_tests.rs @@ -19,6 +19,7 @@ use namada_apps_lib::client::rpc::query_storage_value_bytes; use namada_apps_lib::config::ethereum_bridge; use namada_apps_lib::config::genesis::templates; use namada_apps_lib::tendermint_rpc::{Client, HttpClient, Url}; 
+use namada_core::masp::PaymentAddress; use namada_sdk::address::MASP; use namada_sdk::chain::Epoch; use namada_sdk::governance::cli::onchain::PgfFunding; @@ -32,7 +33,6 @@ use namada_sdk::ibc::core::host::types::identifiers::{ use namada_sdk::ibc::primitives::proto::Any; use namada_sdk::ibc::storage::*; use namada_sdk::ibc::trace::ibc_token; -use namada_sdk::masp::PaymentAddress; use namada_sdk::token::Amount; use namada_test_utils::TestWasms; use prost::Message; diff --git a/crates/token/Cargo.toml b/crates/token/Cargo.toml index 9e6ffe08d6..827da0c9e7 100644 --- a/crates/token/Cargo.toml +++ b/crates/token/Cargo.toml @@ -14,21 +14,28 @@ version.workspace = true [features] default = [] +std = ["namada_shielded_token/std"] +mainnet = ["namada_shielded_token/mainnet"] +masp = ["namada_shielded_token/masp"] +migrations = ["namada_migrations", "namada_shielded_token/migrations", "linkme"] multicore = ["namada_shielded_token/multicore"] download-params = ["namada_shielded_token/download-params"] -testing = ["namada_core/testing", "namada_shielded_token/testing", "proptest"] +testing = ["namada_core/testing", "namada_shielded_token/testing", "proptest", "masp_primitives"] arbitrary = ["dep:arbitrary", "namada_core/arbitrary"] [dependencies] +masp_primitives = {workspace = true, optional = true } namada_core = { path = "../core" } namada_events = { path = "../events", default-features = false } namada_macros = { path = "../macros" } +namada_migrations = { path = "../migrations", optional = true } namada_shielded_token = { path = "../shielded_token" } namada_systems = { path = "../systems" } namada_trans_token = { path = "../trans_token" } arbitrary = { workspace = true, optional = true } borsh.workspace = true +linkme = {workspace = true, optional = true} proptest = { workspace = true, optional = true } serde.workspace = true @@ -36,4 +43,7 @@ serde.workspace = true namada_core = { path = "../core", features = ["testing"] } namada_shielded_token = { path = 
"../shielded_token", features = ["testing"] } +masp_primitives.workspace = true + proptest.workspace = true + diff --git a/crates/token/src/lib.rs b/crates/token/src/lib.rs index e876b2abcd..69460ff73e 100644 --- a/crates/token/src/lib.rs +++ b/crates/token/src/lib.rs @@ -24,6 +24,8 @@ use namada_core::address::Address; use namada_core::borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; use namada_events::EmitEvents; use namada_macros::BorshDeserializer; +#[cfg(feature = "migrations")] +use namada_migrations::*; pub use namada_shielded_token::*; use namada_systems::parameters; pub use namada_trans_token::*; @@ -264,20 +266,48 @@ impl Transfer { } } -#[cfg(any(test, feature = "testing"))] +#[cfg(all(any(test, feature = "testing"), feature = "masp"))] /// Testing helpers and strategies for tokens pub mod testing { + use std::collections::BTreeMap; + use std::sync::Mutex; + + use masp_primitives::consensus::testing::arb_height; + #[cfg(feature = "mainnet")] + use masp_primitives::consensus::MainNetwork as Network; + #[cfg(not(feature = "mainnet"))] + use masp_primitives::consensus::TestNetwork as Network; + use masp_primitives::merkle_tree::FrozenCommitmentTree; + use masp_primitives::transaction::builder::Builder; + use masp_primitives::transaction::components::sapling::builder::{ + RngBuildParams, StoredBuildParams, + }; + use masp_primitives::transaction::components::{TxOut, U64Sum}; + use masp_primitives::transaction::fees::fixed::FeeRule; use namada_core::address::testing::{ arb_established_address, arb_non_internal_address, }; - use namada_core::address::Address; + use namada_core::address::{Address, MASP}; + use namada_core::collections::HashMap; + use namada_core::masp::{encode_asset_type, AssetData, TAddrData}; pub use namada_core::token::*; + use namada_shielded_token::masp::testing::{ + arb_masp_epoch, arb_output_descriptions, arb_pre_asset_type, arb_rng, + arb_spend_descriptions, MockTxProver, TestCsprng, + }; + use 
namada_shielded_token::masp::{ShieldedTransfer, WalletMap, NETWORK}; pub use namada_trans_token::testing::*; + use proptest::collection; use proptest::prelude::*; use proptest::sample::SizeRange; use super::Transfer; + // Maximum value for a note partition + const MAX_MONEY: u64 = 100; + // Maximum number of partitions for a note + const MAX_SPLITS: usize = 3; + prop_compose! { /// Generate a transparent transfer fn arb_single_transparent_transfer()( @@ -299,18 +329,145 @@ pub mod testing { pub fn arb_transparent_transfer( number_of_txs: impl Into, ) -> impl Strategy { - proptest::collection::vec( - arb_single_transparent_transfer(), - number_of_txs, - ) - .prop_filter_map("Transfers must not overflow", |data| { - data.into_iter().try_fold( - Transfer::default(), - |acc, (source, target, token, amount)| { - acc.transfer(source, target, token, amount) - }, - ) - }) + collection::vec(arb_single_transparent_transfer(), number_of_txs) + .prop_filter_map("Transfers must not overflow", |data| { + data.into_iter().try_fold( + Transfer::default(), + |acc, (source, target, token, amount)| { + acc.transfer(source, target, token, amount) + }, + ) + }) + } + + prop_compose! 
{ + /// Generate an arbitrary shielded MASP transaction builder + #[allow(clippy::arithmetic_side_effects)] + pub fn arb_shielded_builder(asset_range: impl Into)( + assets in collection::hash_map( + arb_pre_asset_type(), + collection::vec(..MAX_MONEY, ..MAX_SPLITS), + asset_range, + ), + )( + expiration_height in arb_height(masp_primitives::consensus::BranchId::MASP, &Network), + spend_descriptions in assets + .iter() + .map(|(asset, values)| arb_spend_descriptions(asset.clone(), values.clone())) + .collect::>(), + output_descriptions in assets + .iter() + .map(|(asset, values)| arb_output_descriptions(asset.clone(), values.clone())) + .collect::>(), + input_data in collection::vec((any::(), arb_non_internal_address()), assets.len() * MAX_SPLITS), + output_data in collection::vec((any::(), arb_non_internal_address()), assets.len() * MAX_SPLITS), + assets in Just(assets), + ) -> ( + Transfer, + Builder::, + HashMap, + ) { + // Enable assets to be more easily decoded + let mut asset_decoder = BTreeMap::new(); + for asset_data in assets.keys() { + let asset_type = encode_asset_type( + asset_data.token.clone(), + asset_data.denom, + asset_data.position, + asset_data.epoch, + ).unwrap(); + asset_decoder.insert(asset_type, asset_data); + } + let mut transfer = Transfer::default(); + let mut builder = Builder::::new( + NETWORK, + // NOTE: this is going to add 20 more blocks to the actual + // expiration but there's no other exposed function that we could + // use from the masp crate to specify the expiration better + expiration_height.unwrap(), + ); + let mut leaves = Vec::new(); + // First construct a Merkle tree containing all notes to be used + for (_esk, _div, _note, node) in spend_descriptions.iter().flatten() { + leaves.push(*node); + } + let tree = FrozenCommitmentTree::new(&leaves); + // Then use the notes knowing that they all have the same anchor + for ((is_shielded, address), (idx, (esk, div, note, _node))) in + 
input_data.into_iter().zip(spend_descriptions.iter().flatten().enumerate()) + { + // Compute the equivalent transparent movement + let asset_data = asset_decoder[¬e.asset_type]; + let amount = DenominatedAmount::new( + Amount::from_masp_denominated(note.value, asset_data.position), + asset_data.denom, + ); + // Use either a transparent input or a shielded input + if is_shielded { + builder.add_sapling_spend(*esk, *div, *note, tree.path(idx)).unwrap(); + transfer = transfer.debit(MASP, asset_data.token.clone(), amount).unwrap(); + } else { + let txout = TxOut { + address: TAddrData::Addr(address.clone()).taddress(), + asset_type: note.asset_type, + value: note.value, + }; + builder.add_transparent_input(txout).unwrap(); + transfer = transfer.debit(address, asset_data.token.clone(), amount).unwrap(); + } + } + for ((is_shielded, address), (ovk, payment_addr, asset_type, value, memo)) in + output_data.into_iter().zip(output_descriptions.into_iter().flatten()) + { + // Compute the equivalent transparent movement + let asset_data = asset_decoder[&asset_type]; + let amount = DenominatedAmount::new( + Amount::from_masp_denominated(value, asset_data.position), + asset_data.denom, + ); + // Use either a transparent output or a shielded output + if is_shielded { + builder.add_sapling_output(ovk, payment_addr, asset_type, value, memo).unwrap(); + transfer = transfer.credit(MASP, asset_data.token.clone(), amount).unwrap(); + } else { + builder.add_transparent_output( + &TAddrData::Addr(address.clone()).taddress(), + asset_type, + value, + ).unwrap(); + transfer = transfer.credit(address, asset_data.token.clone(), amount).unwrap(); + } + } + (transfer, builder, assets.into_iter().map(|(k, v)| (k, v.iter().sum())).collect()) + } + } + + prop_compose! 
{ + /// Generate an arbitrary MASP shielded transfer + pub fn arb_shielded_transfer( + asset_range: impl Into, + )(asset_range in Just(asset_range.into()))( + (mut transfer, builder, asset_types) in arb_shielded_builder(asset_range), + epoch in arb_masp_epoch(), + prover_rng in arb_rng().prop_map(TestCsprng), + mut rng in arb_rng().prop_map(TestCsprng), + bparams_rng in arb_rng().prop_map(TestCsprng), + ) -> (Transfer, ShieldedTransfer, HashMap, StoredBuildParams) { + let mut rng_build_params = RngBuildParams::new(bparams_rng); + let (masp_tx, metadata) = builder.clone().build( + &MockTxProver(Mutex::new(prover_rng)), + &FeeRule::non_standard(U64Sum::zero()), + &mut rng, + &mut rng_build_params, + ).unwrap(); + transfer.shielded_section_hash = Some(masp_tx.txid().into()); + (transfer, ShieldedTransfer { + builder: builder.map_builder(WalletMap), + metadata, + masp_tx, + epoch, + }, asset_types, rng_build_params.to_stored().unwrap()) + } } } diff --git a/crates/wallet/Cargo.toml b/crates/wallet/Cargo.toml new file mode 100644 index 0000000000..150a01f881 --- /dev/null +++ b/crates/wallet/Cargo.toml @@ -0,0 +1,48 @@ +[package] +name = "namada_wallet" +description = "Namada wallet" +resolver = "2" +authors.workspace = true +edition.workspace = true +documentation.workspace = true +homepage.workspace = true +keywords.workspace = true +license.workspace = true +readme.workspace = true +repository.workspace = true +version.workspace = true + +[features] +default = [] +std = ["fd-lock", "download-params"] +download-params = [] +migrations = ["namada_migrations"] + +[dependencies] +namada_core = { path = "../core", features = ["rand"]} +namada_ibc = {path = "../ibc" } +namada_macros = { path = "../macros" } +namada_migrations = {path = "../migrations", optional = true } + +bimap.workspace = true +borsh.workspace = true +borsh-ext.workspace = true +itertools.workspace = true +derivation-path.workspace = true +data-encoding.workspace = true +fd-lock = {workspace = true, 
optional = true} +masp_primitives.workspace = true +orion.workspace = true +rand.workspace = true +rand_core.workspace = true +serde.workspace = true +slip10_ed25519.workspace = true +smooth-operator.workspace = true +thiserror.workspace = true +tiny-bip39.workspace = true +tiny-hderive.workspace = true +toml.workspace = true +zeroize.workspace = true + +[dev-dependencies] +base58.workspace = true \ No newline at end of file diff --git a/crates/sdk/src/wallet/alias.rs b/crates/wallet/src/alias.rs similarity index 100% rename from crates/sdk/src/wallet/alias.rs rename to crates/wallet/src/alias.rs diff --git a/crates/sdk/src/wallet/derivation_path.rs b/crates/wallet/src/derivation_path.rs similarity index 100% rename from crates/sdk/src/wallet/derivation_path.rs rename to crates/wallet/src/derivation_path.rs diff --git a/crates/sdk/src/wallet/keys.rs b/crates/wallet/src/keys.rs similarity index 99% rename from crates/sdk/src/wallet/keys.rs rename to crates/wallet/src/keys.rs index 85ed6e4c96..c3bfd4140c 100644 --- a/crates/sdk/src/wallet/keys.rs +++ b/crates/wallet/src/keys.rs @@ -14,7 +14,7 @@ use serde::{Deserialize, Serialize}; use thiserror::Error; use zeroize::Zeroizing; -use crate::wallet::WalletIo; +use crate::WalletIo; const ENCRYPTED_KEY_PREFIX: &str = "encrypted:"; const UNENCRYPTED_KEY_PREFIX: &str = "unencrypted:"; diff --git a/crates/sdk/src/wallet/mod.rs b/crates/wallet/src/lib.rs similarity index 99% rename from crates/sdk/src/wallet/mod.rs rename to crates/wallet/src/lib.rs index fd91d6e1e6..e6ae5b87ff 100644 --- a/crates/sdk/src/wallet/mod.rs +++ b/crates/wallet/src/lib.rs @@ -37,7 +37,7 @@ pub use self::keys::{ StoredKeypair, }; pub use self::store::{ConfirmationResponse, ValidatorData, ValidatorKeys}; -use crate::wallet::store::{derive_hd_secret_key, derive_hd_spending_key}; +use crate::store::{derive_hd_secret_key, derive_hd_spending_key}; const DISPOSABLE_KEY_LIFETIME_IN_SECONDS: i64 = 7 * 24 * 60 * 60; // 1 week diff --git 
a/crates/sdk/src/wallet/pre_genesis.rs b/crates/wallet/src/pre_genesis.rs similarity index 82% rename from crates/sdk/src/wallet/pre_genesis.rs rename to crates/wallet/src/pre_genesis.rs index dbe1a11571..84f2de2364 100644 --- a/crates/sdk/src/wallet/pre_genesis.rs +++ b/crates/wallet/src/pre_genesis.rs @@ -5,8 +5,7 @@ use serde::{Deserialize, Serialize}; use thiserror::Error; use zeroize::Zeroizing; -use crate::wallet; -use crate::wallet::StoredKeypair; +use crate::StoredKeypair; /// Ways in which wallet store operations can fail #[derive(Error, Debug)] @@ -22,7 +21,7 @@ pub enum ReadError { StoreNewWallet(String), /// Failed to decode a key #[error("Failed to decode a key: {0}")] - Decryption(wallet::keys::DecryptionError), + Decryption(crate::keys::DecryptionError), } /// Validator pre-genesis wallet includes all the required keys for genesis @@ -45,13 +44,13 @@ pub struct ValidatorWallet { #[derive(Serialize, Deserialize, Debug)] pub struct ValidatorStore { /// Cryptographic keypair for consensus key - pub consensus_key: wallet::StoredKeypair, + pub consensus_key: StoredKeypair, /// Cryptographic keypair for eth cold key - pub eth_cold_key: wallet::StoredKeypair, + pub eth_cold_key: StoredKeypair, /// Cryptographic keypair for Tendermint node key - pub tendermint_node_key: wallet::StoredKeypair, + pub tendermint_node_key: StoredKeypair, /// Special validator keys. Contains the ETH hot key. 
- pub validator_keys: wallet::ValidatorKeys, + pub validator_keys: crate::ValidatorKeys, } impl ValidatorStore { @@ -74,12 +73,12 @@ pub fn gen_key_to_store( password: Option>, rng: &mut (impl CryptoRng + Rng), ) -> (StoredKeypair, common::SecretKey) { - let sk = wallet::gen_secret_key(scheme, rng); + let sk = crate::gen_secret_key(scheme, rng); StoredKeypair::new(sk, password) } -impl From for ReadError { - fn from(err: wallet::keys::DecryptionError) -> Self { +impl From for ReadError { + fn from(err: crate::keys::DecryptionError) -> Self { ReadError::Decryption(err) } } diff --git a/crates/sdk/src/wallet/store.rs b/crates/wallet/src/store.rs similarity index 99% rename from crates/sdk/src/wallet/store.rs rename to crates/wallet/src/store.rs index 4ddd27bbbd..287bd7822d 100644 --- a/crates/sdk/src/wallet/store.rs +++ b/crates/wallet/src/store.rs @@ -22,8 +22,8 @@ use zeroize::Zeroizing; use super::alias::{self, Alias}; use super::derivation_path::DerivationPath; use super::pre_genesis; -use crate::wallet::keys::{DatedKeypair, DatedSpendingKey, DatedViewingKey}; -use crate::wallet::{StoredKeypair, WalletIo}; +use crate::keys::{DatedKeypair, DatedSpendingKey, DatedViewingKey}; +use crate::{StoredKeypair, WalletIo}; /// Actions that can be taken when there is an alias conflict pub enum ConfirmationResponse { diff --git a/wasm/Cargo.lock b/wasm/Cargo.lock index ff9d872023..758152bf8e 100644 --- a/wasm/Cargo.lock +++ b/wasm/Cargo.lock @@ -3583,6 +3583,7 @@ dependencies = [ "prost-types", "rand 0.8.5", "rand_core 0.6.4", + "rayon", "ripemd", "serde", "serde_json", @@ -3708,6 +3709,18 @@ dependencies = [ "tracing", ] +[[package]] +name = "namada_io" +version = "0.43.0" +dependencies = [ + "async-trait", + "kdam", + "namada_core", + "tendermint-rpc", + "thiserror", + "tokio", +] + [[package]] name = "namada_macros" version = "0.43.0" @@ -3789,19 +3802,16 @@ dependencies = [ "circular-queue", "clap", "data-encoding", - "derivation-path", "duration-str", "either", 
"ethbridge-bridge-contract", "ethers", "eyre", "fd-lock", - "flume", "futures", "init-once", "itertools 0.12.1", "jubjub 0.10.0 (git+https://github.com/heliaxdev/jubjub.git?rev=a373686962f4e9d0edb3b4716f86ff6bbd9aa86c)", - "kdam", "lazy_static", "masp_primitives", "masp_proofs", @@ -3812,6 +3822,7 @@ dependencies = [ "namada_gas", "namada_governance", "namada_ibc", + "namada_io", "namada_macros", "namada_parameters", "namada_proof_of_stake", @@ -3822,9 +3833,9 @@ dependencies = [ "namada_vm", "namada_vote_ext", "namada_vp", + "namada_wallet", "num-traits 0.2.17", "num256", - "orion", "owo-colors", "paste", "patricia_tree", @@ -3838,17 +3849,14 @@ dependencies = [ "serde", "serde_json", "sha2 0.9.9", - "slip10_ed25519", "smooth-operator", "tempfile", "tendermint-rpc", "thiserror", "tiny-bip39", - "tiny-hderive", "tokio", "toml 0.5.11", "tracing", - "typed-builder", "xorf", "zeroize", ] @@ -3857,26 +3865,41 @@ dependencies = [ name = "namada_shielded_token" version = "0.43.0" dependencies = [ + "async-trait", "borsh", + "eyre", + "flume", + "futures", + "itertools 0.12.1", "lazy_static", "masp_primitives", "masp_proofs", "namada_account", "namada_controller", "namada_core", + "namada_events", "namada_gas", + "namada_io", + "namada_macros", "namada_state", "namada_systems", "namada_tx", "namada_vp", + "namada_wallet", + "proptest", + "rand 0.8.5", "rand_core 0.6.4", "rayon", "ripemd", "serde", + "serde_json", "sha2 0.9.9", "smooth-operator", + "tempfile", "thiserror", "tracing", + "typed-builder", + "xorf", ] [[package]] @@ -3973,6 +3996,7 @@ name = "namada_token" version = "0.43.0" dependencies = [ "borsh", + "masp_primitives", "namada_core", "namada_events", "namada_macros", @@ -4153,6 +4177,33 @@ dependencies = [ "sha2 0.9.9", ] +[[package]] +name = "namada_wallet" +version = "0.43.0" +dependencies = [ + "bimap", + "borsh", + "borsh-ext", + "data-encoding", + "derivation-path", + "itertools 0.12.1", + "masp_primitives", + "namada_core", + "namada_ibc", + 
"namada_macros", + "orion", + "rand 0.8.5", + "rand_core 0.6.4", + "serde", + "slip10_ed25519", + "smooth-operator", + "thiserror", + "tiny-bip39", + "tiny-hderive", + "toml 0.5.11", + "zeroize", +] + [[package]] name = "nanorand" version = "0.7.0" diff --git a/wasm_for_tests/Cargo.lock b/wasm_for_tests/Cargo.lock index bc9fb2d037..65f9a77b98 100644 --- a/wasm_for_tests/Cargo.lock +++ b/wasm_for_tests/Cargo.lock @@ -150,6 +150,17 @@ version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" +[[package]] +name = "async-trait" +version = "0.1.82" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a27b8a3a6e1a44fa4c8baf1f653e4172e81486d4941f2237e20dc2d0cf4ddff1" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.65", +] + [[package]] name = "autocfg" version = "1.1.0" @@ -492,6 +503,31 @@ dependencies = [ "libc", ] +[[package]] +name = "crossbeam-deque" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d" +dependencies = [ + "crossbeam-epoch", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-epoch" +version = "0.9.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" + [[package]] name = "crunchy" version = "0.2.2" @@ -860,6 +896,7 @@ checksum = "da0290714b38af9b4a7b094b8a37086d1b4e61f2df9122c3cad2577669145335" dependencies = [ "futures-channel", "futures-core", + "futures-executor", "futures-io", "futures-sink", "futures-task", @@ -882,12 +919,34 @@ version = "0.3.29" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "eb1d22c66e66d9d72e1758f0bd7d4fd0bee04cad842ee34587d68c07e45d088c" +[[package]] +name = "futures-executor" +version = "0.3.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f4fb8693db0cf099eadcca0efe2a5a22e4550f98ed16aba6c48700da29597bc" +dependencies = [ + "futures-core", + "futures-task", + "futures-util", +] + [[package]] name = "futures-io" version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8bf34a163b5c4c52d0478a4d757da8fb65cabef42ba90515efee0f6f9fa45aaa" +[[package]] +name = "futures-macro" +version = "0.3.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53b153fd91e4b0147f4aced87be237c98248656bb01050b96bf3ee89220a8ddb" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.65", +] + [[package]] name = "futures-sink" version = "0.3.29" @@ -906,11 +965,16 @@ version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a19526d624e703a3179b3d322efec918b6246ea0fa51d41124525f00f1cc8104" dependencies = [ + "futures-channel", "futures-core", + "futures-io", + "futures-macro", "futures-sink", "futures-task", + "memchr", "pin-project-lite", "pin-utils", + "slab", ] [[package]] @@ -1918,6 +1982,7 @@ dependencies = [ "num_enum", "primitive-types", "prost-types", + "rayon", "ripemd", "serde", "serde_json", @@ -2082,25 +2147,36 @@ dependencies = [ name = "namada_shielded_token" version = "0.43.0" dependencies = [ + "async-trait", "borsh", + "eyre", + "futures", + "itertools 0.12.1", "lazy_static", "masp_primitives", "masp_proofs", "namada_account", "namada_controller", "namada_core", + "namada_events", "namada_gas", + "namada_macros", "namada_state", "namada_systems", "namada_tx", "namada_vp", + "rand", "rand_core", "ripemd", "serde", + "serde_json", "sha2 0.9.9", "smooth-operator", + "tempfile", "thiserror", "tracing", + "typed-builder", + "xorf", ] 
[[package]] @@ -2785,6 +2861,26 @@ dependencies = [ "getrandom", ] +[[package]] +name = "rayon" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa" +dependencies = [ + "either", + "rayon-core", +] + +[[package]] +name = "rayon-core" +version = "1.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2" +dependencies = [ + "crossbeam-deque", + "crossbeam-utils", +] + [[package]] name = "reddsa" version = "0.5.1" @@ -3152,6 +3248,15 @@ dependencies = [ "rand_core", ] +[[package]] +name = "slab" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" +dependencies = [ + "autocfg", +] + [[package]] name = "smooth-operator" version = "0.7.0" @@ -3612,6 +3717,26 @@ dependencies = [ "rlsf", ] +[[package]] +name = "typed-builder" +version = "0.19.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a06fbd5b8de54c5f7c91f6fe4cebb949be2125d7758e630bb58b1d831dbce600" +dependencies = [ + "typed-builder-macro", +] + +[[package]] +name = "typed-builder-macro" +version = "0.19.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9534daa9fd3ed0bd911d462a37f172228077e7abf18c18a5f67199d959205f8" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.65", +] + [[package]] name = "typenum" version = "1.17.0" @@ -4006,6 +4131,17 @@ dependencies = [ "tap", ] +[[package]] +name = "xorf" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf24c008fe464f5d8f58b8d16a1ab7e930bd73b2a6933ff8704c414b2bed7f92" +dependencies = [ + "libm", + "rand", + "serde", +] + [[package]] name = "zcash_encoding" version = "0.2.0"