diff --git a/.changelog/unreleased/improvements/3835-refactor-tx-crate.md b/.changelog/unreleased/improvements/3835-refactor-tx-crate.md new file mode 100644 index 0000000000..add1ab8cdb --- /dev/null +++ b/.changelog/unreleased/improvements/3835-refactor-tx-crate.md @@ -0,0 +1,2 @@ +- Refactored tx crate modules and slightly improved its API. + ([\#3835](https://github.com/anoma/namada/pull/3835)) \ No newline at end of file diff --git a/crates/apps_lib/src/client/tx.rs b/crates/apps_lib/src/client/tx.rs index 1ffae47901..2384f21b2b 100644 --- a/crates/apps_lib/src/client/tx.rs +++ b/crates/apps_lib/src/client/tx.rs @@ -1069,7 +1069,8 @@ pub async fn sign_tx( where ::Error: std::fmt::Display, { - let tx = if let Ok(transaction) = Tx::deserialize(tx_data.as_ref()) { + let tx = if let Ok(transaction) = Tx::try_from_json_bytes(tx_data.as_ref()) + { transaction } else { edisplay_line!(namada.io(), "Couldn't decode the transaction."); @@ -1117,8 +1118,9 @@ where }; let signature_path = File::create(&output_path) .expect("Should be able to create signature file."); - serde_json::to_writer_pretty(signature_path, &signature) - .expect("Signature should be serializable."); + signature.to_writer_json(signature_path).expect( + "Signature should be serializable and the file writeable.", + ); display_line!( namada.io(), diff --git a/crates/apps_lib/src/client/utils.rs b/crates/apps_lib/src/client/utils.rs index 8e8c7c6fc5..1bb53164b6 100644 --- a/crates/apps_lib/src/client/utils.rs +++ b/crates/apps_lib/src/client/utils.rs @@ -1050,7 +1050,8 @@ pub async fn sign_offline( safe_exit(1) }; - let tx = if let Ok(transaction) = Tx::deserialize(tx_data.as_ref()) { + let tx = if let Ok(transaction) = Tx::try_from_json_bytes(tx_data.as_ref()) + { transaction } else { eprintln!("Couldn't decode the transaction."); diff --git a/crates/benches/native_vps.rs b/crates/benches/native_vps.rs index f2350ca722..e40512dde2 100644 --- a/crates/benches/native_vps.rs +++ 
b/crates/benches/native_vps.rs @@ -1684,7 +1684,7 @@ fn ibc_vp_validate_action(c: &mut Criterion) { let verifiers_from_tx = shielded_ctx.shell.write().execute_tx(&signed_tx.to_ref()); - let tx_data = signed_tx.tx.data(&signed_tx.cmt).unwrap(); + let tx_data = signed_tx.to_ref().data().unwrap(); let shell_read = shielded_ctx.shell.read(); let (verifiers, keys_changed) = shell_read .state @@ -1745,7 +1745,7 @@ fn ibc_vp_execute_action(c: &mut Criterion) { let verifiers_from_tx = shielded_ctx.shell.write().execute_tx(&signed_tx.to_ref()); let shell_read = shielded_ctx.shell.read(); - let tx_data = signed_tx.tx.data(&signed_tx.cmt).unwrap(); + let tx_data = signed_tx.to_ref().data().unwrap(); let (verifiers, keys_changed) = shell_read .state .write_log() diff --git a/crates/ethereum_bridge/src/vp/bridge_pool_vp.rs b/crates/ethereum_bridge/src/vp/bridge_pool_vp.rs index 6935c63cb7..0e8c08c061 100644 --- a/crates/ethereum_bridge/src/vp/bridge_pool_vp.rs +++ b/crates/ethereum_bridge/src/vp/bridge_pool_vp.rs @@ -87,7 +87,7 @@ where "Rejecting transaction, since the Ethereum bridge is disabled.", )); } - let Some(tx_data) = batched_tx.tx.data(batched_tx.cmt) else { + let Some(tx_data) = batched_tx.data() else { return Err(Error::new_const("No transaction data found")); }; let transfer: PendingTransfer = diff --git a/crates/governance/src/vp/mod.rs b/crates/governance/src/vp/mod.rs index 45e13a12e1..28f3c13867 100644 --- a/crates/governance/src/vp/mod.rs +++ b/crates/governance/src/vp/mod.rs @@ -60,7 +60,7 @@ where /// Run the validity predicate pub fn validate_tx( ctx: &'ctx CTX, - tx_data: &BatchedTxRef<'_>, + tx: &BatchedTxRef<'_>, keys_changed: &BTreeSet, verifiers: &BTreeSet
, ) -> Result<()> { @@ -74,7 +74,7 @@ where // Is VP triggered by a governance proposal? if is_proposal_accepted( &ctx.pre(), - tx_data.tx.data(tx_data.cmt).unwrap_or_default().as_ref(), + tx.data().unwrap_or_default().as_ref(), )? { return Ok(()); } @@ -168,9 +168,7 @@ where (KeyType::PROPOSAL_COMMIT, _) => { Self::is_valid_proposal_commit(ctx) } - (KeyType::PARAMETER, _) => { - Self::is_valid_parameter(ctx, tx_data) - } + (KeyType::PARAMETER, _) => Self::is_valid_parameter(ctx, tx), (KeyType::BALANCE, _) => { Self::is_valid_balance(ctx, &native_token) } @@ -988,8 +986,7 @@ where ctx: &'ctx CTX, batched_tx: &BatchedTxRef<'_>, ) -> Result<()> { - let BatchedTxRef { tx, cmt } = batched_tx; - tx.data(cmt).map_or_else( + batched_tx.data().map_or_else( || { Err(Error::new_const( "Governance parameter changes require tx data to be \ diff --git a/crates/governance/src/vp/pgf.rs b/crates/governance/src/vp/pgf.rs index c11bedc428..097858a3af 100644 --- a/crates/governance/src/vp/pgf.rs +++ b/crates/governance/src/vp/pgf.rs @@ -55,11 +55,7 @@ where // Is VP triggered by a governance proposal? if is_proposal_accepted( &ctx.pre(), - batched_tx - .tx - .data(batched_tx.cmt) - .unwrap_or_default() - .as_ref(), + batched_tx.data().unwrap_or_default().as_ref(), )? { return Ok(()); } @@ -189,7 +185,7 @@ where ctx: &'ctx CTX, batched_tx: &BatchedTxRef<'_>, ) -> Result<()> { - batched_tx.tx.data(batched_tx.cmt).map_or_else( + batched_tx.data().map_or_else( || { Err(Error::new_const( "PGF parameter changes require tx data to be present", diff --git a/crates/ibc/src/vp/mod.rs b/crates/ibc/src/vp/mod.rs index 6df0eba624..6371c9d0d9 100644 --- a/crates/ibc/src/vp/mod.rs +++ b/crates/ibc/src/vp/mod.rs @@ -148,19 +148,12 @@ where // Is VP triggered by a governance proposal? if Gov::is_proposal_accepted( &self.ctx.pre(), - batched_tx - .tx - .data(batched_tx.cmt) - .unwrap_or_default() - .as_ref(), + batched_tx.data().unwrap_or_default().as_ref(), )? 
{ return Ok(()); } - let tx_data = batched_tx - .tx - .data(batched_tx.cmt) - .ok_or(VpError::NoTxData)?; + let tx_data = batched_tx.data().ok_or(VpError::NoTxData)?; // Pseudo execution and compare them self.validate_state(&tx_data, keys_changed)?; diff --git a/crates/node/src/bench_utils.rs b/crates/node/src/bench_utils.rs index e8224f4a24..35be04fa70 100644 --- a/crates/node/src/bench_utils.rs +++ b/crates/node/src/bench_utils.rs @@ -1338,7 +1338,7 @@ impl BenchShieldedCtx { }; let vectorized_transfer = - Transfer::deserialize(&mut tx.tx.data(&tx.cmt).unwrap().as_slice()) + Transfer::deserialize(&mut tx.to_ref().data().unwrap().as_slice()) .unwrap(); let sources = vec![vectorized_transfer.sources.into_iter().next().unwrap()] diff --git a/crates/node/src/dry_run_tx.rs b/crates/node/src/dry_run_tx.rs index d5efc49921..1957045f0b 100644 --- a/crates/node/src/dry_run_tx.rs +++ b/crates/node/src/dry_run_tx.rs @@ -29,7 +29,7 @@ where H: 'static + StorageHasher + Sync, CA: 'static + WasmCacheAccess + Sync, { - let tx = Tx::try_from(&request.data[..]).into_storage_result()?; + let tx = Tx::try_from_bytes(&request.data[..]).into_storage_result()?; tx.validate_tx().into_storage_result()?; let gas_scale = parameters::get_gas_scale(&state)?; diff --git a/crates/node/src/protocol.rs b/crates/node/src/protocol.rs index 6466c75b83..1405bada74 100644 --- a/crates/node/src/protocol.rs +++ b/crates/node/src/protocol.rs @@ -28,7 +28,7 @@ use namada_sdk::tx::data::{ BatchedTxResult, ExtendedTxResult, TxResult, VpStatusFlags, VpsResult, WrapperTx, }; -use namada_sdk::tx::{BatchedTxRef, Tx, TxCommitments}; +use namada_sdk::tx::{BatchedTxRef, InnerTxRef, Tx, TxCommitments}; use namada_sdk::validation::{ EthBridgeNutVp, EthBridgePoolVp, EthBridgeVp, GovernanceVp, IbcVp, MaspVp, MultitokenVp, NativeVpCtx, ParametersVp, PgfVp, PosVp, @@ -272,8 +272,11 @@ where DispatchArgs::Protocol(protocol_tx) => { // No bundles of protocol transactions, only take the first one let cmt = 
tx.first_commitments().ok_or(Error::MissingInnerTxs)?; - let batched_tx_result = - apply_protocol_tx(protocol_tx.tx, tx.data(cmt), state)?; + let batched_tx_result = apply_protocol_tx( + protocol_tx.tx, + tx.data(InnerTxRef::Commitment(cmt)), + state, + )?; Ok({ let mut batch_results = TxResult::new(); diff --git a/crates/node/src/shell/finalize_block.rs b/crates/node/src/shell/finalize_block.rs index 48165a8548..b71c18c730 100644 --- a/crates/node/src/shell/finalize_block.rs +++ b/crates/node/src/shell/finalize_block.rs @@ -605,16 +605,17 @@ where let mut successful_wrappers = vec![]; for (tx_index, processed_tx) in processed_txs.iter().enumerate() { - let tx = if let Ok(tx) = Tx::try_from(processed_tx.tx.as_ref()) { - tx - } else { - tracing::error!( - "FinalizeBlock received a tx that could not be \ - deserialized to a Tx type. This is likely a protocol \ - transaction." - ); - continue; - }; + let tx = + if let Ok(tx) = Tx::try_from_bytes(processed_tx.tx.as_ref()) { + tx + } else { + tracing::error!( + "FinalizeBlock received a tx that could not be \ + deserialized to a Tx type. This is likely a protocol \ + transaction." 
+ ); + continue; + }; let result_code = ResultCode::from_u32(processed_tx.result.code) .expect("Result code conversion should not fail"); diff --git a/crates/node/src/shell/mod.rs b/crates/node/src/shell/mod.rs index ea27241312..6115fcb789 100644 --- a/crates/node/src/shell/mod.rs +++ b/crates/node/src/shell/mod.rs @@ -1057,7 +1057,7 @@ where } // Tx format check - let tx = match Tx::try_from(tx_bytes).map_err(Error::TxDecoding) { + let tx = match Tx::try_from_bytes(tx_bytes).map_err(Error::TxDecoding) { Ok(t) => t, Err(msg) => { response.code = ResultCode::InvalidTx.into(); @@ -2108,7 +2108,7 @@ mod shell_tests { ) .await .unwrap(); - let tx = Tx::try_from(&serialized_tx[..]).unwrap(); + let tx = Tx::try_from_bytes(&serialized_tx[..]).unwrap(); match ethereum_tx_data_variants::ValSetUpdateVext::try_from(&tx) { Ok(signed_valset_upd) => break signed_valset_upd, @@ -2158,7 +2158,7 @@ mod shell_tests { // attempt to receive vote extension tx aggregating // all expired events let serialized_tx = broadcaster_rx.blocking_recv().unwrap(); - let tx = Tx::try_from(&serialized_tx[..]).unwrap(); + let tx = Tx::try_from_bytes(&serialized_tx[..]).unwrap(); // check data inside tx let vote_extension = diff --git a/crates/node/src/shell/prepare_proposal.rs b/crates/node/src/shell/prepare_proposal.rs index f5271c1115..f791ffb606 100644 --- a/crates/node/src/shell/prepare_proposal.rs +++ b/crates/node/src/shell/prepare_proposal.rs @@ -289,7 +289,7 @@ where H: StorageHasher + Sync + 'static, CA: 'static + WasmCacheAccess + Sync, { - let tx = Tx::try_from(tx_bytes).map_err(|_| ())?; + let tx = Tx::try_from_bytes(tx_bytes).map_err(|_| ())?; let wrapper = tx.header.wrapper().ok_or(())?; // If tx doesn't have an expiration it is valid. 
If time cannot be @@ -754,7 +754,7 @@ mod test_prepare_proposal { assert_eq!(rsp.txs.len(), 1); let tx_bytes = rsp.txs.remove(0); - let got = Tx::try_from(&tx_bytes[..]).unwrap(); + let got = Tx::try_from_bytes(&tx_bytes[..]).unwrap(); let eth_tx_data = (&got).try_into().expect("Test failed"); let rsp_ext = match eth_tx_data { EthereumTxData::EthEventsVext(ext) => ext, @@ -1371,7 +1371,7 @@ mod test_prepare_proposal { }; let proposed_txs = shell.prepare_proposal(req).txs.into_iter().map(|tx_bytes| { - Tx::try_from(tx_bytes.as_ref()).expect("Test failed") + Tx::try_from_bytes(tx_bytes.as_ref()).expect("Test failed") }); // since no events with valid nonces are contained in the vote // extension, we drop it from the proposal @@ -1419,7 +1419,7 @@ mod test_prepare_proposal { }; let proposed_txs = shell.prepare_proposal(req).txs.into_iter().map(|tx_bytes| { - Tx::try_from(tx_bytes.as_ref()).expect("Test failed") + Tx::try_from_bytes(tx_bytes.as_ref()).expect("Test failed") }); // find the event with the good nonce let mut ext = 'ext: { diff --git a/crates/node/src/shell/process_proposal.rs b/crates/node/src/shell/process_proposal.rs index 1485b06f15..53ca249f6f 100644 --- a/crates/node/src/shell/process_proposal.rs +++ b/crates/node/src/shell/process_proposal.rs @@ -238,7 +238,7 @@ where }; } - let maybe_tx = Tx::try_from(tx_bytes).map_or_else( + let maybe_tx = Tx::try_from_bytes(tx_bytes).map_or_else( |err| { tracing::debug!( ?err, diff --git a/crates/node/src/shell/vote_extensions.rs b/crates/node/src/shell/vote_extensions.rs index ed0925f6b8..6b5958debe 100644 --- a/crates/node/src/shell/vote_extensions.rs +++ b/crates/node/src/shell/vote_extensions.rs @@ -124,7 +124,7 @@ where ) -> DrainFilter<'shell, TxBytes, impl FnMut(&mut TxBytes) -> bool + 'shell> { drain_filter_polyfill::VecExt::drain_filter(txs, move |tx_bytes| { - let tx = match Tx::try_from(tx_bytes.as_ref()) { + let tx = match Tx::try_from_bytes(tx_bytes.as_ref()) { Ok(tx) => tx, Err(err) => { 
tracing::warn!( diff --git a/crates/parameters/src/vp.rs b/crates/parameters/src/vp.rs index 50f3b462b9..9762a2433d 100644 --- a/crates/parameters/src/vp.rs +++ b/crates/parameters/src/vp.rs @@ -31,7 +31,7 @@ where ) -> Result<()> { keys_changed.iter().try_for_each(|key| { let key_type: KeyType = key.into(); - let data = if let Some(data) = batched_tx.tx.data(batched_tx.cmt) { + let data = if let Some(data) = batched_tx.data() { data } else { return Err(Error::new_const( diff --git a/crates/proof_of_stake/src/vp.rs b/crates/proof_of_stake/src/vp.rs index dcb2d989f1..0c70c61c71 100644 --- a/crates/proof_of_stake/src/vp.rs +++ b/crates/proof_of_stake/src/vp.rs @@ -10,7 +10,7 @@ use namada_systems::governance; use namada_tx::action::{ Action, Bond, ClaimRewards, PosAction, Redelegation, Unbond, Withdraw, }; -use namada_tx::BatchedTxRef; +use namada_tx::{BatchedTxRef, InnerTxRef}; use namada_vp_env::{Error, Result, VpEnv}; use thiserror::Error; @@ -57,7 +57,7 @@ where // Check if this is a governance proposal first if batched_tx .tx - .data(batched_tx.cmt) + .data(InnerTxRef::Index(0)) .map(|tx_data| Gov::is_proposal_accepted(&ctx.pre(), &tx_data)) .transpose()? 
.unwrap_or(false) diff --git a/crates/sdk/src/masp.rs b/crates/sdk/src/masp.rs index bbd3becf37..9284400574 100644 --- a/crates/sdk/src/masp.rs +++ b/crates/sdk/src/masp.rs @@ -20,7 +20,7 @@ use namada_ibc::{decode_message, extract_masp_tx_from_envelope, IbcMessage}; use namada_io::client::Client; use namada_token::masp::shielded_wallet::ShieldedQueries; pub use namada_token::masp::{utils, *}; -use namada_tx::Tx; +use namada_tx::{InnerTxRef, Tx}; pub use utilities::{IndexerMaspClient, LedgerMaspClient}; use crate::error::{Error, QueryError}; @@ -72,20 +72,13 @@ fn extract_masp_tx( // Dereference the masp ref to the first instance that // matches is, even if it is not the exact one that produced // the event, the data we extract will be exactly the same - let masp_ibc_tx = tx - .commitments() - .iter() - .find(|cmt| cmt.data_sechash() == hash) + let tx_data = tx + .data(InnerTxRef::CommitmentHash(*hash)) .ok_or_else(|| { Error::Other(format!( - "Couldn't find data section with hash {hash}" + "Couldn't find data section with hash {hash}", )) })?; - let tx_data = tx.data(masp_ibc_tx).ok_or_else(|| { - Error::Other( - "Missing expected data section".to_string(), - ) - })?; let IbcMessage::Envelope(envelope) = decode_message::(&tx_data) diff --git a/crates/sdk/src/masp/utilities.rs b/crates/sdk/src/masp/utilities.rs index ebe0c83b0d..95c9565005 100644 --- a/crates/sdk/src/masp/utilities.rs +++ b/crates/sdk/src/masp/utilities.rs @@ -110,8 +110,9 @@ impl MaspClient for LedgerMaspClient { masp_refs, } in txs_results { - let tx = Tx::try_from(block[tx_index.0 as usize].as_ref()) - .map_err(|e| Error::Other(e.to_string()))?; + let tx = + Tx::try_from_bytes(block[tx_index.0 as usize].as_ref()) + .map_err(|e| Error::Other(e.to_string()))?; let extracted_masp_txs = extract_masp_tx(&tx, &masp_refs) .map_err(|e| Error::Other(e.to_string()))?; diff --git a/crates/sdk/src/signing.rs b/crates/sdk/src/signing.rs index 8d1c058489..82913d85c8 100644 --- a/crates/sdk/src/signing.rs +++ 
b/crates/sdk/src/signing.rs @@ -34,7 +34,7 @@ use namada_token::storage_key::balance_key; use namada_tx::data::pgf::UpdateStewardCommission; use namada_tx::data::pos::BecomeValidator; use namada_tx::data::{pos, Fee}; -use namada_tx::{MaspBuilder, Section, SignatureIndex, Tx}; +use namada_tx::{InnerTxRef, MaspBuilder, Section, SignatureIndex, Tx}; use rand::rngs::OsRng; use serde::{Deserialize, Serialize}; use tokio::sync::RwLock; @@ -224,7 +224,7 @@ where .iter() .map(|bytes| { let sigidx = - serde_json::from_slice::(bytes).unwrap(); + SignatureIndex::try_from_json_bytes(bytes).unwrap(); used_pubkeys.insert(sigidx.pubkey.clone()); sigidx }) @@ -987,6 +987,7 @@ pub async fn to_ledger_vector( }; for cmt in tx.commitments() { + let inner_tx = InnerTxRef::Commitment(cmt); // FIXME: need to push some string to differentiate between the // different txs of the bundle? let code_sec = tx @@ -1007,7 +1008,7 @@ pub async fn to_ledger_vector( if code_sec.tag == Some(TX_INIT_ACCOUNT_WASM.to_string()) { let init_account = InitAccount::try_from_slice( - &tx.data(cmt) + &tx.data(inner_tx.clone()) .ok_or_else(|| Error::Other("Invalid Data".to_string()))?, ) .map_err(|err| { @@ -1050,7 +1051,7 @@ pub async fn to_ledger_vector( ]); } else if code_sec.tag == Some(TX_BECOME_VALIDATOR_WASM.to_string()) { let init_validator = BecomeValidator::try_from_slice( - &tx.data(cmt) + &tx.data(inner_tx.clone()) .ok_or_else(|| Error::Other("Invalid Data".to_string()))?, ) .map_err(|err| { @@ -1123,7 +1124,7 @@ pub async fn to_ledger_vector( } } else if code_sec.tag == Some(TX_INIT_PROPOSAL.to_string()) { let init_proposal_data = InitProposalData::try_from_slice( - &tx.data(cmt) + &tx.data(inner_tx.clone()) .ok_or_else(|| Error::Other("Invalid Data".to_string()))?, ) .map_err(|err| { @@ -1185,7 +1186,7 @@ pub async fn to_ledger_vector( ]); } else if code_sec.tag == Some(TX_VOTE_PROPOSAL.to_string()) { let vote_proposal = VoteProposalData::try_from_slice( - &tx.data(cmt) + 
&tx.data(inner_tx.clone()) .ok_or_else(|| Error::Other("Invalid Data".to_string()))?, ) .map_err(|err| { @@ -1208,7 +1209,7 @@ pub async fn to_ledger_vector( ]); } else if code_sec.tag == Some(TX_REVEAL_PK.to_string()) { let public_key = common::PublicKey::try_from_slice( - &tx.data(cmt) + &tx.data(inner_tx.clone()) .ok_or_else(|| Error::Other("Invalid Data".to_string()))?, ) .map_err(|err| { @@ -1226,7 +1227,7 @@ pub async fn to_ledger_vector( .extend(vec![format!("Public key : {}", public_key)]); } else if code_sec.tag == Some(TX_UPDATE_ACCOUNT_WASM.to_string()) { let update_account = UpdateAccount::try_from_slice( - &tx.data(cmt) + &tx.data(inner_tx.clone()) .ok_or_else(|| Error::Other("Invalid Data".to_string()))?, ) .map_err(|err| { @@ -1292,7 +1293,7 @@ pub async fn to_ledger_vector( } } else if code_sec.tag == Some(TX_TRANSFER_WASM.to_string()) { let transfer = token::Transfer::try_from_slice( - &tx.data(cmt) + &tx.data(inner_tx.clone()) .ok_or_else(|| Error::Other("Invalid Data".to_string()))?, ) .map_err(|err| { @@ -1327,7 +1328,7 @@ pub async fn to_ledger_vector( .await?; } else if code_sec.tag == Some(TX_IBC_WASM.to_string()) { let data = tx - .data(cmt) + .data(inner_tx.clone()) .ok_or_else(|| Error::Other("Invalid Data".to_string()))?; if let Ok(transfer) = @@ -1582,7 +1583,7 @@ pub async fn to_ledger_vector( } } else if code_sec.tag == Some(TX_BOND_WASM.to_string()) { let bond = pos::Bond::try_from_slice( - &tx.data(cmt) + &tx.data(inner_tx.clone()) .ok_or_else(|| Error::Other("Invalid Data".to_string()))?, ) .map_err(|err| { @@ -1615,7 +1616,7 @@ pub async fn to_ledger_vector( ]); } else if code_sec.tag == Some(TX_UNBOND_WASM.to_string()) { let unbond = pos::Unbond::try_from_slice( - &tx.data(cmt) + &tx.data(inner_tx.clone()) .ok_or_else(|| Error::Other("Invalid Data".to_string()))?, ) .map_err(|err| { @@ -1648,7 +1649,7 @@ pub async fn to_ledger_vector( ]); } else if code_sec.tag == Some(TX_WITHDRAW_WASM.to_string()) { let withdraw = 
pos::Withdraw::try_from_slice( - &tx.data(cmt) + &tx.data(inner_tx.clone()) .ok_or_else(|| Error::Other("Invalid Data".to_string()))?, ) .map_err(|err| { @@ -1671,7 +1672,7 @@ pub async fn to_ledger_vector( .push(format!("Validator : {}", withdraw.validator)); } else if code_sec.tag == Some(TX_CLAIM_REWARDS_WASM.to_string()) { let claim = pos::Withdraw::try_from_slice( - &tx.data(cmt) + &tx.data(inner_tx.clone()) .ok_or_else(|| Error::Other("Invalid Data".to_string()))?, ) .map_err(|err| { @@ -1693,7 +1694,7 @@ pub async fn to_ledger_vector( .push(format!("Validator : {}", claim.validator)); } else if code_sec.tag == Some(TX_CHANGE_COMMISSION_WASM.to_string()) { let commission_change = pos::CommissionChange::try_from_slice( - &tx.data(cmt) + &tx.data(inner_tx.clone()) .ok_or_else(|| Error::Other("Invalid Data".to_string()))?, ) .map_err(|err| { @@ -1714,7 +1715,7 @@ pub async fn to_ledger_vector( ]); } else if code_sec.tag == Some(TX_CHANGE_METADATA_WASM.to_string()) { let metadata_change = pos::MetaDataChange::try_from_slice( - &tx.data(cmt) + &tx.data(inner_tx.clone()) .ok_or_else(|| Error::Other("Invalid Data".to_string()))?, ) .map_err(|err| { @@ -1757,7 +1758,7 @@ pub async fn to_ledger_vector( } else if code_sec.tag == Some(TX_CHANGE_CONSENSUS_KEY_WASM.to_string()) { let consensus_key_change = pos::ConsensusKeyChange::try_from_slice( - &tx.data(cmt) + &tx.data(inner_tx.clone()) .ok_or_else(|| Error::Other("Invalid Data".to_string()))?, ) .map_err(|err| { @@ -1784,7 +1785,7 @@ pub async fn to_ledger_vector( ]); } else if code_sec.tag == Some(TX_UNJAIL_VALIDATOR_WASM.to_string()) { let address = Address::try_from_slice( - &tx.data(cmt) + &tx.data(inner_tx.clone()) .ok_or_else(|| Error::Other("Invalid Data".to_string()))?, ) .map_err(|err| { @@ -1802,7 +1803,7 @@ pub async fn to_ledger_vector( } else if code_sec.tag == Some(TX_DEACTIVATE_VALIDATOR_WASM.to_string()) { let address = Address::try_from_slice( - &tx.data(cmt) + &tx.data(inner_tx.clone()) 
.ok_or_else(|| Error::Other("Invalid Data".to_string()))?, ) .map_err(|err| { @@ -1820,7 +1821,7 @@ pub async fn to_ledger_vector( } else if code_sec.tag == Some(TX_REACTIVATE_VALIDATOR_WASM.to_string()) { let address = Address::try_from_slice( - &tx.data(cmt) + &tx.data(inner_tx.clone()) .ok_or_else(|| Error::Other("Invalid Data".to_string()))?, ) .map_err(|err| { @@ -1837,7 +1838,7 @@ pub async fn to_ledger_vector( tv.output_expert.push(format!("Validator : {}", address)); } else if code_sec.tag == Some(TX_REDELEGATE_WASM.to_string()) { let redelegation = pos::Redelegation::try_from_slice( - &tx.data(cmt) + &tx.data(inner_tx.clone()) .ok_or_else(|| Error::Other("Invalid Data".to_string()))?, ) .map_err(|err| { @@ -1875,7 +1876,7 @@ pub async fn to_ledger_vector( } else if code_sec.tag == Some(TX_UPDATE_STEWARD_COMMISSION.to_string()) { let update = UpdateStewardCommission::try_from_slice( - &tx.data(cmt) + &tx.data(inner_tx.clone()) .ok_or_else(|| Error::Other("Invalid Data".to_string()))?, ) .map_err(|err| { @@ -1900,7 +1901,7 @@ pub async fn to_ledger_vector( } } else if code_sec.tag == Some(TX_RESIGN_STEWARD.to_string()) { let address = Address::try_from_slice( - &tx.data(cmt) + &tx.data(inner_tx.clone()) .ok_or_else(|| Error::Other("Invalid Data".to_string()))?, ) .map_err(|err| { @@ -1917,7 +1918,7 @@ pub async fn to_ledger_vector( tv.output_expert.push(format!("Steward : {}", address)); } else if code_sec.tag == Some(TX_BRIDGE_POOL_WASM.to_string()) { let transfer = PendingTransfer::try_from_slice( - &tx.data(cmt) + &tx.data(inner_tx) .ok_or_else(|| Error::Other("Invalid Data".to_string()))?, ) .map_err(|err| { diff --git a/crates/sdk/src/tx.rs b/crates/sdk/src/tx.rs index 9e74fd3cc5..3e03cac17e 100644 --- a/crates/sdk/src/tx.rs +++ b/crates/sdk/src/tx.rs @@ -200,7 +200,7 @@ pub fn dump_tx(io: &IO, args: &args::Tx, tx: Tx) { )); let out = File::create(&tx_path) .expect("Should be able to create a file to dump tx"); - serde_json::to_writer_pretty(out, &tx) + 
tx.to_writer_json(out) .expect("Should be able to write to file."); display_line!( io, @@ -3734,7 +3734,7 @@ pub async fn build_custom( let fee_amount = validate_fee(context, tx_args).await?; let mut tx = if let Some(serialized_tx) = serialized_tx { - Tx::deserialize(serialized_tx.as_ref()).map_err(|_| { + Tx::try_from_json_bytes(serialized_tx.as_ref()).map_err(|_| { Error::Other( "Invalid tx deserialization. Please make sure you are passing \ a file in .tx format, typically produced from using the \ diff --git a/crates/shielded_token/src/vp.rs b/crates/shielded_token/src/vp.rs index ef8cdc2781..dc98e78217 100644 --- a/crates/shielded_token/src/vp.rs +++ b/crates/shielded_token/src/vp.rs @@ -117,7 +117,7 @@ where ctx: &'ctx CTX, tx: &BatchedTxRef<'_>, ) -> Result<()> { - tx.tx.data(tx.cmt).map_or_else( + tx.data().map_or_else( || { Err(Error::new_const( "MASP parameter changes require tx data to be present", @@ -398,10 +398,7 @@ where ) .map_err(Error::new_const)?; let conversion_state = ctx.conversion_state(); - let tx_data = batched_tx - .tx - .data(batched_tx.cmt) - .ok_or_err_msg("No transaction data")?; + let tx_data = batched_tx.data().ok_or_err_msg("No transaction data")?; let actions = ctx.read_actions()?; // Try to get the Transaction object from the tx first (IBC) and from // the actions afterwards diff --git a/crates/tests/src/integration/ledger_tests.rs b/crates/tests/src/integration/ledger_tests.rs index 7d6322d448..dac6f9a52b 100644 --- a/crates/tests/src/integration/ledger_tests.rs +++ b/crates/tests/src/integration/ledger_tests.rs @@ -27,7 +27,7 @@ use namada_sdk::collections::HashMap; use namada_sdk::migrations; use namada_sdk::queries::RPC; use namada_sdk::token::{self, DenominatedAmount}; -use namada_sdk::tx::{TX_TRANSFER_WASM, VP_USER_WASM}; +use namada_sdk::tx::{self, Tx, TX_TRANSFER_WASM, VP_USER_WASM}; use namada_test_utils::TestWasms; use test_log::test; @@ -1901,9 +1901,9 @@ fn enforce_fee_payment() -> Result<()> { let mut txs = vec![]; for 
bytes in txs_bytes { - let mut tx = namada_sdk::tx::Tx::deserialize(&bytes).unwrap(); + let mut tx = Tx::try_from_json_bytes(&bytes).unwrap(); tx.add_wrapper( - namada_sdk::tx::data::wrapper::Fee { + tx::data::wrapper::Fee { amount_per_gas_unit: DenominatedAmount::native( token::Amount::native_whole(1), ), diff --git a/crates/tests/src/integration/masp.rs b/crates/tests/src/integration/masp.rs index 07690d1e0e..b9e5442cfc 100644 --- a/crates/tests/src/integration/masp.rs +++ b/crates/tests/src/integration/masp.rs @@ -16,7 +16,8 @@ use namada_sdk::state::{StorageRead, StorageWrite}; use namada_sdk::time::DateTimeUtc; use namada_sdk::token::storage_key::masp_token_map_key; use namada_sdk::token::{self, DenominatedAmount}; -use namada_sdk::DEFAULT_GAS_LIMIT; +use namada_sdk::tx::Tx; +use namada_sdk::{tx, DEFAULT_GAS_LIMIT}; use test_log::test; use super::setup; @@ -1503,9 +1504,9 @@ fn multiple_unfetched_txs_same_block() -> Result<()> { .clone(); let mut txs = vec![]; for bytes in txs_bytes { - let mut tx = namada_sdk::tx::Tx::deserialize(&bytes).unwrap(); + let mut tx = Tx::try_from_json_bytes(&bytes).unwrap(); tx.add_wrapper( - namada_sdk::tx::data::wrapper::Fee { + tx::data::wrapper::Fee { amount_per_gas_unit: DenominatedAmount::native(1.into()), token: native_token.clone(), }, @@ -1640,7 +1641,7 @@ fn expired_masp_tx() -> Result<()> { .in_mem() .native_token .clone(); - let mut tx = namada_sdk::tx::Tx::deserialize(&tx_bytes).unwrap(); + let mut tx = Tx::try_from_json_bytes(&tx_bytes).unwrap(); // Remove the expiration field to avoid a failure because of it, we only // want to check the expiration in the masp vp tx.header.expiration = None; diff --git a/crates/tests/src/vm_host_env/mod.rs b/crates/tests/src/vm_host_env/mod.rs index 0a7a3c858a..77cd09c0a5 100644 --- a/crates/tests/src/vm_host_env/mod.rs +++ b/crates/tests/src/vm_host_env/mod.rs @@ -37,7 +37,7 @@ mod tests { use namada_sdk::storage::{self, BlockHeight, Key, KeySeg}; use namada_sdk::time::DateTimeUtc; 
use namada_sdk::token::{self, Amount}; - use namada_sdk::tx::Tx; + use namada_sdk::tx::{InnerTxRef, Tx}; use namada_sdk::{address, key}; use namada_test_utils::TestWasms; use namada_tx_env::TxEnv; @@ -562,7 +562,10 @@ mod tests { env.batched_tx = batched_tx; env.batched_tx.clone() }); - assert_eq!(tx.data(&cmt).as_ref(), Some(data)); + assert_eq!( + tx.data(InnerTxRef::Commitment(&cmt)).as_ref(), + Some(data) + ); assert!( tx.verify_signatures( &[tx.header_hash(),], diff --git a/crates/trans_token/src/vp.rs b/crates/trans_token/src/vp.rs index 64b7aae644..8c6bf49ba3 100644 --- a/crates/trans_token/src/vp.rs +++ b/crates/trans_token/src/vp.rs @@ -43,14 +43,14 @@ where /// Run the validity predicate pub fn validate_tx( ctx: &'ctx CTX, - tx_data: &BatchedTxRef<'_>, + tx: &BatchedTxRef<'_>, keys_changed: &BTreeSet, verifiers: &BTreeSet
, ) -> Result<()> { // Is VP triggered by a governance proposal? if Gov::is_proposal_accepted( &ctx.pre(), - tx_data.tx.data(tx_data.cmt).unwrap_or_default().as_ref(), + tx.data().unwrap_or_default().as_ref(), )? { return Ok(()); } @@ -178,7 +178,7 @@ where } else if let Some(token) = is_any_minter_key(key) { Self::is_valid_minter(ctx, token, verifiers)?; } else if is_any_token_parameter_key(key).is_some() { - return Self::is_valid_parameter(ctx, tx_data); + return Self::is_valid_parameter(ctx, tx); } else if key.segments.first() == Some( &Address::Internal(InternalAddress::Multitoken).to_db_key(), @@ -275,7 +275,7 @@ where ctx: &'ctx CTX, batched_tx: &BatchedTxRef<'_>, ) -> Result<()> { - batched_tx.tx.data(batched_tx.cmt).map_or_else( + batched_tx.data().map_or_else( || { Err(Error::new_const( "Token parameter changes require tx data to be present", diff --git a/crates/tx/src/action.rs b/crates/tx/src/action.rs index 449647fa3d..4b477a2d2c 100644 --- a/crates/tx/src/action.rs +++ b/crates/tx/src/action.rs @@ -69,7 +69,7 @@ pub enum PgfAction { /// MASP tx actions. #[derive(Clone, Debug, BorshDeserialize, BorshSerialize, PartialEq)] pub enum MaspAction { - /// The hash of the masp [`crate::types::Section`] + /// The hash of the masp [`crate::Section`] MaspSectionRef(MaspTxId), /// A required authorizer for the transaction MaspAuthorizer(Address), diff --git a/crates/tx/src/data/mod.rs b/crates/tx/src/data/mod.rs index 364e7f4af9..a299ac9b2e 100644 --- a/crates/tx/src/data/mod.rs +++ b/crates/tx/src/data/mod.rs @@ -37,7 +37,7 @@ use sha2::{Digest, Sha256}; pub use wrapper::*; use crate::data::protocol::ProtocolTx; -use crate::types::TxCommitments; +use crate::TxCommitments; /// The different result codes that the ledger may send back to a client /// indicating the status of their submitted tx. 
diff --git a/crates/tx/src/lib.rs b/crates/tx/src/lib.rs index de7406595d..53f79fcb57 100644 --- a/crates/tx/src/lib.rs +++ b/crates/tx/src/lib.rs @@ -21,6 +21,7 @@ pub mod action; pub mod data; pub mod event; pub mod proto; +mod section; mod sign; mod types; @@ -28,12 +29,17 @@ use data::TxType; pub use either; pub use event::new_tx_event; pub use namada_core::key::SignableEthMessage; -pub use sign::{SigIndexDecodeError, SignatureIndex}; +pub use section::{ + Authorization, Code, Commitment, CompressedAuthorization, Data, Header, + MaspBuilder, Memo, Section, Signer, TxCommitments, +}; +pub use sign::{ + standalone_signature, verify_standalone_sig, SignatureIndex, Signed, + VerifySigError, +}; pub use types::{ - standalone_signature, verify_standalone_sig, Authorization, BatchedTx, - BatchedTxRef, Code, Commitment, CompressedAuthorization, Data, DecodeError, - Header, IndexedTx, IndexedTxRange, MaspBuilder, Memo, Section, Signed, - Signer, Tx, TxCommitments, TxError, VerifySigError, + BatchedTx, BatchedTxRef, DecodeError, IndexedTx, IndexedTxRange, + InnerTxRef, Tx, TxError, }; /// Length of the transaction sections salt diff --git a/crates/tx/src/section.rs b/crates/tx/src/section.rs new file mode 100644 index 0000000000..fb416d24c7 --- /dev/null +++ b/crates/tx/src/section.rs @@ -0,0 +1,887 @@ +use std::collections::BTreeMap; +use std::hash::Hash; + +use masp_primitives::transaction::builder::Builder; +use masp_primitives::transaction::components::sapling::builder::SaplingMetadata; +use masp_primitives::transaction::Transaction; +use masp_primitives::zip32::ExtendedFullViewingKey; +use namada_account::AccountPublicKeysMap; +use namada_core::address::Address; +use namada_core::borsh::{ + self, BorshDeserialize, BorshSchema, BorshSerialize, BorshSerializeExt, +}; +use namada_core::chain::ChainId; +use namada_core::collections::HashSet; +use namada_core::key::*; +use namada_core::masp::{AssetData, MaspTxId}; +use namada_core::time::DateTimeUtc; +use 
namada_macros::BorshDeserializer; +#[cfg(feature = "migrations")] +use namada_migrations::*; +use serde::de::Error as SerdeError; +use serde::{Deserialize, Serialize}; +use sha2::{Digest, Sha256}; + +use crate::data::protocol::ProtocolTx; +use crate::data::{hash_tx, TxType, WrapperTx}; +use crate::sign::VerifySigError; +use crate::{hex_data_serde, hex_salt_serde, Tx, SALT_LENGTH}; + +/// A section of a transaction. Carries an independent piece of information +/// necessary for the processing of a transaction. +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +#[derive( + Clone, + Debug, + BorshSerialize, + BorshDeserialize, + BorshDeserializer, + BorshSchema, + Serialize, + Deserialize, + PartialEq, +)] +pub enum Section { + /// Transaction data that needs to be sent to hardware wallets + Data(Data), + /// Transaction data that does not need to be sent to hardware wallets + ExtraData(Code), + /// Transaction code. Sending to hardware wallets optional + Code(Code), + /// A transaction header/protocol signature + Authorization(Authorization), + /// Embedded MASP transaction section + #[serde( + serialize_with = "borsh_serde::", + deserialize_with = "serde_borsh::" + )] + MaspTx(Transaction), + /// A section providing the auxiliary inputs used to construct a MASP + /// transaction. Only send to wallet, never send to protocol. 
+ MaspBuilder(MaspBuilder), + /// Wrap a header with a section for the purposes of computing hashes + Header(Header), +} + +/// A Namada transaction header indicating where transaction subcomponents can +/// be found +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +#[derive( + Clone, + Debug, + BorshSerialize, + BorshDeserialize, + BorshDeserializer, + BorshSchema, + Serialize, + Deserialize, + PartialEq, +)] +pub struct Header { + /// The chain which this transaction is being submitted to + pub chain_id: ChainId, + /// The time at which this transaction expires + pub expiration: Option, + /// A transaction timestamp + pub timestamp: DateTimeUtc, + /// The commitments to the transaction's sections + pub batch: HashSet, + /// Whether the inner txs should be executed atomically + pub atomic: bool, + /// The type of this transaction + pub tx_type: TxType, +} + +impl Header { + /// Make a new header of the given transaction type + pub fn new(tx_type: TxType) -> Self { + Self { + tx_type, + chain_id: ChainId::default(), + expiration: None, + #[allow(clippy::disallowed_methods)] + timestamp: DateTimeUtc::now(), + batch: Default::default(), + atomic: Default::default(), + } + } + + /// Get the hash of this transaction header. + pub fn hash<'a>(&self, hasher: &'a mut Sha256) -> &'a mut Sha256 { + hasher.update(self.serialize_to_vec()); + hasher + } + + /// Get the wrapper header if it is present + pub fn wrapper(&self) -> Option { + if let TxType::Wrapper(wrapper) = &self.tx_type { + Some(*wrapper.clone()) + } else { + None + } + } + + /// Get the protocol header if it is present + pub fn protocol(&self) -> Option { + if let TxType::Protocol(protocol) = &self.tx_type { + Some(*protocol.clone()) + } else { + None + } + } +} + +impl Section { + /// Hash this section. Section hashes are useful for signatures and also for + /// allowing transaction sections to cross reference. 
+ pub fn hash<'a>(&self, hasher: &'a mut Sha256) -> &'a mut Sha256 { + // Get the index corresponding to this variant + let discriminant = self.serialize_to_vec()[0]; + // Use Borsh's discriminant in the Section's hash + hasher.update([discriminant]); + match self { + Self::Data(data) => data.hash(hasher), + Self::ExtraData(extra) => extra.hash(hasher), + Self::Code(code) => code.hash(hasher), + Self::Authorization(signature) => signature.hash(hasher), + Self::MaspBuilder(mb) => mb.hash(hasher), + Self::MaspTx(tx) => { + hasher.update(tx.serialize_to_vec()); + hasher + } + Self::Header(header) => header.hash(hasher), + } + } + + /// Get the hash of this section + pub fn get_hash(&self) -> namada_core::hash::Hash { + namada_core::hash::Hash( + self.hash(&mut Sha256::new()).finalize_reset().into(), + ) + } + + /// Extract the data from this section if possible + pub fn data(&self) -> Option { + if let Self::Data(data) = self { + Some(data.clone()) + } else { + None + } + } + + /// Extract the extra data from this section if possible + pub fn extra_data_sec(&self) -> Option { + if let Self::ExtraData(data) = self { + Some(data.clone()) + } else { + None + } + } + + /// Extract the extra data from this section if possible + pub fn extra_data(&self) -> Option> { + if let Self::ExtraData(data) = self { + data.code.id() + } else { + None + } + } + + /// Extract the code from this section is possible + pub fn code_sec(&self) -> Option { + if let Self::Code(data) = self { + Some(data.clone()) + } else { + None + } + } + + /// Extract the code from this section is possible + pub fn code(&self) -> Option> { + if let Self::Code(data) = self { + data.code.id() + } else { + None + } + } + + /// Extract the signature from this section if possible + pub fn signature(&self) -> Option { + if let Self::Authorization(data) = self { + Some(data.clone()) + } else { + None + } + } + + /// Extract the MASP transaction from this section if possible + pub fn masp_tx(&self) -> Option { + if 
let Self::MaspTx(data) = self { + Some(data.clone()) + } else { + None + } + } + + /// Extract the MASP builder from this section if possible + pub fn masp_builder(&self) -> Option { + if let Self::MaspBuilder(data) = self { + Some(data.clone()) + } else { + None + } + } +} + +/// A section representing transaction data +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +#[derive( + Clone, + Debug, + BorshSerialize, + BorshDeserialize, + BorshDeserializer, + BorshSchema, + Serialize, + Deserialize, +)] +pub struct Data { + /// Salt with additional random data (usually a timestamp) + #[serde(with = "hex_salt_serde")] + pub salt: [u8; SALT_LENGTH], + /// Data bytes + #[serde(with = "hex_data_serde")] + pub data: Vec, +} + +impl PartialEq for Data { + fn eq(&self, other: &Self) -> bool { + self.data == other.data + } +} + +impl Data { + /// Make a new data section with the given bytes + pub fn new(data: Vec) -> Self { + use rand_core::{OsRng, RngCore}; + + Self { + salt: { + let mut buf = [0; SALT_LENGTH]; + OsRng.fill_bytes(&mut buf); + buf + }, + data, + } + } + + /// Hash this data section + pub fn hash<'a>(&self, hasher: &'a mut Sha256) -> &'a mut Sha256 { + hasher.update(self.serialize_to_vec()); + hasher + } +} + +/// Represents either some code bytes or their SHA-256 hash +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +#[derive( + Clone, + Debug, + BorshSerialize, + BorshDeserialize, + BorshDeserializer, + BorshSchema, + Serialize, + Deserialize, +)] +pub enum Commitment { + /// Result of applying hash function to bytes + Hash(namada_core::hash::Hash), + /// Result of applying identity function to bytes + Id(Vec), +} + +impl PartialEq for Commitment { + fn eq(&self, other: &Self) -> bool { + self.hash() == other.hash() + } +} + +impl Commitment { + /// Return the contained hash commitment + pub fn hash(&self) -> namada_core::hash::Hash { + match self { + Self::Id(code) => hash_tx(code), + Self::Hash(hash) => *hash, + } + } + + 
/// Return the result of applying identity function if there is any + pub fn id(&self) -> Option> { + if let Self::Id(code) = self { + Some(code.clone()) + } else { + None + } + } +} + +/// A section representing transaction code +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +#[derive( + Clone, + Debug, + BorshSerialize, + BorshDeserialize, + BorshDeserializer, + BorshSchema, + Serialize, + Deserialize, +)] +pub struct Code { + /// Additional random data + #[serde(with = "hex_salt_serde")] + pub salt: [u8; SALT_LENGTH], + /// Actual transaction code + pub code: Commitment, + /// The tag for the transaction code + pub tag: Option, +} + +impl PartialEq for Code { + fn eq(&self, other: &Self) -> bool { + self.code == other.code + } +} + +impl Code { + /// Make a new code section with the given bytes + pub fn new(code: Vec, tag: Option) -> Self { + use rand_core::{OsRng, RngCore}; + + Self { + salt: { + let mut buf = [0; SALT_LENGTH]; + OsRng.fill_bytes(&mut buf); + buf + }, + code: Commitment::Id(code), + tag, + } + } + + /// Make a new code section with the given hash + pub fn from_hash( + hash: namada_core::hash::Hash, + tag: Option, + ) -> Self { + use rand_core::{OsRng, RngCore}; + + Self { + salt: { + let mut buf = [0; SALT_LENGTH]; + OsRng.fill_bytes(&mut buf); + buf + }, + code: Commitment::Hash(hash), + tag, + } + } + + /// Hash this code section + pub fn hash<'a>(&self, hasher: &'a mut Sha256) -> &'a mut Sha256 { + hasher.update(self.salt); + hasher.update(self.code.hash()); + hasher.update(self.tag.serialize_to_vec()); + hasher + } +} + +/// A memo field (bytes). 
+pub type Memo = Vec; + +/// Indicates the list of public keys against which signatures will be verified +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +#[derive( + Clone, + Debug, + BorshSerialize, + BorshDeserialize, + BorshDeserializer, + BorshSchema, + Serialize, + Deserialize, + PartialEq, +)] +pub enum Signer { + /// The address of a multisignature account + Address(Address), + /// The public keys that constitute a signer + PubKeys(Vec), +} + +/// A section representing a multisig over another section +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +#[derive( + Clone, + Debug, + BorshSerialize, + BorshDeserialize, + BorshDeserializer, + BorshSchema, + Serialize, + Deserialize, + PartialEq, +)] +pub struct Authorization { + /// The hash of the section being signed + pub targets: Vec, + /// The public keys against which the signatures should be verified + pub signer: Signer, + /// The signature over the above hash + pub signatures: BTreeMap, +} + +impl Authorization { + /// Sign the given section hash with the given key and return a section + pub fn new( + targets: Vec, + secret_keys: BTreeMap, + signer: Option
, + ) -> Self { + // If no signer address is given, then derive the signer's public keys + // from the given secret keys. + let signer = if let Some(addr) = signer { + Signer::Address(addr) + } else { + // Make sure the corresponding public keys can be represented by a + // vector instead of a map + assert!( + secret_keys + .keys() + .cloned() + .eq(0..(u8::try_from(secret_keys.len()) + .expect("Number of SKs must not exceed `u8::MAX`"))), + "secret keys must be enumerated when signer address is absent" + ); + Signer::PubKeys(secret_keys.values().map(RefTo::ref_to).collect()) + }; + + // Commit to the given targets + let partial = Self { + targets, + signer, + signatures: BTreeMap::new(), + }; + let target = partial.get_raw_hash(); + // Turn the map of secret keys into a map of signatures over the + // commitment made above + let signatures = secret_keys + .iter() + .map(|(index, secret_key)| { + (*index, common::SigScheme::sign(secret_key, target)) + }) + .collect(); + Self { + signatures, + ..partial + } + } + + /// Hash this signature section + pub fn hash<'a>(&self, hasher: &'a mut Sha256) -> &'a mut Sha256 { + hasher.update(self.serialize_to_vec()); + hasher + } + + /// Get the hash of this section + pub fn get_hash(&self) -> namada_core::hash::Hash { + namada_core::hash::Hash( + self.hash(&mut Sha256::new()).finalize_reset().into(), + ) + } + + /// Get a hash of this section with its signer and signatures removed + pub fn get_raw_hash(&self) -> namada_core::hash::Hash { + Self { + signer: Signer::PubKeys(vec![]), + signatures: BTreeMap::new(), + ..self.clone() + } + .get_hash() + } + + /// Verify that the signature contained in this section is valid + pub fn verify_signature( + &self, + verified_pks: &mut HashSet, + public_keys_index_map: &AccountPublicKeysMap, + signer: &Option
, + consume_verify_sig_gas: &mut F, + ) -> std::result::Result + where + F: FnMut() -> std::result::Result<(), namada_gas::Error>, + { + // Records whether there are any successful verifications + let mut verifications = 0; + match &self.signer { + // Verify the signatures against the given public keys if the + // account addresses match + Signer::Address(addr) if Some(addr) == signer.as_ref() => { + for (idx, sig) in &self.signatures { + if let Some(pk) = + public_keys_index_map.get_public_key_from_index(*idx) + { + consume_verify_sig_gas()?; + common::SigScheme::verify_signature( + &pk, + &self.get_raw_hash(), + sig, + )?; + verified_pks.insert(*idx); + // Cannot overflow + #[allow(clippy::arithmetic_side_effects)] + { + verifications += 1; + } + } + } + } + // If the account addresses do not match, then there is no efficient + // way to map signatures to the given public keys + Signer::Address(_) => {} + // Verify the signatures against the subset of this section's public + // keys that are also in the given map + Signer::PubKeys(pks) => { + let hash = self.get_raw_hash(); + for (idx, pk) in pks.iter().enumerate() { + let map_idx = + public_keys_index_map.get_index_from_public_key(pk); + + // Use the first signature when fuzzing as the map is + // unlikely to contain matching PKs + #[cfg(fuzzing)] + let map_idx = map_idx.or(Some(0_u8)); + + if let Some(map_idx) = map_idx { + let sig_idx = u8::try_from(idx) + .map_err(|_| VerifySigError::PksOverflow)?; + consume_verify_sig_gas()?; + let sig = self + .signatures + .get(&sig_idx) + .ok_or(VerifySigError::MissingSignature)?; + common::SigScheme::verify_signature(pk, &hash, sig)?; + verified_pks.insert(map_idx); + // Cannot overflow + #[allow(clippy::arithmetic_side_effects)] + { + verifications += 1; + } + } + } + } + } + + // There's usually not enough signatures when fuzzing, this makes it + // more likely to pass authorization. 
+ #[cfg(fuzzing)] + { + verifications = 1; + } + + Ok(verifications) + } +} + +/// A section representing a multisig over another section +#[derive( + Clone, + Debug, + BorshSerialize, + BorshDeserialize, + BorshDeserializer, + BorshSchema, + Serialize, + Deserialize, +)] +pub struct CompressedAuthorization { + /// The hash of the section being signed + pub targets: Vec, + /// The public keys against which the signatures should be verified + pub signer: Signer, + /// The signature over the above hash + pub signatures: BTreeMap, +} + +impl CompressedAuthorization { + /// Decompress this signature object with respect to the given transaction + /// by looking up the necessary section hashes. Used by constrained hardware + /// wallets. + pub fn expand(self, tx: &Tx) -> Authorization { + let mut targets = Vec::new(); + for idx in self.targets { + if idx == 0 { + // The "zeroth" section is the header + targets.push(tx.header_hash()); + } else if idx == 255 { + // The 255th section is the raw header + targets.push(tx.raw_header_hash()); + } else { + targets.push( + tx.sections[(idx as usize) + .checked_sub(1) + .expect("cannot underflow")] + .get_hash(), + ); + } + } + Authorization { + targets, + signer: self.signer, + signatures: self.signatures, + } + } +} + +/// An inner transaction of the batch, represented by its commitments to the +/// [`Code`], [`Data`] and [`Memo`] sections +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +#[derive( + Clone, + Debug, + Default, + BorshSerialize, + BorshDeserialize, + BorshDeserializer, + BorshSchema, + Serialize, + Deserialize, + Eq, + PartialEq, + Ord, + PartialOrd, + Hash, +)] +pub struct TxCommitments { + /// The SHA-256 hash of the transaction's code section + pub code_hash: namada_core::hash::Hash, + /// The SHA-256 hash of the transaction's data section + pub data_hash: namada_core::hash::Hash, + /// The SHA-256 hash of the transaction's memo section + /// + /// In case a memo is not present in the 
transaction, a + /// byte array filled with zeroes is present instead + pub memo_hash: namada_core::hash::Hash, +} + +impl TxCommitments { + /// Get the hash of this transaction's code + pub fn code_sechash(&self) -> &namada_core::hash::Hash { + &self.code_hash + } + + /// Get the transaction data hash + pub fn data_sechash(&self) -> &namada_core::hash::Hash { + &self.data_hash + } + + /// Get the hash of this transaction's memo + pub fn memo_sechash(&self) -> &namada_core::hash::Hash { + &self.memo_hash + } + + /// Hash the commitments to the transaction's sections + pub fn hash<'a>(&self, hasher: &'a mut Sha256) -> &'a mut Sha256 { + hasher.update(self.serialize_to_vec()); + hasher + } + + /// Get the hash of this Commitments + pub fn get_hash(&self) -> namada_core::hash::Hash { + namada_core::hash::Hash( + self.hash(&mut Sha256::new()).finalize_reset().into(), + ) + } +} + +/// A section providing the auxiliary inputs used to construct a MASP +/// transaction +#[derive( + Clone, + Debug, + BorshSerialize, + BorshDeserialize, + BorshSchema, + Serialize, + Deserialize, +)] +pub struct MaspBuilder { + /// The MASP transaction that this section witnesses + pub target: MaspTxId, + /// The decoded set of asset types used by the transaction. Useful for + /// offline wallets trying to display AssetTypes. + pub asset_types: HashSet, + /// Track how Info objects map to descriptors and outputs + #[serde( + serialize_with = "borsh_serde::", + deserialize_with = "serde_borsh::" + )] + pub metadata: SaplingMetadata, + /// The data that was used to construct the target transaction + #[serde( + serialize_with = "borsh_serde::", + deserialize_with = "serde_borsh::" + )] + pub builder: Builder<(), ExtendedFullViewingKey, ()>, +} + +impl PartialEq for MaspBuilder { + fn eq(&self, other: &Self) -> bool { + self.target == other.target + } +} + +impl MaspBuilder { + /// Get the hash of this ciphertext section. 
This operation is done in such + /// a way it matches the hash of the type pun + pub fn hash<'a>(&self, hasher: &'a mut Sha256) -> &'a mut Sha256 { + hasher.update(self.serialize_to_vec()); + hasher + } +} + +#[cfg(feature = "arbitrary")] +impl arbitrary::Arbitrary<'_> for MaspBuilder { + fn arbitrary( + u: &mut arbitrary::Unstructured<'_>, + ) -> arbitrary::Result { + use masp_primitives::transaction::builder::MapBuilder; + use masp_primitives::transaction::components::sapling::builder::MapBuilder as SapMapBuilder; + use masp_primitives::zip32::ExtendedSpendingKey; + struct WalletMap; + + impl + SapMapBuilder + for WalletMap + { + fn map_params(&self, _s: P1) {} + + fn map_key( + &self, + s: ExtendedSpendingKey, + ) -> ExtendedFullViewingKey { + (&s).into() + } + } + impl + MapBuilder< + P1, + ExtendedSpendingKey, + N1, + (), + ExtendedFullViewingKey, + (), + > for WalletMap + { + fn map_notifier(&self, _s: N1) {} + } + + let target_height = masp_primitives::consensus::BlockHeight::from( + u.int_in_range(0_u32..=100_000_000)?, + ); + Ok(MaspBuilder { + target: arbitrary::Arbitrary::arbitrary(u)?, + asset_types: arbitrary::Arbitrary::arbitrary(u)?, + metadata: arbitrary::Arbitrary::arbitrary(u)?, + builder: Builder::new( + masp_primitives::consensus::TestNetwork, + target_height, + ) + .map_builder(WalletMap), + }) + } + + fn size_hint(depth: usize) -> (usize, Option) { + arbitrary::size_hint::and_all( + &[ + ::size_hint(depth), + ::size_hint(depth), + as arbitrary::Arbitrary>::size_hint(depth), + ::size_hint(depth), + ], + ) + } +} + +#[derive(serde::Serialize, serde::Deserialize)] +struct TransactionSerde(Vec); + +impl From> for TransactionSerde { + fn from(tx: Vec) -> Self { + Self(tx) + } +} + +impl From for Vec { + fn from(tx: TransactionSerde) -> Vec { + tx.0 + } +} + +/// A structure to facilitate Serde (de)serializations of Builders +#[derive(serde::Serialize, serde::Deserialize)] +struct BuilderSerde(Vec); + +impl From> for BuilderSerde { + fn from(tx: 
Vec) -> Self { + Self(tx) + } +} + +impl From for Vec { + fn from(tx: BuilderSerde) -> Vec { + tx.0 + } +} + +/// A structure to facilitate Serde (de)serializations of SaplingMetadata +#[derive(serde::Serialize, serde::Deserialize)] +pub struct SaplingMetadataSerde(Vec); + +impl From> for SaplingMetadataSerde { + fn from(tx: Vec) -> Self { + Self(tx) + } +} + +impl From for Vec { + fn from(tx: SaplingMetadataSerde) -> Vec { + tx.0 + } +} + +fn borsh_serde( + obj: &impl BorshSerialize, + ser: S, +) -> std::result::Result +where + S: serde::Serializer, + T: From>, + T: serde::Serialize, +{ + Into::::into(obj.serialize_to_vec()).serialize(ser) +} + +fn serde_borsh<'de, T, S, U>(ser: S) -> std::result::Result +where + S: serde::Deserializer<'de>, + T: Into>, + T: serde::Deserialize<'de>, + U: BorshDeserialize, +{ + BorshDeserialize::try_from_slice(&Into::>::into(T::deserialize( + ser, + )?)) + .map_err(S::Error::custom) +} diff --git a/crates/tx/src/sign.rs b/crates/tx/src/sign.rs index 296778e0ae..8bd203b25d 100644 --- a/crates/tx/src/sign.rs +++ b/crates/tx/src/sign.rs @@ -1,25 +1,160 @@ //! 
Types for signing use std::cmp::Ordering; +use std::collections::BTreeMap; +use std::hash::{Hash, Hasher}; +use std::io; +use std::marker::PhantomData; +use borsh::schema::{self, Declaration, Definition}; use namada_core::address::Address; use namada_core::borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; -use namada_core::key::common; +use namada_core::key::{common, SerializeWithBorsh, SigScheme, Signable}; use namada_macros::BorshDeserializer; #[cfg(feature = "migrations")] use namada_migrations::*; use serde::{Deserialize, Serialize}; use thiserror::Error; +/// Represents an error in signature verification #[allow(missing_docs)] #[derive(Error, Debug)] -pub enum SigIndexDecodeError { - #[error("Invalid signature index bytes: {0}")] - Encoding(std::io::Error), - #[error("Invalid signature index JSON string")] - JsonString, - #[error("Invalid signature index: {0}")] - Hex(data_encoding::DecodeError), +pub enum VerifySigError { + #[error("{0}")] + VerifySig(#[from] namada_core::key::VerifySigError), + #[error("{0}")] + Gas(#[from] namada_gas::Error), + #[error("The wrapper signature is invalid.")] + InvalidWrapperSignature, + #[error("The section signature is invalid: {0}")] + InvalidSectionSignature(String), + #[error("The number of PKs overflows u8::MAX")] + PksOverflow, + #[error("An expected signature is missing.")] + MissingSignature, +} + +/// A generic signed data wrapper for serialize-able types. +/// +/// The default serialization method is [`BorshSerialize`]. 
+#[derive( + Clone, Debug, BorshSerialize, BorshDeserialize, Serialize, Deserialize, +)] +pub struct Signed { + /// Arbitrary data to be signed + pub data: T, + /// The signature of the data + pub sig: common::Signature, + /// The method to serialize the data with, + /// before it being signed + _serialization: PhantomData, +} + +impl Eq for Signed {} + +impl PartialEq for Signed { + fn eq(&self, other: &Self) -> bool { + self.data == other.data && self.sig == other.sig + } +} + +impl Hash for Signed { + fn hash(&self, state: &mut H) { + self.data.hash(state); + self.sig.hash(state); + } +} + +impl PartialOrd for Signed { + fn partial_cmp(&self, other: &Self) -> Option { + self.data.partial_cmp(&other.data) + } +} +impl Ord for Signed { + fn cmp(&self, other: &Self) -> Ordering { + self.data.cmp(&other.data) + } +} + +impl BorshSchema for Signed { + fn add_definitions_recursively( + definitions: &mut BTreeMap, + ) { + let fields = schema::Fields::NamedFields(vec![ + ("data".to_string(), T::declaration()), + ("sig".to_string(), ::declaration()), + ]); + let definition = schema::Definition::Struct { fields }; + schema::add_definition(Self::declaration(), definition, definitions); + T::add_definitions_recursively(definitions); + ::add_definitions_recursively(definitions); + } + + fn declaration() -> schema::Declaration { + format!("Signed<{}>", T::declaration()) + } +} + +impl Signed { + /// Initialize a new [`Signed`] instance from an existing signature. + #[inline] + pub fn new_from(data: T, sig: common::Signature) -> Self { + Self { + data, + sig, + _serialization: PhantomData, + } + } +} + +impl> Signed { + /// Initialize a new [`Signed`] instance. + pub fn new(keypair: &common::SecretKey, data: T) -> Self { + let to_sign = S::as_signable(&data); + let sig = + common::SigScheme::sign_with_hasher::(keypair, to_sign); + Self::new_from(data, sig) + } + + /// Verify that the data has been signed by the secret key + /// counterpart of the given public key. 
+ pub fn verify( + &self, + pk: &common::PublicKey, + ) -> std::result::Result<(), VerifySigError> { + let signed_bytes = S::as_signable(&self.data); + common::SigScheme::verify_signature_with_hasher::( + pk, + &signed_bytes, + &self.sig, + ) + .map_err(Into::into) + } +} + +/// Get a signature for data +pub fn standalone_signature>( + keypair: &common::SecretKey, + data: &T, +) -> common::Signature { + let to_sign = S::as_signable(data); + common::SigScheme::sign_with_hasher::(keypair, to_sign) +} + +/// Verify that the input data has been signed by the secret key +/// counterpart of the given public key. +pub fn verify_standalone_sig>( + data: &T, + pk: &common::PublicKey, + sig: &common::Signature, +) -> std::result::Result<(), VerifySigError> { + let signed_data = S::as_signable(data); + common::SigScheme::verify_signature_with_hasher::( + pk, + &signed_data, + sig, + ) + .map_err(Into::into) } #[derive( @@ -61,6 +196,21 @@ impl SignatureIndex { pub fn to_vec(&self) -> Vec { vec![self.clone()] } + + /// Serialize signature to pretty JSON into an I/O stream + pub fn to_writer_json(&self, writer: W) -> serde_json::Result<()> + where + W: io::Write, + { + serde_json::to_writer_pretty(writer, self) + } + + /// Try to parse a signature from JSON string bytes + pub fn try_from_json_bytes( + bytes: &[u8], + ) -> Result { + serde_json::from_slice::(bytes) + } } impl Ord for SignatureIndex { @@ -74,3 +224,61 @@ impl PartialOrd for SignatureIndex { Some(self.cmp(other)) } } + +#[cfg(test)] +mod test { + use assert_matches::assert_matches; + use namada_core::key::SigScheme; + use namada_core::{address, key}; + + use super::*; + + #[test] + fn test_signature_serialization() { + let sk = key::testing::keypair_1(); + let pubkey = sk.to_public(); + let data = [0_u8]; + let signature = key::common::SigScheme::sign(&sk, data); + let sig_index = SignatureIndex { + pubkey, + index: Some((address::testing::established_address_1(), 1)), + signature, + }; + + let mut buffer = 
vec![]; + sig_index.to_writer_json(&mut buffer).unwrap(); + + let deserialized = + SignatureIndex::try_from_json_bytes(&buffer).unwrap(); + assert_eq!(sig_index, deserialized); + } + + #[test] + fn test_standalone_signing() { + let sk1 = key::testing::keypair_1(); + let sk2 = key::testing::keypair_2(); + let data = vec![30_u8, 1, 5]; + let sig = + standalone_signature::, SerializeWithBorsh>(&sk1, &data); + + assert_matches!( + verify_standalone_sig::, SerializeWithBorsh>( + &data, + &sk1.to_public(), + &sig + ), + Ok(()) + ); + + let wrong_sig = + standalone_signature::, SerializeWithBorsh>(&sk2, &data); + assert_matches!( + verify_standalone_sig::, SerializeWithBorsh>( + &data, + &sk1.to_public(), + &wrong_sig + ), + Err(VerifySigError::VerifySig(_)) + ); + } +} diff --git a/crates/tx/src/types.rs b/crates/tx/src/types.rs index 156f62c211..d54e009404 100644 --- a/crates/tx/src/types.rs +++ b/crates/tx/src/types.rs @@ -1,1111 +1,49 @@ use std::borrow::Cow; -use std::cmp::Ordering; use std::collections::BTreeMap; -use std::hash::{Hash, Hasher}; -use std::marker::PhantomData; +use std::hash::Hash; +use std::io; use std::ops::{Bound, RangeBounds}; -use data_encoding::HEXUPPER; -use masp_primitives::transaction::builder::Builder; -use masp_primitives::transaction::components::sapling::builder::SaplingMetadata; use masp_primitives::transaction::Transaction; -use masp_primitives::zip32::ExtendedFullViewingKey; use namada_account::AccountPublicKeysMap; use namada_core::address::Address; -use namada_core::borsh::schema::{add_definition, Declaration, Definition}; use namada_core::borsh::{ self, BorshDeserialize, BorshSchema, BorshSerialize, BorshSerializeExt, }; use namada_core::chain::{BlockHeight, ChainId}; use namada_core::collections::{HashMap, HashSet}; use namada_core::key::*; -use namada_core::masp::{AssetData, MaspTxId}; +use namada_core::masp::MaspTxId; use namada_core::storage::TxIndex; use namada_core::time::DateTimeUtc; use namada_macros::BorshDeserializer; 
#[cfg(feature = "migrations")] use namada_migrations::*; -use serde::de::Error as SerdeError; use serde::{Deserialize, Serialize}; -use sha2::{Digest, Sha256}; use thiserror::Error; -use crate::data::protocol::ProtocolTx; -use crate::data::{hash_tx, Fee, GasLimit, TxType, WrapperTx}; -use crate::sign::SignatureIndex; -use crate::{hex_data_serde, hex_salt_serde, proto, SALT_LENGTH}; - -/// Represents an error in signature verification -#[allow(missing_docs)] -#[derive(Error, Debug)] -pub enum VerifySigError { - #[error("{0}")] - VerifySig(#[from] namada_core::key::VerifySigError), - #[error("{0}")] - Gas(#[from] namada_gas::Error), - #[error("The wrapper signature is invalid.")] - InvalidWrapperSignature, - #[error("The section signature is invalid: {0}")] - InvalidSectionSignature(String), - #[error("The number of PKs overflows u8::MAX")] - PksOverflow, - #[error("An expected signature is missing.")] - MissingSignature, -} - -#[allow(missing_docs)] -#[derive(Error, Debug)] -pub enum DecodeError { - #[error("Invalid signature index bytes: {0}")] - InvalidEncoding(std::io::Error), - #[error("Invalid signature index JSON string")] - InvalidJsonString, - #[error("Invalid signature index: {0}")] - InvalidHex(data_encoding::DecodeError), - #[error("Error decoding a transaction from bytes: {0}")] - TxDecodingError(prost::DecodeError), - #[error("Timestamp is empty")] - NoTimestampError, - #[error("Timestamp is invalid: {0}")] - InvalidTimestamp(prost_types::TimestampError), - #[error("Couldn't serialize transaction from JSON at {0}")] - InvalidJSONDeserialization(String), -} - -/// This can be used to sign an arbitrary tx. The signature is produced and -/// verified on the tx data concatenated with the tx code, however the tx code -/// itself is not part of this structure. -/// -/// Because the signature is not checked by the ledger, we don't inline it into -/// the `Tx` type directly. 
Instead, the signature is attached to the `tx.data`, -/// which can then be checked by a validity predicate wasm. -#[derive( - Clone, - Debug, - BorshSerialize, - BorshDeserialize, - BorshDeserializer, - BorshSchema, -)] -pub struct SignedTxData { - /// The original tx data bytes, if any - pub data: Option>, - /// The signature is produced on the tx data concatenated with the tx code - /// and the timestamp. - pub sig: common::Signature, -} - -/// A generic signed data wrapper for serialize-able types. -/// -/// The default serialization method is [`BorshSerialize`]. -#[derive( - Clone, Debug, BorshSerialize, BorshDeserialize, Serialize, Deserialize, -)] -pub struct Signed { - /// Arbitrary data to be signed - pub data: T, - /// The signature of the data - pub sig: common::Signature, - /// The method to serialize the data with, - /// before it being signed - _serialization: PhantomData, -} - -impl Eq for Signed {} - -impl PartialEq for Signed { - fn eq(&self, other: &Self) -> bool { - self.data == other.data && self.sig == other.sig - } -} - -impl Hash for Signed { - fn hash(&self, state: &mut H) { - self.data.hash(state); - self.sig.hash(state); - } -} - -impl PartialOrd for Signed { - fn partial_cmp(&self, other: &Self) -> Option { - self.data.partial_cmp(&other.data) - } -} -impl Ord for Signed { - fn cmp(&self, other: &Self) -> Ordering { - self.data.cmp(&other.data) - } -} - -impl BorshSchema for Signed { - fn add_definitions_recursively( - definitions: &mut BTreeMap, - ) { - let fields = borsh::schema::Fields::NamedFields(vec![ - ("data".to_string(), T::declaration()), - ("sig".to_string(), ::declaration()), - ]); - let definition = borsh::schema::Definition::Struct { fields }; - add_definition(Self::declaration(), definition, definitions); - T::add_definitions_recursively(definitions); - ::add_definitions_recursively(definitions); - } - - fn declaration() -> borsh::schema::Declaration { - format!("Signed<{}>", T::declaration()) - } -} - -impl Signed { - /// 
Initialize a new [`Signed`] instance from an existing signature. - #[inline] - pub fn new_from(data: T, sig: common::Signature) -> Self { - Self { - data, - sig, - _serialization: PhantomData, - } - } -} - -impl> Signed { - /// Initialize a new [`Signed`] instance. - pub fn new(keypair: &common::SecretKey, data: T) -> Self { - let to_sign = S::as_signable(&data); - let sig = - common::SigScheme::sign_with_hasher::(keypair, to_sign); - Self::new_from(data, sig) - } - - /// Verify that the data has been signed by the secret key - /// counterpart of the given public key. - pub fn verify( - &self, - pk: &common::PublicKey, - ) -> std::result::Result<(), VerifySigError> { - let signed_bytes = S::as_signable(&self.data); - common::SigScheme::verify_signature_with_hasher::( - pk, - &signed_bytes, - &self.sig, - ) - .map_err(Into::into) - } -} - -/// Get a signature for data -pub fn standalone_signature>( - keypair: &common::SecretKey, - data: &T, -) -> common::Signature { - let to_sign = S::as_signable(data); - common::SigScheme::sign_with_hasher::(keypair, to_sign) -} - -/// Verify that the input data has been signed by the secret key -/// counterpart of the given public key. 
-pub fn verify_standalone_sig>( - data: &T, - pk: &common::PublicKey, - sig: &common::Signature, -) -> std::result::Result<(), VerifySigError> { - let signed_data = S::as_signable(data); - common::SigScheme::verify_signature_with_hasher::( - pk, - &signed_data, - sig, - ) - .map_err(Into::into) -} - -/// A section representing transaction data -#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] -#[derive( - Clone, - Debug, - BorshSerialize, - BorshDeserialize, - BorshDeserializer, - BorshSchema, - Serialize, - Deserialize, -)] -pub struct Data { - /// Salt with additional random data (usually a timestamp) - #[serde(with = "hex_salt_serde")] - pub salt: [u8; SALT_LENGTH], - /// Data bytes - #[serde(with = "hex_data_serde")] - pub data: Vec, -} - -impl PartialEq for Data { - fn eq(&self, other: &Self) -> bool { - self.data == other.data - } -} - -impl Data { - /// Make a new data section with the given bytes - pub fn new(data: Vec) -> Self { - use rand_core::{OsRng, RngCore}; - - Self { - salt: { - let mut buf = [0; SALT_LENGTH]; - OsRng.fill_bytes(&mut buf); - buf - }, - data, - } - } - - /// Hash this data section - pub fn hash<'a>(&self, hasher: &'a mut Sha256) -> &'a mut Sha256 { - hasher.update(self.serialize_to_vec()); - hasher - } -} - -/// Error representing the case where the supplied code has incorrect hash -pub struct CommitmentError; - -/// Represents either some code bytes or their SHA-256 hash -#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] -#[derive( - Clone, - Debug, - BorshSerialize, - BorshDeserialize, - BorshDeserializer, - BorshSchema, - Serialize, - Deserialize, -)] -pub enum Commitment { - /// Result of applying hash function to bytes - Hash(namada_core::hash::Hash), - /// Result of applying identity function to bytes - Id(Vec), -} - -impl PartialEq for Commitment { - fn eq(&self, other: &Self) -> bool { - self.hash() == other.hash() - } -} - -impl Commitment { - /// Substitute bytes with their SHA-256 hash - pub 
fn contract(&mut self) { - if let Self::Id(code) = self { - *self = Self::Hash(hash_tx(code)); - } - } - - /// Substitute a code hash with the supplied bytes if the hashes are - /// consistent, otherwise return an error - pub fn expand( - &mut self, - code: Vec, - ) -> std::result::Result<(), CommitmentError> { - match self { - Self::Id(c) if *c == code => Ok(()), - Self::Hash(hash) if *hash == hash_tx(&code) => { - *self = Self::Id(code); - Ok(()) - } - _ => Err(CommitmentError), - } - } - - /// Return the contained hash commitment - pub fn hash(&self) -> namada_core::hash::Hash { - match self { - Self::Id(code) => hash_tx(code), - Self::Hash(hash) => *hash, - } - } - - /// Return the result of applying identity function if there is any - pub fn id(&self) -> Option> { - if let Self::Id(code) = self { - Some(code.clone()) - } else { - None - } - } -} - -/// A section representing transaction code -#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] -#[derive( - Clone, - Debug, - BorshSerialize, - BorshDeserialize, - BorshDeserializer, - BorshSchema, - Serialize, - Deserialize, -)] -pub struct Code { - /// Additional random data - #[serde(with = "hex_salt_serde")] - pub salt: [u8; SALT_LENGTH], - /// Actual transaction code - pub code: Commitment, - /// The tag for the transaction code - pub tag: Option, -} - -impl PartialEq for Code { - fn eq(&self, other: &Self) -> bool { - self.code == other.code - } -} - -impl Code { - /// Make a new code section with the given bytes - pub fn new(code: Vec, tag: Option) -> Self { - use rand_core::{OsRng, RngCore}; - - Self { - salt: { - let mut buf = [0; SALT_LENGTH]; - OsRng.fill_bytes(&mut buf); - buf - }, - code: Commitment::Id(code), - tag, - } - } - - /// Make a new code section with the given hash - pub fn from_hash( - hash: namada_core::hash::Hash, - tag: Option, - ) -> Self { - use rand_core::{OsRng, RngCore}; - - Self { - salt: { - let mut buf = [0; SALT_LENGTH]; - OsRng.fill_bytes(&mut buf); - buf - }, - 
code: Commitment::Hash(hash), - tag, - } - } - - /// Hash this code section - pub fn hash<'a>(&self, hasher: &'a mut Sha256) -> &'a mut Sha256 { - hasher.update(self.salt); - hasher.update(self.code.hash()); - hasher.update(self.tag.serialize_to_vec()); - hasher - } -} - -/// A memo field (bytes). -pub type Memo = Vec; - -/// Indicates the list of public keys against which signatures will be verified -#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] -#[derive( - Clone, - Debug, - BorshSerialize, - BorshDeserialize, - BorshDeserializer, - BorshSchema, - Serialize, - Deserialize, - PartialEq, -)] -pub enum Signer { - /// The address of a multisignature account - Address(Address), - /// The public keys that constitute a signer - PubKeys(Vec), -} - -/// A section representing a multisig over another section -#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] -#[derive( - Clone, - Debug, - BorshSerialize, - BorshDeserialize, - BorshDeserializer, - BorshSchema, - Serialize, - Deserialize, - PartialEq, -)] -pub struct Authorization { - /// The hash of the section being signed - pub targets: Vec, - /// The public keys against which the signatures should be verified - pub signer: Signer, - /// The signature over the above hash - pub signatures: BTreeMap, -} - -impl Authorization { - /// Sign the given section hash with the given key and return a section - pub fn new( - targets: Vec, - secret_keys: BTreeMap, - signer: Option
, - ) -> Self { - // If no signer address is given, then derive the signer's public keys - // from the given secret keys. - let signer = if let Some(addr) = signer { - Signer::Address(addr) - } else { - // Make sure the corresponding public keys can be represented by a - // vector instead of a map - assert!( - secret_keys - .keys() - .cloned() - .eq(0..(u8::try_from(secret_keys.len()) - .expect("Number of SKs must not exceed `u8::MAX`"))), - "secret keys must be enumerated when signer address is absent" - ); - Signer::PubKeys(secret_keys.values().map(RefTo::ref_to).collect()) - }; - - // Commit to the given targets - let partial = Self { - targets, - signer, - signatures: BTreeMap::new(), - }; - let target = partial.get_raw_hash(); - // Turn the map of secret keys into a map of signatures over the - // commitment made above - let signatures = secret_keys - .iter() - .map(|(index, secret_key)| { - (*index, common::SigScheme::sign(secret_key, target)) - }) - .collect(); - Self { - signatures, - ..partial - } - } - - /// Get the number of signatures if it fits in `u8` - pub fn total_signatures(&self) -> Option { - u8::try_from(self.signatures.len()).ok() - } - - /// Hash this signature section - pub fn hash<'a>(&self, hasher: &'a mut Sha256) -> &'a mut Sha256 { - hasher.update(self.serialize_to_vec()); - hasher - } - - /// Get the hash of this section - pub fn get_hash(&self) -> namada_core::hash::Hash { - namada_core::hash::Hash( - self.hash(&mut Sha256::new()).finalize_reset().into(), - ) - } - - /// Get a hash of this section with its signer and signatures removed - pub fn get_raw_hash(&self) -> namada_core::hash::Hash { - Self { - signer: Signer::PubKeys(vec![]), - signatures: BTreeMap::new(), - ..self.clone() - } - .get_hash() - } - - /// Verify that the signature contained in this section is valid - pub fn verify_signature( - &self, - verified_pks: &mut HashSet, - public_keys_index_map: &AccountPublicKeysMap, - signer: &Option
, - consume_verify_sig_gas: &mut F, - ) -> std::result::Result - where - F: FnMut() -> std::result::Result<(), namada_gas::Error>, - { - // Records whether there are any successful verifications - let mut verifications = 0; - match &self.signer { - // Verify the signatures against the given public keys if the - // account addresses match - Signer::Address(addr) if Some(addr) == signer.as_ref() => { - for (idx, sig) in &self.signatures { - if let Some(pk) = - public_keys_index_map.get_public_key_from_index(*idx) - { - consume_verify_sig_gas()?; - common::SigScheme::verify_signature( - &pk, - &self.get_raw_hash(), - sig, - )?; - verified_pks.insert(*idx); - // Cannot overflow - #[allow(clippy::arithmetic_side_effects)] - { - verifications += 1; - } - } - } - } - // If the account addresses do not match, then there is no efficient - // way to map signatures to the given public keys - Signer::Address(_) => {} - // Verify the signatures against the subset of this section's public - // keys that are also in the given map - Signer::PubKeys(pks) => { - let hash = self.get_raw_hash(); - for (idx, pk) in pks.iter().enumerate() { - let map_idx = - public_keys_index_map.get_index_from_public_key(pk); - - // Use the first signature when fuzzing as the map is - // unlikely to contain matching PKs - #[cfg(fuzzing)] - let map_idx = map_idx.or(Some(0_u8)); - - if let Some(map_idx) = map_idx { - let sig_idx = u8::try_from(idx) - .map_err(|_| VerifySigError::PksOverflow)?; - consume_verify_sig_gas()?; - let sig = self - .signatures - .get(&sig_idx) - .ok_or(VerifySigError::MissingSignature)?; - common::SigScheme::verify_signature(pk, &hash, sig)?; - verified_pks.insert(map_idx); - // Cannot overflow - #[allow(clippy::arithmetic_side_effects)] - { - verifications += 1; - } - } - } - } - } - - // There's usually not enough signatures when fuzzing, this makes it - // more likely to pass authorization. 
- #[cfg(fuzzing)] - { - verifications = 1; - } - - Ok(verifications) - } -} - -/// A section representing a multisig over another section -#[derive( - Clone, - Debug, - BorshSerialize, - BorshDeserialize, - BorshDeserializer, - BorshSchema, - Serialize, - Deserialize, -)] -pub struct CompressedAuthorization { - /// The hash of the section being signed - pub targets: Vec, - /// The public keys against which the signatures should be verified - pub signer: Signer, - /// The signature over the above hash - pub signatures: BTreeMap, -} - -impl CompressedAuthorization { - /// Decompress this signature object with respect to the given transaction - /// by looking up the necessary section hashes. Used by constrained hardware - /// wallets. - pub fn expand(self, tx: &Tx) -> Authorization { - let mut targets = Vec::new(); - for idx in self.targets { - if idx == 0 { - // The "zeroth" section is the header - targets.push(tx.header_hash()); - } else if idx == 255 { - // The 255th section is the raw header - targets.push(tx.raw_header_hash()); - } else { - targets.push( - tx.sections[(idx as usize) - .checked_sub(1) - .expect("cannot underflow")] - .get_hash(), - ); - } - } - Authorization { - targets, - signer: self.signer, - signatures: self.signatures, - } - } -} - -#[derive(serde::Serialize, serde::Deserialize)] -struct TransactionSerde(Vec); - -impl From> for TransactionSerde { - fn from(tx: Vec) -> Self { - Self(tx) - } -} - -impl From for Vec { - fn from(tx: TransactionSerde) -> Vec { - tx.0 - } -} - -fn borsh_serde( - obj: &impl BorshSerialize, - ser: S, -) -> std::result::Result -where - S: serde::Serializer, - T: From>, - T: serde::Serialize, -{ - Into::::into(obj.serialize_to_vec()).serialize(ser) -} - -fn serde_borsh<'de, T, S, U>(ser: S) -> std::result::Result -where - S: serde::Deserializer<'de>, - T: Into>, - T: serde::Deserialize<'de>, - U: BorshDeserialize, -{ - BorshDeserialize::try_from_slice(&Into::>::into(T::deserialize( - ser, - )?)) - 
.map_err(S::Error::custom) -} - -/// A structure to facilitate Serde (de)serializations of Builders -#[derive(serde::Serialize, serde::Deserialize)] -struct BuilderSerde(Vec); - -impl From> for BuilderSerde { - fn from(tx: Vec) -> Self { - Self(tx) - } -} - -impl From for Vec { - fn from(tx: BuilderSerde) -> Vec { - tx.0 - } -} - -/// A structure to facilitate Serde (de)serializations of SaplingMetadata -#[derive(serde::Serialize, serde::Deserialize)] -pub struct SaplingMetadataSerde(Vec); - -impl From> for SaplingMetadataSerde { - fn from(tx: Vec) -> Self { - Self(tx) - } -} - -impl From for Vec { - fn from(tx: SaplingMetadataSerde) -> Vec { - tx.0 - } -} - -/// A section providing the auxiliary inputs used to construct a MASP -/// transaction -#[derive( - Clone, - Debug, - BorshSerialize, - BorshDeserialize, - BorshSchema, - Serialize, - Deserialize, -)] -pub struct MaspBuilder { - /// The MASP transaction that this section witnesses - pub target: MaspTxId, - /// The decoded set of asset types used by the transaction. Useful for - /// offline wallets trying to display AssetTypes. - pub asset_types: HashSet, - /// Track how Info objects map to descriptors and outputs - #[serde( - serialize_with = "borsh_serde::", - deserialize_with = "serde_borsh::" - )] - pub metadata: SaplingMetadata, - /// The data that was used to construct the target transaction - #[serde( - serialize_with = "borsh_serde::", - deserialize_with = "serde_borsh::" - )] - pub builder: Builder<(), ExtendedFullViewingKey, ()>, -} - -impl PartialEq for MaspBuilder { - fn eq(&self, other: &Self) -> bool { - self.target == other.target - } -} - -impl MaspBuilder { - /// Get the hash of this ciphertext section. 
This operation is done in such - /// a way it matches the hash of the type pun - pub fn hash<'a>(&self, hasher: &'a mut Sha256) -> &'a mut Sha256 { - hasher.update(self.serialize_to_vec()); - hasher - } -} - -#[cfg(feature = "arbitrary")] -impl arbitrary::Arbitrary<'_> for MaspBuilder { - fn arbitrary( - u: &mut arbitrary::Unstructured<'_>, - ) -> arbitrary::Result { - use masp_primitives::transaction::builder::MapBuilder; - use masp_primitives::transaction::components::sapling::builder::MapBuilder as SapMapBuilder; - use masp_primitives::zip32::ExtendedSpendingKey; - struct WalletMap; - - impl - SapMapBuilder - for WalletMap - { - fn map_params(&self, _s: P1) {} - - fn map_key( - &self, - s: ExtendedSpendingKey, - ) -> ExtendedFullViewingKey { - (&s).into() - } - } - impl - MapBuilder< - P1, - ExtendedSpendingKey, - N1, - (), - ExtendedFullViewingKey, - (), - > for WalletMap - { - fn map_notifier(&self, _s: N1) {} - } - - let target_height = masp_primitives::consensus::BlockHeight::from( - u.int_in_range(0_u32..=100_000_000)?, - ); - Ok(MaspBuilder { - target: arbitrary::Arbitrary::arbitrary(u)?, - asset_types: arbitrary::Arbitrary::arbitrary(u)?, - metadata: arbitrary::Arbitrary::arbitrary(u)?, - builder: Builder::new( - masp_primitives::consensus::TestNetwork, - target_height, - ) - .map_builder(WalletMap), - }) - } - - fn size_hint(depth: usize) -> (usize, Option) { - arbitrary::size_hint::and_all( - &[ - ::size_hint(depth), - ::size_hint(depth), - as arbitrary::Arbitrary>::size_hint(depth), - ::size_hint(depth), - ], - ) - } -} - -/// A section of a transaction. Carries an independent piece of information -/// necessary for the processing of a transaction. 
-#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] -#[derive( - Clone, - Debug, - BorshSerialize, - BorshDeserialize, - BorshDeserializer, - BorshSchema, - Serialize, - Deserialize, - PartialEq, -)] -pub enum Section { - /// Transaction data that needs to be sent to hardware wallets - Data(Data), - /// Transaction data that does not need to be sent to hardware wallets - ExtraData(Code), - /// Transaction code. Sending to hardware wallets optional - Code(Code), - /// A transaction header/protocol signature - Authorization(Authorization), - /// Embedded MASP transaction section - #[serde( - serialize_with = "borsh_serde::", - deserialize_with = "serde_borsh::" - )] - MaspTx(Transaction), - /// A section providing the auxiliary inputs used to construct a MASP - /// transaction. Only send to wallet, never send to protocol. - MaspBuilder(MaspBuilder), - /// Wrap a header with a section for the purposes of computing hashes - Header(Header), -} - -impl Section { - /// Hash this section. Section hashes are useful for signatures and also for - /// allowing transaction sections to cross reference. 
- pub fn hash<'a>(&self, hasher: &'a mut Sha256) -> &'a mut Sha256 { - // Get the index corresponding to this variant - let discriminant = self.serialize_to_vec()[0]; - // Use Borsh's discriminant in the Section's hash - hasher.update([discriminant]); - match self { - Self::Data(data) => data.hash(hasher), - Self::ExtraData(extra) => extra.hash(hasher), - Self::Code(code) => code.hash(hasher), - Self::Authorization(signature) => signature.hash(hasher), - Self::MaspBuilder(mb) => mb.hash(hasher), - Self::MaspTx(tx) => { - hasher.update(tx.serialize_to_vec()); - hasher - } - Self::Header(header) => header.hash(hasher), - } - } - - /// Get the hash of this section - pub fn get_hash(&self) -> namada_core::hash::Hash { - namada_core::hash::Hash( - self.hash(&mut Sha256::new()).finalize_reset().into(), - ) - } - - /// Extract the data from this section if possible - pub fn data(&self) -> Option { - if let Self::Data(data) = self { - Some(data.clone()) - } else { - None - } - } - - /// Extract the extra data from this section if possible - pub fn extra_data_sec(&self) -> Option { - if let Self::ExtraData(data) = self { - Some(data.clone()) - } else { - None - } - } - - /// Extract the extra data from this section if possible - pub fn extra_data(&self) -> Option> { - if let Self::ExtraData(data) = self { - data.code.id() - } else { - None - } - } - - /// Extract the code from this section is possible - pub fn code_sec(&self) -> Option { - if let Self::Code(data) = self { - Some(data.clone()) - } else { - None - } - } - - /// Extract the code from this section is possible - pub fn code(&self) -> Option> { - if let Self::Code(data) = self { - data.code.id() - } else { - None - } - } - - /// Extract the signature from this section if possible - pub fn signature(&self) -> Option { - if let Self::Authorization(data) = self { - Some(data.clone()) - } else { - None - } - } - - /// Extract the MASP transaction from this section if possible - pub fn masp_tx(&self) -> Option { - if 
let Self::MaspTx(data) = self { - Some(data.clone()) - } else { - None - } - } - - /// Extract the MASP builder from this section if possible - pub fn masp_builder(&self) -> Option { - if let Self::MaspBuilder(data) = self { - Some(data.clone()) - } else { - None - } - } -} - -/// An inner transaction of the batch, represented by its commitments to the -/// [`Code`], [`Data`] and [`Memo`] sections -#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] -#[derive( - Clone, - Debug, - Default, - BorshSerialize, - BorshDeserialize, - BorshDeserializer, - BorshSchema, - Serialize, - Deserialize, - Eq, - PartialEq, - Ord, - PartialOrd, - Hash, -)] -pub struct TxCommitments { - /// The SHA-256 hash of the transaction's code section - pub code_hash: namada_core::hash::Hash, - /// The SHA-256 hash of the transaction's data section - pub data_hash: namada_core::hash::Hash, - /// The SHA-256 hash of the transaction's memo section - /// - /// In case a memo is not present in the transaction, a - /// byte array filled with zeroes is present instead - pub memo_hash: namada_core::hash::Hash, -} - -impl TxCommitments { - /// Get the hash of this transaction's code - pub fn code_sechash(&self) -> &namada_core::hash::Hash { - &self.code_hash - } - - /// Get the transaction data hash - pub fn data_sechash(&self) -> &namada_core::hash::Hash { - &self.data_hash - } - - /// Get the hash of this transaction's memo - pub fn memo_sechash(&self) -> &namada_core::hash::Hash { - &self.memo_hash - } - - /// Hash the commitments to the transaction's sections - pub fn hash<'a>(&self, hasher: &'a mut Sha256) -> &'a mut Sha256 { - hasher.update(self.serialize_to_vec()); - hasher - } - - /// Get the hash of this Commitments - pub fn get_hash(&self) -> namada_core::hash::Hash { - namada_core::hash::Hash( - self.hash(&mut Sha256::new()).finalize_reset().into(), - ) - } -} - -/// A Namada transaction header indicating where transaction subcomponents can -/// be found -#[cfg_attr(feature = 
"arbitrary", derive(arbitrary::Arbitrary))] -#[derive( - Clone, - Debug, - BorshSerialize, - BorshDeserialize, - BorshDeserializer, - BorshSchema, - Serialize, - Deserialize, - PartialEq, -)] -pub struct Header { - /// The chain which this transaction is being submitted to - pub chain_id: ChainId, - /// The time at which this transaction expires - pub expiration: Option, - /// A transaction timestamp - pub timestamp: DateTimeUtc, - /// The commitments to the transaction's sections - pub batch: HashSet, - /// Whether the inner txs should be executed atomically - pub atomic: bool, - /// The type of this transaction - pub tx_type: TxType, -} - -impl Header { - /// Make a new header of the given transaction type - pub fn new(tx_type: TxType) -> Self { - Self { - tx_type, - chain_id: ChainId::default(), - expiration: None, - #[allow(clippy::disallowed_methods)] - timestamp: DateTimeUtc::now(), - batch: Default::default(), - atomic: Default::default(), - } - } - - /// Get the hash of this transaction header. 
- pub fn hash<'a>(&self, hasher: &'a mut Sha256) -> &'a mut Sha256 { - hasher.update(self.serialize_to_vec()); - hasher - } - - /// Get the wrapper header if it is present - pub fn wrapper(&self) -> Option { - if let TxType::Wrapper(wrapper) = &self.tx_type { - Some(*wrapper.clone()) - } else { - None - } - } +use crate::data::{Fee, GasLimit, TxType, WrapperTx}; +use crate::sign::{SignatureIndex, VerifySigError}; +use crate::{ + proto, Authorization, Code, Data, Header, MaspBuilder, Section, Signer, + TxCommitments, +}; - /// Get the protocol header if it is present - pub fn protocol(&self) -> Option { - if let TxType::Protocol(protocol) = &self.tx_type { - Some(*protocol.clone()) - } else { - None - } - } +#[allow(missing_docs)] +#[derive(Error, Debug)] +pub enum DecodeError { + #[error("Invalid signature index bytes: {0}")] + InvalidEncoding(std::io::Error), + #[error("Invalid signature index: {0}")] + InvalidHex(data_encoding::DecodeError), + #[error("Error decoding a transaction from bytes: {0}")] + TxDecodingError(prost::DecodeError), + #[error("Timestamp is empty")] + NoTimestampError, + #[error("Timestamp is invalid: {0}")] + InvalidTimestamp(prost_types::TimestampError), + #[error("Couldn't serialize transaction from JSON at {0}")] + InvalidJSONDeserialization(String), } /// Errors relating to decrypting a wrapper tx and its @@ -1133,6 +71,7 @@ pub enum TxError { BorshSchema, Serialize, Deserialize, + PartialEq, )] pub struct Tx { /// Type indicating how to process transaction @@ -1146,12 +85,7 @@ impl TryFrom<&[u8]> for Tx { type Error = DecodeError; fn try_from(tx_bytes: &[u8]) -> Result { - use prost::Message; - - let tx = proto::Tx::decode(tx_bytes) - .map_err(DecodeError::TxDecodingError)?; - BorshDeserialize::try_from_slice(&tx.data) - .map_err(DecodeError::InvalidEncoding) + Tx::try_from_bytes(tx_bytes) } } @@ -1185,19 +119,21 @@ impl Tx { } } - /// Serialize tx to hex string - pub fn serialize(&self) -> String { - let tx_bytes = 
self.serialize_to_vec(); - HEXUPPER.encode(&tx_bytes) + /// Serialize tx to pretty JSON into an I/O stream + /// + /// For protobuf encoding, see `to_bytes/try_to_bytes`. + pub fn to_writer_json(&self, writer: W) -> serde_json::Result<()> + where + W: io::Write, + { + serde_json::to_writer_pretty(writer, self) } - /// Deserialize tx from json - pub fn deserialize(data: &[u8]) -> Result { - if let Ok(tx) = serde_json::from_slice::(data) { - Ok(tx) - } else { - Err(DecodeError::InvalidJsonString) - } + /// Deserialize tx from JSON string bytes + /// + /// For protobuf decoding, see `try_from_bytes`. + pub fn try_from_json_bytes(data: &[u8]) -> serde_json::Result { + serde_json::from_slice::(data) } /// Add new default commitments to the transaction. Returns false if the @@ -1308,9 +244,10 @@ impl Tx { self.header.batch.insert(item); } - /// Get the memo designated by the memo hash in the header for the specified - /// commitment - pub fn memo(&self, cmt: &TxCommitments) -> Option> { + /// Get the memo designated by the memo hash in the header for the selected + /// tx, if contained within this tx + pub fn memo(&self, inner_tx: InnerTxRef<'_>) -> Option> { + let cmt = self.get_inner_tx_commitments(inner_tx)?; if cmt.memo_hash == namada_core::hash::Hash::default() { return None; } @@ -1345,7 +282,8 @@ impl Tx { /// Get the code designated by the transaction code hash in the header for /// the specified commitment - pub fn code(&self, cmt: &TxCommitments) -> Option> { + pub fn code(&self, inner_tx: InnerTxRef<'_>) -> Option> { + let cmt = self.get_inner_tx_commitments(inner_tx)?; match self.get_section(&cmt.code_hash).as_ref().map(Cow::as_ref) { Some(Section::Code(section)) => section.code.id(), _ => None, @@ -1384,9 +322,10 @@ impl Tx { self.sections.last_mut().unwrap() } - /// Get the data designated by the transaction data hash in the header at - /// the specified commitment - pub fn data(&self, cmt: &TxCommitments) -> Option> { + /// Get the data designated by the 
transaction data hash in the header for + /// the selected tx, if contained within this tx + pub fn data(&self, inner_tx: InnerTxRef<'_>) -> Option> { + let cmt = self.get_inner_tx_commitments(inner_tx)?; self.get_data_section(&cmt.data_hash) } @@ -1401,7 +340,9 @@ impl Tx { } } - /// Convert this transaction into protobufs bytes + /// Convert this transaction into protobufs bytes. + /// + /// For JSON encoding see `to_writer_json`. pub fn to_bytes(&self) -> Vec { use prost::Message; @@ -1415,6 +356,8 @@ impl Tx { } /// Convert this transaction into protobufs bytes + /// + /// For JSON encoding see `to_writer_json`. pub fn try_to_bytes(&self) -> std::io::Result> { use prost::Message; @@ -1428,6 +371,18 @@ impl Tx { Ok(bytes) } + /// Try to deserialize a tx from protobuf bytes + /// + /// For JSON decoding see `try_from_json_bytes`. + pub fn try_from_bytes(tx_bytes: &[u8]) -> Result { + use prost::Message; + + let tx = proto::Tx::decode(tx_bytes) + .map_err(DecodeError::TxDecodingError)?; + BorshDeserialize::try_from_slice(&tx.data) + .map_err(DecodeError::InvalidEncoding) + } + /// Verify that the section with the given hash has been signed by the given /// public key pub fn verify_signatures( @@ -1615,29 +570,6 @@ impl Tx { filtered } - /// Filter out all the sections that need not be sent to the hardware wallet - /// and return them - pub fn wallet_filter(&mut self) -> Vec
{ - let mut filtered = Vec::new(); - for i in (0..self.sections.len()).rev() { - match &mut self.sections[i] { - // This section is known to be large and can be contracted - Section::Code(section) => { - filtered.push(Section::Code(section.clone())); - section.code.contract(); - } - // This section is known to be large and can be contracted - Section::ExtraData(section) => { - filtered.push(Section::ExtraData(section.clone())); - section.code.contract(); - } - // Everything else is fine to add - _ => {} - } - } - filtered - } - /// Add an extra section to the tx builder by hash pub fn add_extra_section_from_hash( &mut self, @@ -1843,6 +775,28 @@ impl Tx { let cmt = self.first_commitments().unwrap().to_owned(); BatchedTx { tx: self, cmt } } + + /// Select a given inner tx commitment if contained within this tx. + fn get_inner_tx_commitments( + &self, + commitment: InnerTxRef<'_>, + ) -> Option<&TxCommitments> { + match commitment { + InnerTxRef::Commitment(commitments) => { + // Check that `commitments` is present in `batch` + self.commitments() + .into_iter() + .find(|batch_commitments| *batch_commitments == commitments) + } + InnerTxRef::CommitmentHash(commitments_hash) => self + .commitments() + .into_iter() + .find(|commitments| commitments.get_hash() == commitments_hash), + InnerTxRef::Index(index) => { + self.commitments().into_iter().nth(index) + } + } + } } impl<'tx> Tx { @@ -1855,6 +809,17 @@ impl<'tx> Tx { } } +/// A key to select an inner tx and its sections +#[derive(Debug, Clone)] +pub enum InnerTxRef<'a> { + /// A direct ref to tx commitments + Commitment(&'a TxCommitments), + /// A hash of a `TxCommitments` + CommitmentHash(namada_core::hash::Hash), + /// Index of an inner tx + Index(usize), +} + /// Represents the pointers to a indexed tx, which are the block height and the /// index inside that block. Optionally points to a specific inner tx inside a /// batch if such level of granularity is required. 
@@ -1975,6 +940,23 @@ pub struct BatchedTxRef<'tx> { pub cmt: &'tx TxCommitments, } +impl BatchedTxRef<'_> { + /// Get the code designated by the code hash in the header + pub fn code(&self) -> Option> { + self.tx.code(InnerTxRef::Commitment(self.cmt)) + } + + /// Get the data designated by the data hash in the header + pub fn data(&self) -> Option> { + self.tx.data(InnerTxRef::Commitment(self.cmt)) + } + + /// Get the memo designated by the memo hash in the header + pub fn memo(&self) -> Option> { + self.tx.memo(InnerTxRef::Commitment(self.cmt)) + } +} + /// A transaction with the commitment to a specific inner transaction of the /// batch #[derive( @@ -2002,18 +984,25 @@ mod test { use std::collections::BTreeMap; use std::fs; + use assert_matches::assert_matches; use data_encoding::HEXLOWER; + use namada_core::address::testing::nam; use namada_core::borsh::schema::BorshSchema; + use namada_core::hash::Hash; + use namada_core::key; + use namada_core::token::DenominatedAmount; use super::*; + use crate::data; + use crate::data::protocol::{ProtocolTx, ProtocolTxType}; /// Test that the BorshSchema for Tx gets generated without any name /// conflicts #[test] fn test_tx_schema() { - let _declaration = super::Tx::declaration(); + let _declaration = Tx::declaration(); let mut definitions = BTreeMap::new(); - super::Tx::add_definitions_recursively(&mut definitions); + Tx::add_definitions_recursively(&mut definitions); } /// Tx encoding must not change @@ -2025,8 +1014,493 @@ mod test { serde_json::from_reader(file).expect("file should be proper JSON"); for serialized_tx in serialized_txs { - let tmp = HEXLOWER.decode(serialized_tx.as_bytes()).unwrap(); - Tx::try_from(tmp.as_ref()).unwrap(); + let raw_bytes = HEXLOWER.decode(serialized_tx.as_bytes()).unwrap(); + let tx = Tx::try_from_bytes(raw_bytes.as_ref()).unwrap(); + + assert_eq!(tx.try_to_bytes().unwrap(), raw_bytes); + assert_eq!(tx.to_bytes(), raw_bytes); + } + } + + #[test] + fn test_tx_protobuf_serialization() { 
+ let tx = Tx::default(); + + let buffer = tx.to_bytes(); + + let deserialized = Tx::try_from_bytes(&buffer).unwrap(); + assert_eq!(tx, deserialized); + } + + #[test] + fn test_tx_json_serialization() { + let tx = Tx::default(); + + let mut buffer = vec![]; + tx.to_writer_json(&mut buffer).unwrap(); + + let deserialized = Tx::try_from_json_bytes(&buffer).unwrap(); + assert_eq!(tx, deserialized); + } + + #[test] + fn test_wrapper_tx_signing() { + let sk1 = key::testing::keypair_1(); + let sk2 = key::testing::keypair_2(); + let pk1 = sk1.to_public(); + let token = nam(); + + let mut tx = Tx::default(); + tx.add_wrapper( + data::wrapper::Fee { + amount_per_gas_unit: DenominatedAmount::native(1.into()), + token, + }, + pk1, + 1.into(), + ); + + // Unsigned tx should fail validation + tx.validate_tx().expect_err("Unsigned"); + + { + let mut tx = tx.clone(); + // Sign the tx + tx.sign_wrapper(sk1); + + // Signed tx should pass validation + tx.validate_tx() + .expect("valid tx") + .expect("with authorization"); + } + + { + let mut tx = tx.clone(); + // Sign the tx with a wrong key + tx.sign_wrapper(sk2); + + // Should be rejected + tx.validate_tx().expect_err("invalid signature - wrong key"); + } + } + + #[test] + fn test_protocol_tx_signing() { + let sk1 = key::testing::keypair_1(); + let sk2 = key::testing::keypair_2(); + let pk1 = sk1.to_public(); + let tx = Tx::from_type(TxType::Protocol(Box::new(ProtocolTx { + pk: pk1, + tx: ProtocolTxType::BridgePool, + }))); + + // Unsigned tx should fail validation + tx.validate_tx().expect_err("Unsigned"); + + { + let mut tx = tx.clone(); + // Sign the tx + tx.add_section(Section::Authorization(Authorization::new( + tx.sechashes(), + BTreeMap::from_iter([(0, sk1)]), + None, + ))); + + // Signed tx should pass validation + tx.validate_tx() + .expect("valid tx") + .expect("with authorization"); + } + + { + let mut tx = tx.clone(); + // Sign the tx with a wrong key + tx.add_section(Section::Authorization(Authorization::new( + 
tx.sechashes(), + BTreeMap::from_iter([(0, sk2)]), + None, + ))); + + // Should be rejected + tx.validate_tx().expect_err("invalid signature - wrong key"); + } + } + + #[test] + fn test_inner_tx_signing() { + let sk1 = key::testing::keypair_1(); + let sk2 = key::testing::keypair_2(); + let pk1 = sk1.to_public(); + let pk2 = sk2.to_public(); + let pks_map = AccountPublicKeysMap::from_iter(vec![pk1.clone()]); + let threshold = 1_u8; + + let tx = Tx::default(); + + // Unsigned tx should fail validation + tx.verify_signatures( + &[tx.header_hash()], + pks_map.clone(), + &None, + threshold, + || Ok(()), + ) + .expect_err("Unsigned"); + + // Sign the tx + { + let mut tx = tx.clone(); + let signatures = + tx.compute_section_signature(&[sk1], &pks_map, None); + assert_eq!(signatures.len(), 1); + tx.add_signatures(signatures); + + // Signed tx should pass validation + let authorizations = tx + .verify_signatures( + &[tx.header_hash()], + pks_map.clone(), + &None, + threshold, + || Ok(()), + ) + .expect("valid tx"); + assert_eq!(authorizations.len(), 1); + } + + // Sign the tx with a wrong key + { + let mut tx = tx.clone(); + let pks_map_wrong = + AccountPublicKeysMap::from_iter(vec![pk2.clone()]); + let signatures = + tx.compute_section_signature(&[sk2], &pks_map_wrong, None); + assert_eq!(signatures.len(), 1); + tx.add_signatures(signatures); + + // Should be rejected + assert_matches!( + tx.verify_signatures( + &[tx.header_hash()], + pks_map.clone(), + &None, + threshold, + || Ok(()), + ), + Err(VerifySigError::InvalidSectionSignature(_)) + ); + } + } + + #[test] + fn test_inner_tx_multisig_signing() { + let sk1 = key::testing::keypair_1(); + let sk2 = key::testing::keypair_2(); + let sk3 = key::testing::keypair_3(); + let pk1 = sk1.to_public(); + let pk2 = sk2.to_public(); + let pk3 = sk3.to_public(); + + // A multisig with pk/sk 1 and 2 requiring both signatures + let pks_map = + AccountPublicKeysMap::from_iter(vec![pk1.clone(), pk2.clone()]); + let threshold = 2_u8; + 
+ let tx = Tx::default(); + + // Unsigned tx should fail validation + tx.verify_signatures( + &[tx.header_hash()], + pks_map.clone(), + &None, + threshold, + || Ok(()), + ) + .expect_err("Unsigned"); + + // Sign the tx with both keys + { + let mut tx = tx.clone(); + let signatures = tx.compute_section_signature( + &[sk1.clone(), sk2.clone()], + &pks_map, + None, + ); + assert_eq!(signatures.len(), 2); + tx.add_signatures(signatures); + + // Signed tx should pass validation + let authorizations = tx + .verify_signatures( + &[tx.header_hash()], + pks_map.clone(), + &None, + threshold, + || Ok(()), + ) + .expect("valid tx"); + assert_eq!(authorizations.len(), 1); + } + + // Sign the tx with one key only - sk1 + { + let mut tx = tx.clone(); + let signatures = + tx.compute_section_signature(&[sk1.clone()], &pks_map, None); + assert_eq!(signatures.len(), 1); + tx.add_signatures(signatures); + + // Should be rejected + assert_matches!( + tx.verify_signatures( + &[tx.header_hash()], + pks_map.clone(), + &None, + threshold, + || Ok(()), + ), + Err(VerifySigError::InvalidSectionSignature(_)) + ); + } + + // Sign the tx with one key only - sk2 + { + let mut tx = tx.clone(); + let pks_map_wrong = AccountPublicKeysMap::from_iter(vec![pk2]); + let signatures = + tx.compute_section_signature(&[sk2], &pks_map_wrong, None); + assert_eq!(signatures.len(), 1); + tx.add_signatures(signatures); + + // Should be rejected + assert_matches!( + tx.verify_signatures( + &[tx.header_hash()], + pks_map.clone(), + &None, + threshold, + || Ok(()), + ), + Err(VerifySigError::InvalidSectionSignature(_)) + ); + } + + // Sign the tx with two keys but one of them incorrect - sk3 + { + let mut tx = tx.clone(); + let pks_map_wrong = AccountPublicKeysMap::from_iter(vec![pk1, pk3]); + let signatures = + tx.compute_section_signature(&[sk1, sk3], &pks_map_wrong, None); + assert_eq!(signatures.len(), 2); + tx.add_signatures(signatures); + + // Should be rejected + assert_matches!( + tx.verify_signatures( + 
&[tx.header_hash()], + pks_map.clone(), + &None, + threshold, + || Ok(()), + ), + Err(VerifySigError::InvalidSectionSignature(_)) + ); + } + } + + #[test] + fn test_inner_tx_sections() { + let mut tx = Tx::default(); + dbg!(&tx); + assert!(tx.first_commitments().is_none()); + + for cmt in [ + InnerTxRef::CommitmentHash(Hash::default()), + InnerTxRef::Index(0), + InnerTxRef::Index(1), + ] { + assert!(tx.code(cmt.clone()).is_none()); + assert!(tx.data(cmt.clone()).is_none()); + assert!(tx.memo(cmt).is_none()); + } + + // Set inner tx code + let code_bytes = "code brrr".as_bytes(); + let code = Code::new(code_bytes.to_owned(), None); + tx.set_code(code); + assert!(tx.first_commitments().is_some()); + for cmt in [ + InnerTxRef::Commitment(tx.first_commitments().unwrap()), + InnerTxRef::CommitmentHash( + tx.first_commitments().unwrap().get_hash(), + ), + InnerTxRef::Index(0), + ] { + assert!(tx.code(cmt.clone()).is_some()); + assert_eq!(tx.code(cmt.clone()).unwrap(), code_bytes); + + assert!(tx.data(cmt.clone()).is_none()); + assert!(tx.memo(cmt).is_none()); + } + for cmt in [ + InnerTxRef::CommitmentHash(Hash::default()), + InnerTxRef::Index(1), + ] { + assert!(tx.code(cmt.clone()).is_none()); + assert!(tx.data(cmt.clone()).is_none()); + assert!(tx.memo(cmt).is_none()); + } + + // Set inner tx data + let data_bytes = "bingbong".as_bytes(); + let data = Data::new(data_bytes.to_owned()); + tx.set_data(data); + assert!(tx.first_commitments().is_some()); + for cmt in [ + InnerTxRef::Commitment(tx.first_commitments().unwrap()), + InnerTxRef::CommitmentHash( + tx.first_commitments().unwrap().get_hash(), + ), + InnerTxRef::Index(0), + ] { + assert!(tx.code(cmt.clone()).is_some()); + + assert!(tx.data(cmt.clone()).is_some()); + assert_eq!(tx.data(cmt.clone()).unwrap(), data_bytes); + + assert!(tx.memo(cmt).is_none()); + } + for cmt in [ + InnerTxRef::CommitmentHash(Hash::default()), + InnerTxRef::Index(1), + ] { + assert!(tx.code(cmt.clone()).is_none()); + 
assert!(tx.data(cmt.clone()).is_none()); + assert!(tx.memo(cmt).is_none()); + } + + // Set inner tx memo + let memo_bytes = "extradata".as_bytes(); + tx.add_memo(memo_bytes); + assert!(tx.first_commitments().is_some()); + for cmt in [ + InnerTxRef::Commitment(tx.first_commitments().unwrap()), + InnerTxRef::CommitmentHash( + tx.first_commitments().unwrap().get_hash(), + ), + InnerTxRef::Index(0), + ] { + assert!(tx.code(cmt.clone()).is_some()); + + assert!(tx.data(cmt.clone()).is_some()); + + assert!(tx.memo(cmt.clone()).is_some()); + assert_eq!(tx.memo(cmt.clone()).unwrap(), memo_bytes); + } + for cmt in [ + InnerTxRef::CommitmentHash(Hash::default()), + InnerTxRef::Index(1), + ] { + assert!(tx.code(cmt.clone()).is_none()); + assert!(tx.data(cmt.clone()).is_none()); + assert!(tx.memo(cmt).is_none()); + } + } + + #[test] + fn test_batched_tx_sections() { + let code_bytes1 = "code brrr".as_bytes(); + let data_bytes1 = "bingbong".as_bytes(); + let memo_bytes1 = "extradata".as_bytes(); + + let code_bytes2 = code_bytes1; + let data_bytes2 = "WASD".as_bytes(); + let memo_bytes2 = "hjkl".as_bytes(); + + let inner_tx1 = { + let mut tx = Tx::default(); + + let code = Code::new(code_bytes1.to_owned(), None); + tx.set_code(code); + + let data = Data::new(data_bytes1.to_owned()); + tx.set_data(data); + + tx.add_memo(memo_bytes1); + + tx + }; + + let inner_tx2 = { + let mut tx = Tx::default(); + + let code = Code::new(code_bytes2.to_owned(), None); + tx.set_code(code); + + let data = Data::new(data_bytes2.to_owned()); + tx.set_data(data); + + tx.add_memo(memo_bytes2); + + tx + }; + + let cmt1 = inner_tx1.first_commitments().unwrap().to_owned(); + let cmt2 = inner_tx2.first_commitments().unwrap().to_owned(); + + // Batch `inner_tx1` and `inner_tx2` into `tx` + let tx = { + let mut tx = Tx::default(); + + tx.add_inner_tx(inner_tx1, cmt1.clone()); + assert_eq!(tx.first_commitments().unwrap(), &cmt1); + assert_eq!(tx.header.batch.len(), 1); + + tx.add_inner_tx(inner_tx2, 
cmt2.clone()); + assert_eq!(tx.first_commitments().unwrap(), &cmt1); + assert_eq!(tx.header.batch.len(), 2); + assert_eq!(tx.header.batch.get_index(1).unwrap(), &cmt2); + + tx + }; + + // Check sections of `inner_tx1` + for cmt in [ + InnerTxRef::Commitment(&cmt1), + InnerTxRef::CommitmentHash(cmt1.get_hash()), + InnerTxRef::Index(0), + ] { + assert!(tx.code(cmt.clone()).is_some()); + assert_eq!(tx.code(cmt.clone()).unwrap(), code_bytes1); + + assert!(tx.data(cmt.clone()).is_some()); + assert_eq!(tx.data(cmt.clone()).unwrap(), data_bytes1); + + assert!(tx.memo(cmt.clone()).is_some()); + assert_eq!(tx.memo(cmt.clone()).unwrap(), memo_bytes1); + } + + // Check sections of `inner_tx2` + for cmt in [ + InnerTxRef::Commitment(&cmt2), + InnerTxRef::CommitmentHash(cmt2.get_hash()), + InnerTxRef::Index(1), + ] { + assert!(tx.code(cmt.clone()).is_some()); + assert_eq!(tx.code(cmt.clone()).unwrap(), code_bytes2); + + assert!(tx.data(cmt.clone()).is_some()); + assert_eq!(tx.data(cmt.clone()).unwrap(), data_bytes2); + + assert!(tx.memo(cmt.clone()).is_some()); + assert_eq!(tx.memo(cmt.clone()).unwrap(), memo_bytes2); + } + + // Check invalid indices + for cmt in [InnerTxRef::Index(2), InnerTxRef::Index(3)] { + assert!(tx.code(cmt.clone()).is_none()); + assert!(tx.data(cmt.clone()).is_none()); + assert!(tx.memo(cmt.clone()).is_none()); } } } diff --git a/crates/tx_prelude/src/lib.rs b/crates/tx_prelude/src/lib.rs index 89ad1e80c8..985ad00ced 100644 --- a/crates/tx_prelude/src/lib.rs +++ b/crates/tx_prelude/src/lib.rs @@ -124,12 +124,13 @@ impl Ctx { } /// Get the transaction data for the specified inner tx - pub fn get_tx_data(&mut self, batched_tx: &BatchedTx) -> Result> { - let BatchedTx { tx, ref cmt } = batched_tx; - - tx.data(cmt).ok_or_err_msg("Missing data").inspect_err(|_| { - self.set_commitment_sentinel(); - }) + pub fn get_tx_data(&mut self, tx: &BatchedTx) -> Result> { + tx.to_ref() + .data() + .ok_or_err_msg("Missing data") + .inspect_err(|_| { + 
self.set_commitment_sentinel(); + }) } } diff --git a/crates/vote_ext/src/lib.rs b/crates/vote_ext/src/lib.rs index c2086af754..b6bf7bac45 100644 --- a/crates/vote_ext/src/lib.rs +++ b/crates/vote_ext/src/lib.rs @@ -31,7 +31,7 @@ use namada_macros::BorshDeserializer; use namada_migrations::*; use namada_tx::data::protocol::{ProtocolTx, ProtocolTxType}; use namada_tx::data::TxType; -use namada_tx::{Authorization, Signed, Tx, TxError}; +use namada_tx::{Authorization, InnerTxRef, Signed, Tx, TxError}; /// This type represents the data we pass to the extension of /// a vote at the PreCommit phase of Tendermint. @@ -61,11 +61,13 @@ macro_rules! ethereum_tx_data_deserialize_inner { fn try_from(tx: &Tx) -> Result { let tx_data = tx - .data(tx.commitments().first().ok_or_else(|| { - TxError::Deserialization( - "Missing inner protocol tx commitments".into(), - ) - })?) + .data(InnerTxRef::Commitment( + tx.commitments().first().ok_or_else(|| { + TxError::Deserialization( + "Missing inner protocol tx commitments".into(), + ) + })?, + )) .ok_or_else(|| { TxError::Deserialization( "Expected protocol tx type associated data".into(), @@ -150,13 +152,13 @@ impl TryFrom<&Tx> for EthereumTxData { "Expected protocol tx type".into(), )); }; - let Some(tx_data) = - tx.data(tx.commitments().first().ok_or_else(|| { + let Some(tx_data) = tx.data(InnerTxRef::Commitment( + tx.commitments().first().ok_or_else(|| { TxError::Deserialization( "Missing inner protocol tx commitments".into(), ) - })?) - else { + })?, + )) else { return Err(TxError::Deserialization( "Expected protocol tx type associated data".into(), )); diff --git a/wasm/tx_ibc/src/lib.rs b/wasm/tx_ibc/src/lib.rs index 964012ca67..939760c6b7 100644 --- a/wasm/tx_ibc/src/lib.rs +++ b/wasm/tx_ibc/src/lib.rs @@ -1,7 +1,6 @@ //! A tx for IBC. //! This tx executes an IBC operation according to the given IBC message as the -//! tx_data. This tx uses an IBC message wrapped inside -//! 
`key::ed25519::SignedTxData` as its input as declared in `ibc` crate. +//! tx_data. This tx uses an IBC message as its input. use std::collections::BTreeMap; diff --git a/wasm/tx_transfer/src/lib.rs b/wasm/tx_transfer/src/lib.rs index aa1301e164..1e3b3c79aa 100644 --- a/wasm/tx_transfer/src/lib.rs +++ b/wasm/tx_transfer/src/lib.rs @@ -1,6 +1,5 @@ //! A tx for transparent token transfer. -//! This tx uses `token::TransparentTransfer` wrapped inside `SignedTxData` -//! as its input as declared in `namada` crate. +//! This tx uses `token::Transfer` as its input. use std::collections::{BTreeMap, BTreeSet}; diff --git a/wasm/tx_update_account/src/lib.rs b/wasm/tx_update_account/src/lib.rs index 63714140c9..8a36757b4e 100644 --- a/wasm/tx_update_account/src/lib.rs +++ b/wasm/tx_update_account/src/lib.rs @@ -1,6 +1,5 @@ //! A tx for updating an account's validity predicate. -//! This tx wraps the validity predicate inside `SignedTxData` as -//! its input as declared in `namada` crate. +//! This tx uses `account::UpdateAccount` as its input. 
use namada_tx_prelude::*; diff --git a/wasm/vp_implicit/src/lib.rs b/wasm/vp_implicit/src/lib.rs index cffc14fe06..d39274ee73 100644 --- a/wasm/vp_implicit/src/lib.rs +++ b/wasm/vp_implicit/src/lib.rs @@ -31,10 +31,10 @@ fn validate_tx( verifiers ); - let BatchedTx { tx, ref cmt } = tx; // Check if this is a governance proposal first let is_gov_proposal = tx - .data(cmt) + .to_ref() + .data() .and_then(|tx_data| { let proposal_id = u64::try_from_slice(&tx_data).ok()?; Some(is_proposal_accepted(ctx, proposal_id)) @@ -45,6 +45,7 @@ fn validate_tx( // Any change from governance is allowed without further checks return Ok(()); } + let BatchedTx { tx, cmt: _ } = tx; let mut gadget = VerifySigGadget::new(); diff --git a/wasm/vp_user/src/lib.rs b/wasm/vp_user/src/lib.rs index 78544afca6..de19ac73c9 100644 --- a/wasm/vp_user/src/lib.rs +++ b/wasm/vp_user/src/lib.rs @@ -30,10 +30,10 @@ fn validate_tx( verifiers ); - let BatchedTx { tx, ref cmt } = tx; // Check if this is a governance proposal first let is_gov_proposal = tx - .data(cmt) + .to_ref() + .data() .and_then(|tx_data| { let proposal_id = u64::try_from_slice(&tx_data).ok()?; Some(is_proposal_accepted(ctx, proposal_id)) @@ -44,6 +44,7 @@ fn validate_tx( // Any change from governance is allowed without further checks return Ok(()); } + let BatchedTx { tx, cmt: _ } = tx; let mut gadget = VerifySigGadget::new(); diff --git a/wasm_for_tests/tx_invalid_data/src/lib.rs b/wasm_for_tests/tx_invalid_data/src/lib.rs index 6c6c866b50..3314fab355 100644 --- a/wasm_for_tests/tx_invalid_data/src/lib.rs +++ b/wasm_for_tests/tx_invalid_data/src/lib.rs @@ -2,16 +2,12 @@ use namada_tx_prelude::*; #[transaction] fn apply_tx(ctx: &mut Ctx, tx_data: BatchedTx) -> TxResult { - let BatchedTx { - tx: signed, - ref cmt, - } = tx_data; - let _data = - signed - .data(cmt) - .ok_or_err_msg("Missing data") - .inspect_err(|_| { - ctx.set_commitment_sentinel(); - })?; + let _data = tx_data + .to_ref() + .data() + .ok_or_err_msg("Missing data") + 
.inspect_err(|_| { + ctx.set_commitment_sentinel(); + })?; Ok(()) } diff --git a/wasm_for_tests/tx_memory_limit/src/lib.rs b/wasm_for_tests/tx_memory_limit/src/lib.rs index afd6c4a72c..2fce8e29c0 100644 --- a/wasm_for_tests/tx_memory_limit/src/lib.rs +++ b/wasm_for_tests/tx_memory_limit/src/lib.rs @@ -2,12 +2,9 @@ use namada_tx_prelude::*; #[transaction] fn apply_tx(_ctx: &mut Ctx, tx_data: BatchedTx) -> TxResult { - let BatchedTx { - tx: tx_data, - ref cmt, - } = tx_data; - let len = usize::try_from_slice(&tx_data.data(cmt).as_ref().unwrap()[..]) - .unwrap(); + let len = + usize::try_from_slice(&tx_data.to_ref().data().as_ref().unwrap()[..]) + .unwrap(); log_string(format!("allocate len {}", len)); let bytes: Vec = vec![6_u8; len]; // use the variable to prevent it from compiler optimizing it away diff --git a/wasm_for_tests/tx_read_storage_key/src/lib.rs b/wasm_for_tests/tx_read_storage_key/src/lib.rs index 14834f76f6..9945caa9d7 100644 --- a/wasm_for_tests/tx_read_storage_key/src/lib.rs +++ b/wasm_for_tests/tx_read_storage_key/src/lib.rs @@ -2,14 +2,11 @@ use namada_tx_prelude::*; #[transaction] fn apply_tx(ctx: &mut Ctx, tx_data: BatchedTx) -> TxResult { - let BatchedTx { - tx: tx_data, - ref cmt, - } = tx_data; // Allocates a memory of size given from the `tx_data (usize)` - let key = - storage::Key::try_from_slice(&tx_data.data(cmt).as_ref().unwrap()[..]) - .unwrap(); + let key = storage::Key::try_from_slice( + &tx_data.to_ref().data().as_ref().unwrap()[..], + ) + .unwrap(); log_string(format!("key {}", key)); let _result: Vec = ctx.read(&key)?.unwrap(); Ok(()) diff --git a/wasm_for_tests/tx_write/src/lib.rs b/wasm_for_tests/tx_write/src/lib.rs index e95dcb9fe5..4611bade70 100644 --- a/wasm_for_tests/tx_write/src/lib.rs +++ b/wasm_for_tests/tx_write/src/lib.rs @@ -19,11 +19,7 @@ fn fatal_msg(msg: &str) -> ! 
{ #[transaction] fn apply_tx(ctx: &mut Ctx, tx_data: BatchedTx) -> TxResult { - let BatchedTx { - tx: signed, - ref cmt, - } = tx_data; - let data = match signed.data(cmt) { + let data = match tx_data.to_ref().data() { Some(data) => { log(&format!("got data ({} bytes)", data.len())); data diff --git a/wasm_for_tests/vp_eval/src/lib.rs b/wasm_for_tests/vp_eval/src/lib.rs index 8fd58d66f2..f4ae6d7f55 100644 --- a/wasm_for_tests/vp_eval/src/lib.rs +++ b/wasm_for_tests/vp_eval/src/lib.rs @@ -9,15 +9,11 @@ fn validate_tx( _verifiers: BTreeSet
, ) -> VpResult { use namada_tx_prelude::transaction::eval_vp::EvalVp; - let BatchedTx { - tx: tx_data, - ref cmt, - } = tx_data; let EvalVp { vp_code_hash, input, }: EvalVp = - EvalVp::try_from_slice(&tx_data.data(cmt).as_ref().unwrap()[..]) + EvalVp::try_from_slice(&tx_data.to_ref().data().as_ref().unwrap()[..]) .unwrap(); ctx.eval(vp_code_hash, input.to_ref()).into_vp_error() } diff --git a/wasm_for_tests/vp_memory_limit/src/lib.rs b/wasm_for_tests/vp_memory_limit/src/lib.rs index 6e107d15c5..7b8e3282da 100644 --- a/wasm_for_tests/vp_memory_limit/src/lib.rs +++ b/wasm_for_tests/vp_memory_limit/src/lib.rs @@ -8,12 +8,9 @@ fn validate_tx( _keys_changed: BTreeSet, _verifiers: BTreeSet
, ) -> VpResult { - let BatchedTx { - tx: tx_data, - ref cmt, - } = tx_data; - let len = usize::try_from_slice(&tx_data.data(cmt).as_ref().unwrap()[..]) - .unwrap(); + let len = + usize::try_from_slice(&tx_data.to_ref().data().as_ref().unwrap()[..]) + .unwrap(); log_string(format!("allocate len {}", len)); let bytes: Vec = vec![6_u8; len]; // use the variable to prevent it from compiler optimizing it away diff --git a/wasm_for_tests/vp_read_storage_key/src/lib.rs b/wasm_for_tests/vp_read_storage_key/src/lib.rs index 680b61097c..1a6d2b5943 100644 --- a/wasm_for_tests/vp_read_storage_key/src/lib.rs +++ b/wasm_for_tests/vp_read_storage_key/src/lib.rs @@ -8,14 +8,11 @@ fn validate_tx( _keys_changed: BTreeSet, _verifiers: BTreeSet
, ) -> VpResult { - let BatchedTx { - tx: tx_data, - ref cmt, - } = tx_data; // Allocates a memory of size given from the `tx_data (usize)` - let key = - storage::Key::try_from_slice(&tx_data.data(cmt).as_ref().unwrap()[..]) - .unwrap(); + let key = storage::Key::try_from_slice( + &tx_data.to_ref().data().as_ref().unwrap()[..], + ) + .unwrap(); log_string(format!("key {}", key)); let _result: Vec = ctx.read_pre(&key).into_vp_error()?.unwrap(); accept()