From f244da8a3f12bead5398a6e424e4ae556e6897f1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Mon, 19 Aug 2024 11:45:50 +0100 Subject: [PATCH 01/73] core: rm unused dep --- Cargo.lock | 20 ++++++++++++-------- crates/core/Cargo.toml | 8 +------- wasm/Cargo.lock | 1 - 3 files changed, 13 insertions(+), 16 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6d05ffcfa2..d12793cbcd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4771,7 +4771,6 @@ dependencies = [ "ethabi", "ethbridge-structs", "eyre", - "futures", "ibc", "ics23", "impl-num-traits", @@ -4788,7 +4787,6 @@ dependencies = [ "num-traits 0.2.17", "num256", "num_enum 0.7.1", - "pretty_assertions", "primitive-types", "proptest", "prost-types 0.12.3", @@ -4802,13 +4800,11 @@ dependencies = [ "sparse-merkle-tree", "tendermint 0.37.0", "tendermint-proto 0.37.0", - "test-log", "thiserror", "tiny-keccak", "tokio", "toml 0.5.11", "tracing", - "tracing-subscriber", "uint", "wasmtimer", "zeroize", @@ -5747,6 +5743,12 @@ dependencies = [ "num-traits 0.2.17", ] +[[package]] +name = "num-conv" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" + [[package]] name = "num-derive" version = "0.3.3" @@ -8314,12 +8316,13 @@ dependencies = [ [[package]] name = "time" -version = "0.3.30" +version = "0.3.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4a34ab300f2dee6e562c10a046fc05e358b29f9bf92277f30c3c8d82275f6f5" +checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885" dependencies = [ "deranged", "itoa", + "num-conv", "powerfmt", "serde", "time-core", @@ -8334,10 +8337,11 @@ checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = "time-macros" -version = "0.2.15" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4ad70d68dba9e1f8aceda7aa6711965dfec1cac869f311a51bd08b3a2ccbce20" +checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf" dependencies = [ + "num-conv", "time-core", ] diff --git a/crates/core/Cargo.toml b/crates/core/Cargo.toml index acecec74ba..affc2339b5 100644 --- a/crates/core/Cargo.toml +++ b/crates/core/Cargo.toml @@ -29,7 +29,7 @@ migrations = [ "linkme", ] benches = ["proptest"] -control_flow = ["futures", "lazy_static", "tokio", "wasmtimer"] +control_flow = ["lazy_static", "tokio", "wasmtimer"] arbitrary = [ "dep:arbitrary", "chrono/arbitrary", @@ -53,7 +53,6 @@ ed25519-consensus.workspace = true ethabi.workspace = true ethbridge-structs.workspace = true eyre.workspace = true -futures = { workspace = true, optional = true } ibc.workspace = true ics23.workspace = true impl-num-traits = "0.1.2" @@ -95,13 +94,8 @@ tokio = { workspace = true, optional = true, default-features = false, features [dev-dependencies] assert_matches.workspace = true -futures.workspace = true -pretty_assertions.workspace = true proptest.workspace = true rand.workspace = true rand_core.workspace = true -test-log.workspace = true tokio = { workspace = true, features = ["full"] } toml.workspace = true -tracing-subscriber.workspace = true -wasmtimer.workspace = true diff --git a/wasm/Cargo.lock b/wasm/Cargo.lock index 98e281ad28..07c61ae787 100644 --- a/wasm/Cargo.lock +++ b/wasm/Cargo.lock @@ -3564,7 +3564,6 @@ dependencies = [ "ethabi", "ethbridge-structs", "eyre", - "futures", "ibc", "ics23", "impl-num-traits", From 98306d67126bf384f6f8c5ccb3f853cf4f27a2cf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Mon, 19 Aug 2024 11:47:28 +0100 Subject: [PATCH 02/73] mv crates/core/src/sign.rs crates/tx/src/ --- crates/{core => tx}/src/sign.rs | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename crates/{core => tx}/src/sign.rs (100%) diff --git a/crates/core/src/sign.rs b/crates/tx/src/sign.rs similarity index 100% rename from 
crates/core/src/sign.rs rename to crates/tx/src/sign.rs From ebafe2ea5fefe23f374c61b83654ba4189bca148 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Mon, 19 Aug 2024 12:13:20 +0100 Subject: [PATCH 03/73] post mv fixes --- crates/core/src/lib.rs | 1 - crates/sdk/src/signing.rs | 3 +-- crates/tx/src/lib.rs | 3 ++- crates/tx/src/sign.rs | 23 +++++++++++------------ crates/tx/src/types.rs | 2 +- 5 files changed, 15 insertions(+), 17 deletions(-) diff --git a/crates/core/src/lib.rs b/crates/core/src/lib.rs index 058fa07f7b..9250726b3e 100644 --- a/crates/core/src/lib.rs +++ b/crates/core/src/lib.rs @@ -63,7 +63,6 @@ pub mod keccak; pub mod key; pub mod masp; pub mod parameters; -pub mod sign; pub mod storage; pub mod string_encoding; pub mod time; diff --git a/crates/sdk/src/signing.rs b/crates/sdk/src/signing.rs index 9e487522cc..737fb34953 100644 --- a/crates/sdk/src/signing.rs +++ b/crates/sdk/src/signing.rs @@ -16,7 +16,6 @@ use namada_core::arith::checked; use namada_core::collections::{HashMap, HashSet}; use namada_core::key::*; use namada_core::masp::{AssetData, ExtendedViewingKey, PaymentAddress, TxId}; -use namada_core::sign::SignatureIndex; use namada_core::token::{Amount, DenominatedAmount}; use namada_governance::storage::proposal::{ InitProposalData, ProposalType, VoteProposalData, @@ -29,7 +28,7 @@ use namada_token::storage_key::balance_key; use namada_tx::data::pgf::UpdateStewardCommission; use namada_tx::data::pos::BecomeValidator; use namada_tx::data::{pos, Fee}; -use namada_tx::{MaspBuilder, Section, Tx}; +use namada_tx::{MaspBuilder, Section, SignatureIndex, Tx}; use rand::rngs::OsRng; use serde::{Deserialize, Serialize}; use tokio::sync::RwLock; diff --git a/crates/tx/src/lib.rs b/crates/tx/src/lib.rs index bb193e5aa2..93402cd1cb 100644 --- a/crates/tx/src/lib.rs +++ b/crates/tx/src/lib.rs @@ -21,13 +21,14 @@ pub mod action; pub mod data; pub mod event; pub mod proto; +mod sign; mod types; use data::TxType; pub use 
either; pub use event::new_tx_event; pub use namada_core::key::SignableEthMessage; -pub use namada_core::sign::SignatureIndex; +pub use sign::{SigIndexDecodeError, SignatureIndex}; pub use types::{ standalone_signature, verify_standalone_sig, Authorization, BatchedTx, BatchedTxRef, Code, Commitment, CompressedAuthorization, Data, DecodeError, diff --git a/crates/tx/src/sign.rs b/crates/tx/src/sign.rs index a456247302..4120b60e00 100644 --- a/crates/tx/src/sign.rs +++ b/crates/tx/src/sign.rs @@ -3,27 +3,26 @@ use std::cmp::Ordering; use data_encoding::HEXUPPER; +use namada_core::address::Address; +use namada_core::borsh::{ + BorshDeserialize, BorshSchema, BorshSerialize, BorshSerializeExt, +}; +use namada_core::key::common; use namada_macros::BorshDeserializer; #[cfg(feature = "migrations")] use namada_migrations::*; use serde::{Deserialize, Serialize}; use thiserror::Error; -use super::address::Address; -use super::key::common; -use crate::borsh::{ - BorshDeserialize, BorshSchema, BorshSerialize, BorshSerializeExt, -}; - #[allow(missing_docs)] #[derive(Error, Debug)] pub enum SigIndexDecodeError { #[error("Invalid signature index bytes: {0}")] - InvalidEncoding(std::io::Error), + Encoding(std::io::Error), #[error("Invalid signature index JSON string")] - InvalidJsonString, + JsonString, #[error("Invalid signature index: {0}")] - InvalidHex(data_encoding::DecodeError), + Hex(data_encoding::DecodeError), } #[derive( @@ -77,11 +76,11 @@ impl SignatureIndex { if let Ok(hex) = serde_json::from_slice::(data) { match HEXUPPER.decode(hex.as_bytes()) { Ok(bytes) => Self::try_from_slice(&bytes) - .map_err(SigIndexDecodeError::InvalidEncoding), - Err(e) => Err(SigIndexDecodeError::InvalidHex(e)), + .map_err(SigIndexDecodeError::Encoding), + Err(e) => Err(SigIndexDecodeError::Hex(e)), } } else { - Err(SigIndexDecodeError::InvalidJsonString) + Err(SigIndexDecodeError::JsonString) } } } diff --git a/crates/tx/src/types.rs b/crates/tx/src/types.rs index 0de78b0811..6b1b371d9d 
100644 --- a/crates/tx/src/types.rs +++ b/crates/tx/src/types.rs @@ -20,7 +20,6 @@ use namada_core::chain::ChainId; use namada_core::collections::{HashMap, HashSet}; use namada_core::key::*; use namada_core::masp::{AssetData, TxId}; -use namada_core::sign::SignatureIndex; use namada_core::storage::{BlockHeight, TxIndex}; use namada_core::time::DateTimeUtc; use namada_macros::BorshDeserializer; @@ -34,6 +33,7 @@ use thiserror::Error; use crate::data::protocol::ProtocolTx; use crate::data::{hash_tx, Fee, GasLimit, TxType, WrapperTx}; use crate::proto; +use crate::sign::SignatureIndex; /// Represents an error in signature verification #[allow(missing_docs)] From f93d23a7a9b56f337af698c8de1a7e37862cab99 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Mon, 19 Aug 2024 12:15:11 +0100 Subject: [PATCH 04/73] mv crates/core/src/account.rs crates/account/src/auth.rs --- crates/{core/src/account.rs => account/src/auth.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename crates/{core/src/account.rs => account/src/auth.rs} (100%) diff --git a/crates/core/src/account.rs b/crates/account/src/auth.rs similarity index 100% rename from crates/core/src/account.rs rename to crates/account/src/auth.rs From 8b777dc32028810d3c51b213543bddf021458cfd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Mon, 19 Aug 2024 12:25:08 +0100 Subject: [PATCH 05/73] post mv fixes --- Cargo.lock | 2 ++ crates/account/src/auth.rs | 11 +++++------ crates/account/src/lib.rs | 3 ++- crates/core/src/lib.rs | 1 - crates/tx/Cargo.toml | 5 +++-- crates/tx/src/types.rs | 2 +- crates/tx_prelude/src/lib.rs | 2 +- crates/vm/Cargo.toml | 2 ++ crates/vm/src/host_env.rs | 13 +++++-------- wasm/Cargo.lock | 2 ++ wasm_for_tests/Cargo.lock | 1 + 11 files changed, 24 insertions(+), 20 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d12793cbcd..5df288af56 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5471,6 +5471,7 @@ dependencies = [ 
"konst", "linkme", "masp_primitives", + "namada_account", "namada_core", "namada_events", "namada_gas", @@ -5530,6 +5531,7 @@ dependencies = [ "byte-unit", "clru", "itertools 0.12.1", + "namada_account", "namada_core", "namada_events", "namada_gas", diff --git a/crates/account/src/auth.rs b/crates/account/src/auth.rs index 45974d5cf5..f981df7826 100644 --- a/crates/account/src/auth.rs +++ b/crates/account/src/auth.rs @@ -1,17 +1,16 @@ -//! Account types +//! Public keys associated with an account for n-signature authorization. use std::collections::BTreeMap; use borsh::{BorshDeserialize, BorshSerialize}; +use namada_core::collections::HashMap; +use namada_core::hints; +use namada_core::key::common; use namada_macros::BorshDeserializer; #[cfg(feature = "migrations")] use namada_migrations::*; use serde::{Deserialize, Serialize}; -use super::key::{common, RefTo}; -use crate::collections::HashMap; -use crate::hints; - #[derive( Debug, Clone, @@ -86,7 +85,7 @@ impl AccountPublicKeysMap { secret_keys .into_iter() .filter_map(|secret_key: common::SecretKey| { - self.get_index_from_public_key(&secret_key.ref_to()) + self.get_index_from_public_key(&secret_key.to_public()) .map(|index| (index, secret_key)) }) .collect() diff --git a/crates/account/src/lib.rs b/crates/account/src/lib.rs index 85e8e633a2..1916e3d976 100644 --- a/crates/account/src/lib.rs +++ b/crates/account/src/lib.rs @@ -19,12 +19,13 @@ clippy::print_stderr )] +mod auth; mod storage; mod storage_key; mod types; +pub use auth::AccountPublicKeysMap; use borsh::{BorshDeserialize, BorshSerialize}; -pub use namada_core::account::AccountPublicKeysMap; use namada_core::address::Address; use namada_core::key::common; use namada_macros::BorshDeserializer; diff --git a/crates/core/src/lib.rs b/crates/core/src/lib.rs index 9250726b3e..2ea47ec655 100644 --- a/crates/core/src/lib.rs +++ b/crates/core/src/lib.rs @@ -47,7 +47,6 @@ pub mod collections { pub use hash_set::HashSet; } -pub mod account; pub mod address; pub 
mod booleans; pub mod chain; diff --git a/crates/tx/Cargo.toml b/crates/tx/Cargo.toml index 43331561c7..6321b51434 100644 --- a/crates/tx/Cargo.toml +++ b/crates/tx/Cargo.toml @@ -14,15 +14,16 @@ version.workspace = true [features] default = ["salt"] -testing = ["proptest", "namada_core/testing"] +testing = ["proptest", "namada_account/testing", "namada_core/testing"] salt = ["rand_core"] migrations = [ "namada_migrations", "linkme", ] -arbitrary = ["dep:arbitrary", "namada_core/arbitrary"] +arbitrary = ["dep:arbitrary", "namada_account/arbitrary", "namada_core/arbitrary"] [dependencies] +namada_account = { path = "../account" } namada_core = { path = "../core" } namada_events = { path = "../events", default-features = false } namada_gas = { path = "../gas" } diff --git a/crates/tx/src/types.rs b/crates/tx/src/types.rs index 6b1b371d9d..bf0aca6f40 100644 --- a/crates/tx/src/types.rs +++ b/crates/tx/src/types.rs @@ -10,7 +10,7 @@ use masp_primitives::transaction::builder::Builder; use masp_primitives::transaction::components::sapling::builder::SaplingMetadata; use masp_primitives::transaction::Transaction; use masp_primitives::zip32::ExtendedFullViewingKey; -use namada_core::account::AccountPublicKeysMap; +use namada_account::AccountPublicKeysMap; use namada_core::address::Address; use namada_core::borsh::schema::{add_definition, Declaration, Definition}; use namada_core::borsh::{ diff --git a/crates/tx_prelude/src/lib.rs b/crates/tx_prelude/src/lib.rs index 8dae255f69..8019053522 100644 --- a/crates/tx_prelude/src/lib.rs +++ b/crates/tx_prelude/src/lib.rs @@ -24,7 +24,7 @@ use core::slice; use std::marker::PhantomData; use masp_primitives::transaction::Transaction; -use namada_core::account::AccountPublicKeysMap; +use namada_account::AccountPublicKeysMap; pub use namada_core::address::Address; pub use namada_core::borsh::{ BorshDeserialize, BorshSerialize, BorshSerializeExt, diff --git a/crates/vm/Cargo.toml b/crates/vm/Cargo.toml index 779433090a..d6e4b0d413 
100644 --- a/crates/vm/Cargo.toml +++ b/crates/vm/Cargo.toml @@ -24,11 +24,13 @@ wasm-runtime = [ "wasmer", ] testing = [ + "namada_account/testing", "namada_core/testing", "tempfile", ] [dependencies] +namada_account = { path = "../account" } namada_core = { path = "../core", features = ["control_flow"] } namada_events = { path = "../events", default-features = false } namada_gas = { path = "../gas" } diff --git a/crates/vm/src/host_env.rs b/crates/vm/src/host_env.rs index 61cbf89ac0..9c762c7fa7 100644 --- a/crates/vm/src/host_env.rs +++ b/crates/vm/src/host_env.rs @@ -6,6 +6,7 @@ use std::collections::BTreeSet; use std::fmt::Debug; use std::num::TryFromIntError; +use namada_account::AccountPublicKeysMap; use namada_core::address::{self, Address, ESTABLISHED_ADDRESS_BYTES_LEN}; use namada_core::arith::{self, checked}; use namada_core::borsh::{BorshDeserialize, BorshSerializeExt}; @@ -1976,10 +1977,8 @@ where .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; vp_host_fns::add_gas(gas_meter, gas)?; let public_keys_map = - namada_core::account::AccountPublicKeysMap::try_from_slice( - &public_keys_map, - ) - .map_err(vp_host_fns::RuntimeError::EncodingError)?; + AccountPublicKeysMap::try_from_slice(&public_keys_map) + .map_err(vp_host_fns::RuntimeError::EncodingError)?; let (signer, gas) = env .memory @@ -2136,10 +2135,8 @@ where .map_err(|e| TxRuntimeError::MemoryError(Box::new(e)))?; tx_charge_gas::(env, gas)?; let public_keys_map = - namada_core::account::AccountPublicKeysMap::try_from_slice( - &public_keys_map, - ) - .map_err(TxRuntimeError::EncodingError)?; + AccountPublicKeysMap::try_from_slice(&public_keys_map) + .map_err(TxRuntimeError::EncodingError)?; tx_charge_gas::(env, gas)?; diff --git a/wasm/Cargo.lock b/wasm/Cargo.lock index 07c61ae787..4513bdab3f 100644 --- a/wasm/Cargo.lock +++ b/wasm/Cargo.lock @@ -4015,6 +4015,7 @@ dependencies = [ "either", "konst", "masp_primitives", + "namada_account", "namada_core", "namada_events", 
"namada_gas", @@ -4069,6 +4070,7 @@ version = "0.43.0" dependencies = [ "borsh", "clru", + "namada_account", "namada_core", "namada_events", "namada_gas", diff --git a/wasm_for_tests/Cargo.lock b/wasm_for_tests/Cargo.lock index 801d966a76..b0da2050a9 100644 --- a/wasm_for_tests/Cargo.lock +++ b/wasm_for_tests/Cargo.lock @@ -2207,6 +2207,7 @@ dependencies = [ "either", "konst", "masp_primitives", + "namada_account", "namada_core", "namada_events", "namada_gas", From b892193083290eaf75f4a9068eef6728828597f8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Mon, 19 Aug 2024 12:27:31 +0100 Subject: [PATCH 06/73] core: mv ProposalBytes into parameters mod --- crates/apps_lib/src/config/genesis.rs | 3 +- .../apps_lib/src/config/genesis/templates.rs | 2 +- crates/core/src/chain.rs | 193 ----------------- crates/core/src/parameters.rs | 205 +++++++++++++++++- crates/node/src/tendermint_node.rs | 3 +- crates/parameters/src/lib.rs | 2 +- crates/proof_of_stake/src/lib.rs | 3 +- 7 files changed, 210 insertions(+), 201 deletions(-) diff --git a/crates/apps_lib/src/config/genesis.rs b/crates/apps_lib/src/config/genesis.rs index b2efd1a227..794ac01c7d 100644 --- a/crates/apps_lib/src/config/genesis.rs +++ b/crates/apps_lib/src/config/genesis.rs @@ -16,13 +16,12 @@ use namada_macros::BorshDeserializer; #[cfg(feature = "migrations")] use namada_migrations::*; use namada_sdk::address::{Address, EstablishedAddress}; -use namada_sdk::chain::ProposalBytes; use namada_sdk::collections::HashMap; use namada_sdk::eth_bridge::EthereumBridgeParams; use namada_sdk::governance::parameters::GovernanceParameters; use namada_sdk::governance::pgf::parameters::PgfParameters; use namada_sdk::key::*; -use namada_sdk::parameters::EpochDuration; +use namada_sdk::parameters::{EpochDuration, ProposalBytes}; use namada_sdk::proof_of_stake::{Dec, GenesisValidator, OwnedPosParams}; use namada_sdk::string_encoding::StringEncoded; use namada_sdk::time::DateTimeUtc; diff --git 
a/crates/apps_lib/src/config/genesis/templates.rs b/crates/apps_lib/src/config/genesis/templates.rs index 29194f29f5..5e3aaf881e 100644 --- a/crates/apps_lib/src/config/genesis/templates.rs +++ b/crates/apps_lib/src/config/genesis/templates.rs @@ -9,11 +9,11 @@ use namada_macros::BorshDeserializer; #[cfg(feature = "migrations")] use namada_migrations::*; use namada_sdk::address::Address; -use namada_sdk::chain::ProposalBytes; use namada_sdk::dec::Dec; use namada_sdk::eth_bridge::storage::parameters::{ Contracts, Erc20WhitelistEntry, MinimumConfirmations, }; +use namada_sdk::parameters::ProposalBytes; use namada_sdk::token::{ Amount, DenominatedAmount, Denomination, NATIVE_MAX_DECIMAL_PLACES, }; diff --git a/crates/core/src/chain.rs b/crates/core/src/chain.rs index 5e93db551f..fa04c9bda2 100644 --- a/crates/core/src/chain.rs +++ b/crates/core/src/chain.rs @@ -2,8 +2,6 @@ // TODO move BlockHash and BlockHeight here from the storage types use std::fmt; -use std::io::{self, Read}; -use std::num::NonZeroU64; use std::str::FromStr; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; @@ -21,186 +19,6 @@ pub const CHAIN_ID_PREFIX_MAX_LEN: usize = 19; /// Separator between chain ID prefix and the generated hash pub const CHAIN_ID_PREFIX_SEP: char = '.'; -/// Configuration parameter for the upper limit on the number -/// of bytes transactions can occupy in a block proposal. 
-#[derive( - Copy, - Clone, - Eq, - PartialEq, - Ord, - PartialOrd, - Hash, - Debug, - BorshSerialize, - BorshDeserializer, -)] -#[repr(transparent)] -pub struct ProposalBytes { - inner: NonZeroU64, -} - -impl BorshDeserialize for ProposalBytes { - fn deserialize_reader(reader: &mut R) -> std::io::Result { - let value: u64 = BorshDeserialize::deserialize_reader(reader)?; - Self::new(value).ok_or_else(|| { - io::Error::new( - io::ErrorKind::InvalidInput, - format!( - "ProposalBytes value must be in the range 1 - {}", - Self::RAW_MAX.get() - ), - ) - }) - } -} - -impl Serialize for ProposalBytes { - fn serialize(&self, s: S) -> Result - where - S: serde::Serializer, - { - s.serialize_u64(self.inner.get()) - } -} - -impl<'de> Deserialize<'de> for ProposalBytes { - fn deserialize(deserializer: D) -> Result - where - D: serde::Deserializer<'de>, - { - struct Visitor; - - impl<'de> serde::de::Visitor<'de> for Visitor { - type Value = ProposalBytes; - - fn expecting(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!( - f, - "a u64 in the range 1 - {}", - ProposalBytes::RAW_MAX.get() - ) - } - - fn visit_u64(self, size: u64) -> Result - where - E: serde::de::Error, - { - ProposalBytes::new(size).ok_or_else(|| { - serde::de::Error::invalid_value( - serde::de::Unexpected::Unsigned(size), - &self, - ) - }) - } - - // NOTE: this is only needed because of a bug in the toml parser - // - https://github.com/toml-rs/toml-rs/issues/256 - // - https://github.com/toml-rs/toml/issues/512 - // - // TODO(namada#3243): switch to `toml_edit` for TOML parsing - fn visit_i64(self, size: i64) -> Result - where - E: serde::de::Error, - { - let max_bytes = u64::try_from(size).map_err(|_e| { - serde::de::Error::invalid_value( - serde::de::Unexpected::Signed(size), - &self, - ) - })?; - ProposalBytes::new(max_bytes).ok_or_else(|| { - serde::de::Error::invalid_value( - serde::de::Unexpected::Signed(size), - &self, - ) - }) - } - } - - deserializer.deserialize_u64(Visitor) - } -} - -impl 
BorshSchema for ProposalBytes { - fn add_definitions_recursively( - definitions: &mut std::collections::BTreeMap< - borsh::schema::Declaration, - borsh::schema::Definition, - >, - ) { - let fields = borsh::schema::Fields::NamedFields(vec![( - "inner".into(), - u64::declaration(), - )]); - let definition = borsh::schema::Definition::Struct { fields }; - definitions.insert(Self::declaration(), definition); - } - - fn declaration() -> borsh::schema::Declaration { - std::any::type_name::().into() - } -} - -impl Default for ProposalBytes { - #[inline] - fn default() -> Self { - Self { - inner: Self::RAW_DEFAULT, - } - } -} - -// constants -impl ProposalBytes { - /// The upper bound of a [`ProposalBytes`] value. - pub const MAX: ProposalBytes = ProposalBytes { - inner: Self::RAW_MAX, - }; - /// The (raw) default value for a [`ProposalBytes`]. - /// - /// This value must be within the range `[1 B, RAW_MAX MiB]`. - const RAW_DEFAULT: NonZeroU64 = Self::RAW_MAX; - /// The (raw) upper bound of a [`ProposalBytes`] value. - /// - /// The maximum space a serialized Tendermint block can - /// occupy is 100 MiB. We reserve 10 MiB for serialization - /// overhead, evidence and header data. For P2P safety - /// reasons (i.e. DoS protection) we hardcap the size of - /// tx data to 6 MiB. - const RAW_MAX: NonZeroU64 = unsafe { - // SAFETY: We are constructing a greater than zero - // value, so the API contract is never violated. - NonZeroU64::new_unchecked(6 * 1024 * 1024) - }; -} - -impl ProposalBytes { - /// Return the number of bytes as a [`u64`] value. - #[inline] - pub const fn get(self) -> u64 { - self.inner.get() - } - - /// Try to construct a new [`ProposalBytes`] instance, - /// from the given `max_bytes` value. - /// - /// This function will return [`None`] if `max_bytes` is not within - /// the inclusive range of 1 to [`ProposalBytes::MAX`]. 
- #[inline] - pub fn new(max_bytes: u64) -> Option { - NonZeroU64::new(max_bytes) - .map(|inner| Self { inner }) - .and_then(|value| { - if value.get() > Self::RAW_MAX.get() { - None - } else { - Some(value) - } - }) - } -} - /// Release default chain ID. Must be [`CHAIN_ID_LENGTH`] long. pub const DEFAULT_CHAIN_ID: &str = "namada-internal.00000000000000"; @@ -434,16 +252,5 @@ mod tests { let errors = chain_id.validate(&genesis_bytes); assert!(errors.is_empty(), "There should be no validation errors {:#?}", errors); } - - /// Test if [`ProposalBytes`] serde serialization is correct. - #[test] - fn test_proposal_size_serialize_roundtrip(s in 1u64..=ProposalBytes::MAX.get()) { - let size = ProposalBytes::new(s).expect("Test failed"); - assert_eq!(size.get(), s); - let json = serde_json::to_string(&size).expect("Test failed"); - let deserialized: ProposalBytes = - serde_json::from_str(&json).expect("Test failed"); - assert_eq!(size, deserialized); - } } } diff --git a/crates/core/src/parameters.rs b/crates/core/src/parameters.rs index 41a03f3991..71db819fd4 100644 --- a/crates/core/src/parameters.rs +++ b/crates/core/src/parameters.rs @@ -1,13 +1,16 @@ //! Protocol parameters types use std::collections::BTreeMap; +use std::fmt; +use std::io::{self, Read}; +use std::num::NonZeroU64; use namada_macros::BorshDeserializer; #[cfg(feature = "migrations")] use namada_migrations::*; +use serde::{Deserialize, Serialize}; use super::address::Address; -use super::chain::ProposalBytes; use super::hash::Hash; use super::time::DurationSecs; use super::token; @@ -101,3 +104,203 @@ impl Default for Parameters { } } } + +/// Configuration parameter for the upper limit on the number +/// of bytes transactions can occupy in a block proposal. 
+#[derive( + Copy, + Clone, + Eq, + PartialEq, + Ord, + PartialOrd, + Hash, + Debug, + BorshSerialize, + BorshDeserializer, +)] +#[repr(transparent)] +pub struct ProposalBytes { + inner: NonZeroU64, +} + +impl BorshDeserialize for ProposalBytes { + fn deserialize_reader(reader: &mut R) -> std::io::Result { + let value: u64 = BorshDeserialize::deserialize_reader(reader)?; + Self::new(value).ok_or_else(|| { + io::Error::new( + io::ErrorKind::InvalidInput, + format!( + "ProposalBytes value must be in the range 1 - {}", + Self::RAW_MAX.get() + ), + ) + }) + } +} + +impl Serialize for ProposalBytes { + fn serialize(&self, s: S) -> Result + where + S: serde::Serializer, + { + s.serialize_u64(self.inner.get()) + } +} + +impl<'de> Deserialize<'de> for ProposalBytes { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + struct Visitor; + + impl<'de> serde::de::Visitor<'de> for Visitor { + type Value = ProposalBytes; + + fn expecting(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + f, + "a u64 in the range 1 - {}", + ProposalBytes::RAW_MAX.get() + ) + } + + fn visit_u64(self, size: u64) -> Result + where + E: serde::de::Error, + { + ProposalBytes::new(size).ok_or_else(|| { + serde::de::Error::invalid_value( + serde::de::Unexpected::Unsigned(size), + &self, + ) + }) + } + + // NOTE: this is only needed because of a bug in the toml parser + // - https://github.com/toml-rs/toml-rs/issues/256 + // - https://github.com/toml-rs/toml/issues/512 + // + // TODO(namada#3243): switch to `toml_edit` for TOML parsing + fn visit_i64(self, size: i64) -> Result + where + E: serde::de::Error, + { + let max_bytes = u64::try_from(size).map_err(|_e| { + serde::de::Error::invalid_value( + serde::de::Unexpected::Signed(size), + &self, + ) + })?; + ProposalBytes::new(max_bytes).ok_or_else(|| { + serde::de::Error::invalid_value( + serde::de::Unexpected::Signed(size), + &self, + ) + }) + } + } + + deserializer.deserialize_u64(Visitor) + } +} + +impl 
BorshSchema for ProposalBytes { + fn add_definitions_recursively( + definitions: &mut std::collections::BTreeMap< + borsh::schema::Declaration, + borsh::schema::Definition, + >, + ) { + let fields = borsh::schema::Fields::NamedFields(vec![( + "inner".into(), + u64::declaration(), + )]); + let definition = borsh::schema::Definition::Struct { fields }; + definitions.insert(Self::declaration(), definition); + } + + fn declaration() -> borsh::schema::Declaration { + std::any::type_name::().into() + } +} + +impl Default for ProposalBytes { + #[inline] + fn default() -> Self { + Self { + inner: Self::RAW_DEFAULT, + } + } +} + +// constants +impl ProposalBytes { + /// The upper bound of a [`ProposalBytes`] value. + pub const MAX: ProposalBytes = ProposalBytes { + inner: Self::RAW_MAX, + }; + /// The (raw) default value for a [`ProposalBytes`]. + /// + /// This value must be within the range `[1 B, RAW_MAX MiB]`. + const RAW_DEFAULT: NonZeroU64 = Self::RAW_MAX; + /// The (raw) upper bound of a [`ProposalBytes`] value. + /// + /// The maximum space a serialized Tendermint block can + /// occupy is 100 MiB. We reserve 10 MiB for serialization + /// overhead, evidence and header data. For P2P safety + /// reasons (i.e. DoS protection) we hardcap the size of + /// tx data to 6 MiB. + const RAW_MAX: NonZeroU64 = unsafe { + // SAFETY: We are constructing a greater than zero + // value, so the API contract is never violated. + NonZeroU64::new_unchecked(6 * 1024 * 1024) + }; +} + +impl ProposalBytes { + /// Return the number of bytes as a [`u64`] value. + #[inline] + pub const fn get(self) -> u64 { + self.inner.get() + } + + /// Try to construct a new [`ProposalBytes`] instance, + /// from the given `max_bytes` value. + /// + /// This function will return [`None`] if `max_bytes` is not within + /// the inclusive range of 1 to [`ProposalBytes::MAX`]. 
+ #[inline] + pub fn new(max_bytes: u64) -> Option { + NonZeroU64::new(max_bytes) + .map(|inner| Self { inner }) + .and_then(|value| { + if value.get() > Self::RAW_MAX.get() { + None + } else { + Some(value) + } + }) + } +} + +#[cfg(test)] +mod tests { + use proptest::prelude::*; + + use super::*; + + proptest! { + /// Test if [`ProposalBytes`] serde serialization is correct. + #[test] + fn test_proposal_size_serialize_roundtrip(s in 1u64..=ProposalBytes::MAX.get()) { + let size = ProposalBytes::new(s).expect("Test failed"); + assert_eq!(size.get(), s); + let json = serde_json::to_string(&size).expect("Test failed"); + let deserialized: ProposalBytes = + serde_json::from_str(&json).expect("Test failed"); + assert_eq!(size, deserialized); + } + } +} diff --git a/crates/node/src/tendermint_node.rs b/crates/node/src/tendermint_node.rs index 04c6e420bf..f90ed014c3 100644 --- a/crates/node/src/tendermint_node.rs +++ b/crates/node/src/tendermint_node.rs @@ -6,7 +6,8 @@ use std::str::FromStr; use namada_apps_lib::cli::namada_version; use namada_apps_lib::config; pub use namada_apps_lib::tendermint_node::*; -use namada_sdk::chain::{ChainId, ProposalBytes}; +use namada_sdk::chain::ChainId; +use namada_sdk::parameters::ProposalBytes; use namada_sdk::storage::BlockHeight; use namada_sdk::time::DateTimeUtc; use thiserror::Error; diff --git a/crates/parameters/src/lib.rs b/crates/parameters/src/lib.rs index 3b2b954de5..df38f5ae12 100644 --- a/crates/parameters/src/lib.rs +++ b/crates/parameters/src/lib.rs @@ -25,8 +25,8 @@ use std::marker::PhantomData; use namada_core::address::{Address, InternalAddress}; use namada_core::arith::checked; -use namada_core::chain::ProposalBytes; use namada_core::storage::BlockHeight; +pub use namada_core::parameters::ProposalBytes; use namada_core::time::DurationSecs; use namada_core::{hints, token}; use namada_storage::{ResultExt, StorageRead, StorageWrite}; diff --git a/crates/proof_of_stake/src/lib.rs b/crates/proof_of_stake/src/lib.rs index 
8b0737bc36..564cff8fc0 100644 --- a/crates/proof_of_stake/src/lib.rs +++ b/crates/proof_of_stake/src/lib.rs @@ -3100,9 +3100,8 @@ fn prune_old_delegations( #[cfg(any(test, feature = "testing"))] /// PoS related utility functions to help set up tests. pub mod test_utils { - use namada_core::chain::ProposalBytes; use namada_core::hash::Hash; - use namada_core::parameters::EpochDuration; + use namada_core::parameters::{EpochDuration, ProposalBytes}; use namada_core::time::DurationSecs; use super::*; From 6b07f767c31ea49b153fc58c44d64395c201b1f4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Mon, 19 Aug 2024 16:38:14 +0100 Subject: [PATCH 07/73] core: mv BlockHeight, BlockHash, Epoch, Epochs, Header into chain mod --- crates/apps_lib/src/cli/wallet.rs | 2 +- crates/apps_lib/src/client/rpc.rs | 3 +- crates/apps_lib/src/config/mod.rs | 3 +- crates/benches/process_wrapper.rs | 2 +- crates/core/src/chain.rs | 683 +++++++++++++++++- crates/core/src/masp.rs | 2 +- crates/core/src/storage.rs | 667 +---------------- crates/encoding_spec/src/main.rs | 3 +- .../transactions/bridge_pool_roots.rs | 2 +- .../transactions/ethereum_events/eth_msgs.rs | 2 +- .../transactions/ethereum_events/events.rs | 3 +- .../transactions/ethereum_events/mod.rs | 3 +- .../src/protocol/transactions/utils.rs | 2 +- .../transactions/validator_set_update/mod.rs | 2 +- .../src/protocol/transactions/votes.rs | 2 +- .../src/protocol/transactions/votes/update.rs | 2 +- .../protocol/validation/bridge_pool_roots.rs | 2 +- .../protocol/validation/ethereum_events.rs | 2 +- .../validation/validator_set_update.rs | 2 +- .../src/storage/eth_bridge_queries.rs | 3 +- crates/ethereum_bridge/src/storage/proof.rs | 2 +- .../src/storage/vote_tallies.rs | 3 +- crates/ethereum_bridge/src/test_utils.rs | 3 +- crates/events/src/extend.rs | 3 +- crates/governance/src/cli/onchain.rs | 2 +- crates/governance/src/cli/validation.rs | 2 +- crates/governance/src/storage/mod.rs | 2 +- 
crates/governance/src/storage/proposal.rs | 4 +- crates/governance/src/utils.rs | 2 +- crates/governance/src/vp/mod.rs | 4 +- crates/ibc/src/actions.rs | 10 +- crates/ibc/src/context/common.rs | 3 +- crates/ibc/src/vp/context.rs | 3 +- crates/ibc/src/vp/mod.rs | 5 +- .../light_sdk/src/reading/asynchronous/pos.rs | 2 +- crates/light_sdk/src/reading/blocking/pos.rs | 2 +- .../light_sdk/src/transaction/governance.rs | 2 +- crates/light_sdk/src/transaction/wrapper.rs | 2 +- crates/merkle_tree/src/eth_bridge_pool.rs | 3 +- crates/merkle_tree/src/lib.rs | 5 +- crates/node/src/bench_utils.rs | 6 +- crates/node/src/dry_run_tx.rs | 3 +- crates/node/src/lib.rs | 3 +- crates/node/src/protocol.rs | 2 +- crates/node/src/shell/governance.rs | 2 +- crates/node/src/shell/init_chain.rs | 2 +- crates/node/src/shell/mod.rs | 10 +- crates/node/src/shell/queries.rs | 2 +- crates/node/src/shell/testing/node.rs | 2 +- .../shell/vote_extensions/bridge_pool_vext.rs | 2 +- crates/node/src/shims/abcipp_shim.rs | 2 +- crates/node/src/storage/mod.rs | 4 +- crates/node/src/tendermint_node.rs | 3 +- crates/parameters/src/lib.rs | 4 +- crates/proof_of_stake/src/epoched.rs | 4 +- crates/proof_of_stake/src/error.rs | 2 +- crates/proof_of_stake/src/lib.rs | 13 +- crates/proof_of_stake/src/parameters.rs | 2 +- crates/proof_of_stake/src/queries.rs | 2 +- crates/proof_of_stake/src/rewards.rs | 2 +- crates/proof_of_stake/src/slashing.rs | 2 +- crates/proof_of_stake/src/storage.rs | 28 +- crates/proof_of_stake/src/storage_key.rs | 4 +- crates/proof_of_stake/src/tests/helpers.rs | 2 +- crates/proof_of_stake/src/tests/mod.rs | 8 +- .../proof_of_stake/src/tests/state_machine.rs | 2 +- .../src/tests/state_machine_v2.rs | 2 +- .../src/tests/test_helper_fns.rs | 3 +- crates/proof_of_stake/src/tests/test_pos.rs | 2 +- .../src/tests/test_slash_and_redel.rs | 2 +- .../src/tests/test_validator.rs | 2 +- crates/proof_of_stake/src/types/mod.rs | 2 +- .../src/validator_set_update.rs | 2 +- crates/sdk/src/args.rs | 3 
+- crates/sdk/src/error.rs | 2 +- crates/sdk/src/eth_bridge/bridge_pool.rs | 2 +- crates/sdk/src/eth_bridge/validator_set.rs | 2 +- crates/sdk/src/events/log/dumb_queries.rs | 2 +- crates/sdk/src/masp.rs | 3 +- .../sdk/src/masp/shielded_sync/dispatcher.rs | 5 +- crates/sdk/src/masp/shielded_sync/utils.rs | 3 +- crates/sdk/src/masp/test_utils.rs | 2 +- crates/sdk/src/migrations.rs | 3 +- crates/sdk/src/queries/mod.rs | 2 +- crates/sdk/src/queries/router.rs | 10 +- crates/sdk/src/queries/shell.rs | 5 +- crates/sdk/src/queries/shell/eth_bridge.rs | 3 +- crates/sdk/src/queries/types.rs | 2 +- crates/sdk/src/queries/vp/pos.rs | 2 +- crates/sdk/src/rpc.rs | 5 +- crates/sdk/src/tx.rs | 2 +- crates/sdk/src/wallet/keys.rs | 2 +- crates/sdk/src/wallet/mod.rs | 2 +- crates/sdk/src/wallet/store.rs | 2 +- crates/state/src/in_memory.rs | 4 +- crates/state/src/lib.rs | 14 +- crates/state/src/wl_state.rs | 4 +- crates/storage/src/db.rs | 6 +- crates/storage/src/lib.rs | 1 + crates/storage/src/mockdb.rs | 5 +- crates/systems/src/parameters.rs | 2 +- crates/systems/src/proof_of_stake.rs | 4 +- crates/tests/src/e2e/helpers.rs | 2 +- crates/tests/src/e2e/ibc_tests.rs | 3 +- crates/tests/src/e2e/ledger_tests.rs | 2 +- crates/tests/src/integration/ledger_tests.rs | 3 +- crates/tests/src/native_vp/pos.rs | 4 +- crates/tests/src/vm_host_env/ibc.rs | 2 +- crates/tests/src/vm_host_env/mod.rs | 2 +- crates/tx/src/types.rs | 4 +- crates/tx_prelude/src/lib.rs | 12 +- crates/vm/src/host_env.rs | 3 +- crates/vote_ext/src/bridge_pool_roots.rs | 2 +- crates/vote_ext/src/ethereum_events.rs | 2 +- crates/vote_ext/src/validator_set_update.rs | 2 +- crates/vp/src/native_vp.rs | 2 +- crates/vp/src/vp_host_fns.rs | 5 +- crates/vp_env/src/lib.rs | 3 +- crates/vp_prelude/src/lib.rs | 10 +- wasm/vp_implicit/src/lib.rs | 2 +- wasm/vp_user/src/lib.rs | 2 +- 121 files changed, 899 insertions(+), 861 deletions(-) diff --git a/crates/apps_lib/src/cli/wallet.rs b/crates/apps_lib/src/cli/wallet.rs index 
14a0eab8cb..9145d3ca80 100644 --- a/crates/apps_lib/src/cli/wallet.rs +++ b/crates/apps_lib/src/cli/wallet.rs @@ -9,7 +9,7 @@ use borsh_ext::BorshSerializeExt; use color_eyre::eyre::Result; use itertools::sorted; use ledger_namada_rs::{BIP44Path, NamadaApp}; -use namada_core::storage::BlockHeight; +use namada_core::chain::BlockHeight; use namada_sdk::address::{Address, DecodeError}; use namada_sdk::io::Io; use namada_sdk::key::*; diff --git a/crates/apps_lib/src/client/rpc.rs b/crates/apps_lib/src/client/rpc.rs index ee04757b7f..9bb20e99c8 100644 --- a/crates/apps_lib/src/client/rpc.rs +++ b/crates/apps_lib/src/client/rpc.rs @@ -11,6 +11,7 @@ use masp_primitives::sapling::Node; use masp_primitives::transaction::components::I128Sum; use masp_primitives::zip32::ExtendedFullViewingKey; use namada_sdk::address::{Address, InternalAddress, MASP}; +use namada_sdk::chain::{BlockHeight, Epoch}; use namada_sdk::collections::{HashMap, HashSet}; use namada_sdk::control_flow::time::{Duration, Instant}; use namada_sdk::events::Event; @@ -37,7 +38,7 @@ use namada_sdk::queries::{Client, RPC}; use namada_sdk::rpc::{ self, enriched_bonds_and_unbonds, query_epoch, TxResponse, }; -use namada_sdk::storage::{BlockHeight, BlockResults, Epoch}; +use namada_sdk::storage::BlockResults; use namada_sdk::tendermint_rpc::endpoint::status; use namada_sdk::token::MaspDigitPos; use namada_sdk::tx::display_batch_resp; diff --git a/crates/apps_lib/src/config/mod.rs b/crates/apps_lib/src/config/mod.rs index eb63dc4a67..428da39e91 100644 --- a/crates/apps_lib/src/config/mod.rs +++ b/crates/apps_lib/src/config/mod.rs @@ -11,9 +11,8 @@ use std::num::NonZeroU64; use std::path::{Path, PathBuf}; use directories::ProjectDirs; -use namada_sdk::chain::ChainId; +use namada_sdk::chain::{BlockHeight, ChainId}; use namada_sdk::collections::HashMap; -use namada_sdk::storage::BlockHeight; use namada_sdk::time::Rfc3339String; use serde::{Deserialize, Serialize}; use thiserror::Error; diff --git 
a/crates/benches/process_wrapper.rs b/crates/benches/process_wrapper.rs index 715df4cc07..cf6e1a7e8e 100644 --- a/crates/benches/process_wrapper.rs +++ b/crates/benches/process_wrapper.rs @@ -1,8 +1,8 @@ use criterion::{criterion_group, criterion_main, Criterion}; use namada_apps_lib::address; +use namada_apps_lib::chain::BlockHeight; use namada_apps_lib::key::RefTo; use namada_apps_lib::state::TxIndex; -use namada_apps_lib::storage::BlockHeight; use namada_apps_lib::time::DateTimeUtc; use namada_apps_lib::token::{Amount, DenominatedAmount, Transfer}; use namada_apps_lib::tx::data::{Fee, WrapperTx}; diff --git a/crates/core/src/chain.rs b/crates/core/src/chain.rs index fa04c9bda2..22774dff61 100644 --- a/crates/core/src/chain.rs +++ b/crates/core/src/chain.rs @@ -1,10 +1,11 @@ //! Chain related data types -// TODO move BlockHash and BlockHeight here from the storage types -use std::fmt; +use std::fmt::{self, Display}; +use std::num::ParseIntError; use std::str::FromStr; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; +use data_encoding::HEXUPPER; use namada_macros::BorshDeserializer; #[cfg(feature = "migrations")] use namada_migrations::*; @@ -12,6 +13,15 @@ use serde::{Deserialize, Serialize}; use sha2::{Digest, Sha256}; use thiserror::Error; +use crate::borsh::BorshSerializeExt; +use crate::bytes::ByteBuf; +use crate::hash::Hash; +use crate::time::DateTimeUtc; + +/// The length of the block's hash string +pub const BLOCK_HASH_LENGTH: usize = 32; +/// The length of the block height +pub const BLOCK_HEIGHT_LENGTH: usize = 8; /// The length of the chain ID string pub const CHAIN_ID_LENGTH: usize = 30; /// The maximum length of chain ID prefix @@ -103,6 +113,432 @@ impl ChainId { } } +/// Height of a block, i.e. the level. The `default` is the +/// [`BlockHeight::sentinel`] value, which doesn't correspond to any block. 
+#[derive( + Clone, + Copy, + BorshSerialize, + BorshDeserialize, + BorshDeserializer, + BorshSchema, + PartialEq, + Eq, + PartialOrd, + Ord, + Hash, + Debug, + Serialize, + Deserialize, +)] +pub struct BlockHeight(pub u64); + +impl Default for BlockHeight { + fn default() -> Self { + Self::sentinel() + } +} + +impl Display for BlockHeight { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.0) + } +} + +impl From for u64 { + fn from(height: BlockHeight) -> Self { + height.0 + } +} + +impl FromStr for BlockHeight { + type Err = ParseIntError; + + fn from_str(s: &str) -> std::result::Result { + Ok(Self(s.parse::()?)) + } +} + +/// Hash of a block as fixed-size byte array +#[derive( + Clone, + Default, + BorshSerialize, + BorshDeserialize, + BorshDeserializer, + PartialEq, + Eq, + PartialOrd, + Ord, + Hash, + Serialize, + Deserialize, +)] +pub struct BlockHash(pub [u8; BLOCK_HASH_LENGTH]); + +impl Display for BlockHash { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", HEXUPPER.encode(&self.0)) + } +} + +impl From for BlockHash { + fn from(hash: Hash) -> Self { + BlockHash(hash.0) + } +} + +impl From for BlockHeight { + fn from(height: u64) -> Self { + BlockHeight(height) + } +} + +impl From for BlockHeight { + fn from(height: tendermint::block::Height) -> Self { + Self(u64::from(height)) + } +} + +impl TryFrom for tendermint::block::Height { + type Error = tendermint::Error; + + fn try_from(height: BlockHeight) -> std::result::Result { + Self::try_from(height.0) + } +} + +impl TryFrom for BlockHeight { + type Error = String; + + fn try_from(value: i64) -> std::result::Result { + value + .try_into() + .map(BlockHeight) + .map_err(|e| format!("Unexpected height value {}, {}", value, e)) + } +} +impl BlockHeight { + /// The first block height 1. 
+ pub const fn first() -> Self { + Self(1) + } + + /// A sentinel value block height 0 may be used before any block is + /// committed or in queries to read from the latest committed block. + pub const fn sentinel() -> Self { + Self(0) + } + + /// Get the height of the next block + pub fn next_height(&self) -> BlockHeight { + BlockHeight( + self.0 + .checked_add(1) + .expect("Block height must not overflow"), + ) + } + + /// Get the height of the previous block + pub fn prev_height(&self) -> Option { + Some(BlockHeight(self.0.checked_sub(1)?)) + } + + /// Checked block height addition. + #[must_use = "this returns the result of the operation, without modifying \ + the original"] + pub fn checked_add(self, rhs: impl Into) -> Option { + let BlockHeight(rhs) = rhs.into(); + Some(Self(self.0.checked_add(rhs)?)) + } + + /// Checked block height subtraction. + #[must_use = "this returns the result of the operation, without modifying \ + the original"] + pub fn checked_sub(self, rhs: impl Into) -> Option { + let BlockHeight(rhs) = rhs.into(); + Some(Self(self.0.checked_sub(rhs)?)) + } +} + +impl TryFrom<&[u8]> for BlockHash { + type Error = ParseBlockHashError; + + fn try_from(value: &[u8]) -> Result { + if value.len() != BLOCK_HASH_LENGTH { + return Err(ParseBlockHashError::ParseBlockHash(format!( + "Unexpected block hash length {}, expected {}", + value.len(), + BLOCK_HASH_LENGTH + ))); + } + let mut hash = [0; 32]; + hash.copy_from_slice(value); + Ok(BlockHash(hash)) + } +} + +#[allow(missing_docs)] +#[derive(Error, Debug)] +pub enum ParseBlockHashError { + #[error("Error parsing block hash: {0}")] + ParseBlockHash(String), +} + +impl TryFrom> for BlockHash { + type Error = ParseBlockHashError; + + fn try_from(value: Vec) -> Result { + value.as_slice().try_into() + } +} + +impl core::fmt::Debug for BlockHash { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let hash = format!("{}", ByteBuf(&self.0)); + 
f.debug_tuple("BlockHash").field(&hash).finish() + } +} + +/// Epoch identifier. Epochs are identified by consecutive numbers. +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +#[derive( + Clone, + Copy, + Default, + Debug, + PartialEq, + Eq, + PartialOrd, + Ord, + Hash, + BorshSerialize, + BorshDeserialize, + BorshDeserializer, + BorshSchema, + Serialize, + Deserialize, +)] +pub struct Epoch(pub u64); + +impl Display for Epoch { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.0) + } +} + +impl FromStr for Epoch { + type Err = ParseIntError; + + fn from_str(s: &str) -> std::result::Result { + let raw: u64 = u64::from_str(s)?; + Ok(Self(raw)) + } +} + +impl Epoch { + /// Change to the next epoch + pub fn next(&self) -> Self { + Self(self.0.checked_add(1).expect("Epoch shouldn't overflow")) + } + + /// Change to the previous epoch. + pub fn prev(&self) -> Option { + Some(Self(self.0.checked_sub(1)?)) + } + + /// Iterate a range of consecutive epochs starting from `self` of a given + /// length. Work-around for `Step` implementation pending on stabilization of . + pub fn iter_range(self, len: u64) -> impl Iterator + Clone { + let start_ix: u64 = self.into(); + let end_ix: u64 = start_ix.checked_add(len).unwrap_or(u64::MAX); + (start_ix..end_ix).map(Epoch::from) + } + + /// Iterate a range of epochs, inclusive of the start and end. + pub fn iter_bounds_inclusive( + start: Self, + end: Self, + ) -> impl Iterator + Clone { + let start_ix = start.0; + let end_ix = end.0; + (start_ix..=end_ix).map(Epoch::from) + } + + /// Checked epoch addition. + #[must_use = "this returns the result of the operation, without modifying \ + the original"] + pub fn checked_add(self, rhs: impl Into) -> Option { + let Epoch(rhs) = rhs.into(); + Some(Self(self.0.checked_add(rhs)?)) + } + + /// Unchecked epoch addition. + /// + /// # Panic + /// + /// Panics on overflow. 
Care must be taken to only use this with trusted + /// values that are known to be in a limited range (e.g. system parameters + /// but not e.g. transaction variables). + pub fn unchecked_add(self, rhs: impl Into) -> Self { + self.checked_add(rhs) + .expect("Epoch addition shouldn't overflow") + } + + /// Checked epoch subtraction. Computes self - rhs, returning None if + /// overflow occurred. + #[must_use = "this returns the result of the operation, without modifying \ + the original"] + pub fn checked_sub(self, rhs: impl Into) -> Option { + let Epoch(rhs) = rhs.into(); + Some(Self(self.0.checked_sub(rhs)?)) + } + + /// Checked epoch division. + #[must_use = "this returns the result of the operation, without modifying \ + the original"] + pub fn checked_div(self, rhs: impl Into) -> Option { + let Epoch(rhs) = rhs.into(); + Some(Self(self.0.checked_div(rhs)?)) + } + + /// Checked epoch multiplication. + #[must_use = "this returns the result of the operation, without modifying \ + the original"] + pub fn checked_mul(self, rhs: impl Into) -> Option { + let Epoch(rhs) = rhs.into(); + Some(Self(self.0.checked_mul(rhs)?)) + } + + /// Checked epoch integral reminder. + #[must_use = "this returns the result of the operation, without modifying \ + the original"] + pub fn checked_rem(self, rhs: impl Into) -> Option { + let Epoch(rhs) = rhs.into(); + Some(Self(self.0.checked_rem(rhs)?)) + } + + /// Checked epoch subtraction. Computes self - rhs, returning default + /// `Epoch(0)` if overflow occurred. 
+ #[must_use = "this returns the result of the operation, without modifying \ + the original"] + pub fn sub_or_default(self, rhs: Epoch) -> Self { + self.checked_sub(rhs).unwrap_or_default() + } +} + +impl From for Epoch { + fn from(epoch: u64) -> Self { + Epoch(epoch) + } +} + +impl From for u64 { + fn from(epoch: Epoch) -> Self { + epoch.0 + } +} + +/// Predecessor block epochs +#[derive( + Clone, + Debug, + Default, + PartialEq, + Eq, + PartialOrd, + Ord, + Hash, + BorshSerialize, + BorshDeserialize, + BorshDeserializer, +)] +pub struct Epochs { + /// The block heights of the first block of each known epoch. + /// Invariant: the values must be sorted in ascending order. + pub first_block_heights: Vec, +} + +impl Epochs { + /// Record start of a new epoch at the given block height + pub fn new_epoch(&mut self, block_height: BlockHeight) { + self.first_block_heights.push(block_height); + } + + /// Look up the epoch of a given block height. If the given height is + /// greater than the current height, the current epoch will be returned even + /// though an epoch for a future block cannot be determined. + pub fn get_epoch(&self, block_height: BlockHeight) -> Option { + if let Some((_first_known_epoch_height, rest)) = + self.first_block_heights.split_first() + { + let mut epoch = Epoch::default(); + for next_block_height in rest { + if block_height < *next_block_height { + return Some(epoch); + } else { + epoch = epoch.next(); + } + } + return Some(epoch); + } + None + } + + /// Look up the starting block height of an epoch at or before a given + /// height. 
+ pub fn get_epoch_start_height( + &self, + height: BlockHeight, + ) -> Option { + for start_height in self.first_block_heights.iter().rev() { + if *start_height <= height { + return Some(*start_height); + } + } + None + } + + /// Look up the starting block height of the given epoch + pub fn get_start_height_of_epoch( + &self, + epoch: Epoch, + ) -> Option { + if epoch.0 > self.first_block_heights.len() as u64 { + return None; + } + let idx = usize::try_from(epoch.0).ok()?; + self.first_block_heights.get(idx).copied() + } + + /// Return all starting block heights for each successive Epoch. + /// + /// __INVARIANT:__ The returned values are sorted in ascending order. + pub fn first_block_heights(&self) -> &[BlockHeight] { + &self.first_block_heights + } +} + +/// The block header data from Tendermint header relevant for Namada storage +#[derive( + Clone, Debug, BorshSerialize, BorshDeserialize, BorshDeserializer, Default, +)] +pub struct Header { + /// Merkle root hash of block + pub hash: Hash, + /// Timestamp associated to block + pub time: DateTimeUtc, + /// Hash of the addresses of the next validator set + pub next_validators_hash: Hash, +} + +impl Header { + /// The number of bytes when this header is encoded + pub fn encoded_len(&self) -> usize { + self.serialize_to_vec().len() + } +} + #[allow(missing_docs)] #[derive(Debug, Error)] pub enum ChainIdValidationError { @@ -233,6 +669,80 @@ impl FromStr for ChainIdPrefix { } } +/// Helpers for testing with storage types. 
+#[cfg(any(test, feature = "testing"))] +pub mod testing { + use std::ops::{Add, AddAssign, Sub}; + + use proptest::prelude::*; + + use super::*; + use crate::time::DateTimeUtc; + + impl Add for BlockHeight + where + T: Into, + { + type Output = BlockHeight; + + fn add(self, rhs: T) -> Self::Output { + self.checked_add(rhs.into()).unwrap() + } + } + + impl AddAssign for BlockHeight + where + T: Into, + { + fn add_assign(&mut self, rhs: T) { + *self = self.checked_add(rhs.into()).unwrap() + } + } + + impl Add for Epoch + where + T: Into, + { + type Output = Epoch; + + fn add(self, rhs: T) -> Self::Output { + self.checked_add(rhs.into()).unwrap() + } + } + + impl Sub for Epoch + where + T: Into, + { + type Output = Epoch; + + fn sub(self, rhs: T) -> Self::Output { + self.checked_sub(rhs.into()).unwrap() + } + } + + prop_compose! { + /// Generate an arbitrary epoch + pub fn arb_epoch()(epoch: u64) -> Epoch { + Epoch(epoch) + } + } + + /// A dummy header used for testing + pub fn get_dummy_header() -> Header { + use crate::time::DurationSecs; + Header { + hash: Hash([0; 32]), + #[allow( + clippy::disallowed_methods, + clippy::arithmetic_side_effects + )] + time: DateTimeUtc::now() + DurationSecs(5), + next_validators_hash: Hash([0; 32]), + } + } +} + #[cfg(test)] mod tests { use proptest::prelude::*; @@ -253,4 +763,173 @@ mod tests { assert!(errors.is_empty(), "There should be no validation errors {:#?}", errors); } } + + #[test] + fn test_predecessor_epochs_and_heights() { + let mut epochs = Epochs { + first_block_heights: vec![BlockHeight::first()], + }; + println!("epochs {:#?}", epochs); + assert_eq!( + epochs.get_start_height_of_epoch(Epoch(0)), + Some(BlockHeight(1)) + ); + assert_eq!(epochs.get_epoch(BlockHeight(0)), Some(Epoch(0))); + + // epoch 1 + epochs.new_epoch(BlockHeight(10)); + println!("epochs {:#?}", epochs); + assert_eq!( + epochs.get_start_height_of_epoch(Epoch(1)), + Some(BlockHeight(10)) + ); + assert_eq!(epochs.get_epoch(BlockHeight(0)), 
Some(Epoch(0))); + assert_eq!(epochs.get_epoch_start_height(BlockHeight(0)), None); + assert_eq!( + epochs.get_epoch_start_height(BlockHeight(1)), + Some(BlockHeight(1)) + ); + assert_eq!(epochs.get_epoch(BlockHeight(9)), Some(Epoch(0))); + assert_eq!( + epochs.get_epoch_start_height(BlockHeight(9)), + Some(BlockHeight(1)) + ); + assert_eq!(epochs.get_epoch(BlockHeight(10)), Some(Epoch(1))); + assert_eq!( + epochs.get_epoch_start_height(BlockHeight(10)), + Some(BlockHeight(10)) + ); + assert_eq!(epochs.get_epoch(BlockHeight(11)), Some(Epoch(1))); + assert_eq!( + epochs.get_epoch_start_height(BlockHeight(11)), + Some(BlockHeight(10)) + ); + assert_eq!(epochs.get_epoch(BlockHeight(100)), Some(Epoch(1))); + assert_eq!( + epochs.get_epoch_start_height(BlockHeight(100)), + Some(BlockHeight(10)) + ); + + // epoch 2 + epochs.new_epoch(BlockHeight(20)); + println!("epochs {:#?}", epochs); + assert_eq!( + epochs.get_start_height_of_epoch(Epoch(2)), + Some(BlockHeight(20)) + ); + assert_eq!(epochs.get_epoch(BlockHeight(0)), Some(Epoch(0))); + assert_eq!(epochs.get_epoch(BlockHeight(9)), Some(Epoch(0))); + assert_eq!(epochs.get_epoch(BlockHeight(10)), Some(Epoch(1))); + assert_eq!(epochs.get_epoch(BlockHeight(11)), Some(Epoch(1))); + assert_eq!( + epochs.get_epoch_start_height(BlockHeight(11)), + Some(BlockHeight(10)) + ); + assert_eq!(epochs.get_epoch(BlockHeight(20)), Some(Epoch(2))); + assert_eq!( + epochs.get_epoch_start_height(BlockHeight(20)), + Some(BlockHeight(20)) + ); + assert_eq!(epochs.get_epoch(BlockHeight(100)), Some(Epoch(2))); + assert_eq!( + epochs.get_epoch_start_height(BlockHeight(100)), + Some(BlockHeight(20)) + ); + + // epoch 3 + epochs.new_epoch(BlockHeight(200)); + println!("epochs {:#?}", epochs); + assert_eq!( + epochs.get_start_height_of_epoch(Epoch(3)), + Some(BlockHeight(200)) + ); + assert_eq!(epochs.get_epoch(BlockHeight(0)), Some(Epoch(0))); + assert_eq!(epochs.get_epoch(BlockHeight(9)), Some(Epoch(0))); + 
assert_eq!(epochs.get_epoch(BlockHeight(10)), Some(Epoch(1))); + assert_eq!(epochs.get_epoch(BlockHeight(11)), Some(Epoch(1))); + assert_eq!(epochs.get_epoch(BlockHeight(20)), Some(Epoch(2))); + assert_eq!(epochs.get_epoch(BlockHeight(100)), Some(Epoch(2))); + assert_eq!( + epochs.get_epoch_start_height(BlockHeight(100)), + Some(BlockHeight(20)) + ); + assert_eq!(epochs.get_epoch(BlockHeight(200)), Some(Epoch(3))); + assert_eq!( + epochs.get_epoch_start_height(BlockHeight(200)), + Some(BlockHeight(200)) + ); + + // epoch 4 + epochs.new_epoch(BlockHeight(300)); + println!("epochs {:#?}", epochs); + assert_eq!( + epochs.get_start_height_of_epoch(Epoch(4)), + Some(BlockHeight(300)) + ); + assert_eq!(epochs.get_epoch(BlockHeight(20)), Some(Epoch(2))); + assert_eq!(epochs.get_epoch(BlockHeight(100)), Some(Epoch(2))); + assert_eq!(epochs.get_epoch(BlockHeight(200)), Some(Epoch(3))); + assert_eq!(epochs.get_epoch(BlockHeight(300)), Some(Epoch(4))); + + // epoch 5 + epochs.new_epoch(BlockHeight(499)); + println!("epochs {:#?}", epochs); + assert_eq!( + epochs.get_start_height_of_epoch(Epoch(5)), + Some(BlockHeight(499)) + ); + assert_eq!(epochs.get_epoch(BlockHeight(20)), Some(Epoch(2))); + assert_eq!(epochs.get_epoch(BlockHeight(100)), Some(Epoch(2))); + assert_eq!(epochs.get_epoch(BlockHeight(200)), Some(Epoch(3))); + assert_eq!(epochs.get_epoch(BlockHeight(300)), Some(Epoch(4))); + assert_eq!(epochs.get_epoch(BlockHeight(499)), Some(Epoch(5))); + + // epoch 6 + epochs.new_epoch(BlockHeight(500)); + println!("epochs {:#?}", epochs); + assert_eq!( + epochs.get_start_height_of_epoch(Epoch(6)), + Some(BlockHeight(500)) + ); + assert_eq!(epochs.get_epoch(BlockHeight(200)), Some(Epoch(3))); + assert_eq!(epochs.get_epoch(BlockHeight(300)), Some(Epoch(4))); + assert_eq!(epochs.get_epoch(BlockHeight(499)), Some(Epoch(5))); + assert_eq!(epochs.get_epoch(BlockHeight(500)), Some(Epoch(6))); + + // epoch 7 + epochs.new_epoch(BlockHeight(550)); + println!("epochs {:#?}", epochs); + 
assert_eq!( + epochs.get_start_height_of_epoch(Epoch(7)), + Some(BlockHeight(550)) + ); + assert_eq!(epochs.get_epoch(BlockHeight(300)), Some(Epoch(4))); + assert_eq!(epochs.get_epoch(BlockHeight(499)), Some(Epoch(5))); + assert_eq!(epochs.get_epoch(BlockHeight(500)), Some(Epoch(6))); + assert_eq!(epochs.get_epoch(BlockHeight(550)), Some(Epoch(7))); + + // epoch 8 + epochs.new_epoch(BlockHeight(600)); + println!("epochs {:#?}", epochs); + assert_eq!( + epochs.get_start_height_of_epoch(Epoch(7)), + Some(BlockHeight(550)) + ); + assert_eq!( + epochs.get_start_height_of_epoch(Epoch(8)), + Some(BlockHeight(600)) + ); + assert_eq!(epochs.get_epoch(BlockHeight(500)), Some(Epoch(6))); + assert_eq!(epochs.get_epoch(BlockHeight(550)), Some(Epoch(7))); + assert_eq!(epochs.get_epoch(BlockHeight(600)), Some(Epoch(8))); + + // try to fetch height values out of range + // at this point, the min known epoch is 7 + for e in [9, 10, 11, 12] { + assert!( + epochs.get_start_height_of_epoch(Epoch(e)).is_none(), + "Epoch: {e}" + ); + } + } } diff --git a/crates/core/src/masp.rs b/crates/core/src/masp.rs index 7ad4053d11..47b7daaf22 100644 --- a/crates/core/src/masp.rs +++ b/crates/core/src/masp.rs @@ -18,8 +18,8 @@ use serde::{Deserialize, Deserializer, Serialize, Serializer}; use sha2::{Digest, Sha256}; use crate::address::{Address, DecodeError, HASH_HEX_LEN, IBC, MASP}; +use crate::chain::Epoch; use crate::impl_display_and_from_str_via_format; -use crate::storage::Epoch; use crate::string_encoding::{ self, MASP_EXT_FULL_VIEWING_KEY_HRP, MASP_EXT_SPENDING_KEY_HRP, MASP_PAYMENT_ADDRESS_HRP, diff --git a/crates/core/src/storage.rs b/crates/core/src/storage.rs index 364931d03f..d67d64225f 100644 --- a/crates/core/src/storage.rs +++ b/crates/core/src/storage.rs @@ -2,14 +2,12 @@ use std::collections::VecDeque; use std::fmt::Display; use std::io::{Read, Write}; -use std::num::ParseIntError; use std::ops::Deref; use std::str::FromStr; use arse_merkle_tree::InternalKey; use 
borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; -use borsh_ext::BorshSerializeExt; -use data_encoding::{BASE32HEX_NOPAD, HEXUPPER}; +use data_encoding::BASE32HEX_NOPAD; use index_set::vec::VecIndexSet; use namada_macros::BorshDeserializer; #[cfg(feature = "migrations")] @@ -19,12 +17,11 @@ use thiserror::Error; use super::key::common; use crate::address::{self, Address, PARAMETERS}; -use crate::bytes::ByteBuf; +use crate::chain::{BlockHeight, Epoch}; use crate::ethereum_events::{GetEventNonce, TransfersToNamada, Uint}; use crate::hash::Hash; use crate::hints; use crate::keccak::{KeccakHash, TryFromError}; -use crate::time::DateTimeUtc; /// The maximum size of an IBC key (in bytes) allowed in merkle-ized storage pub const IBC_KEY_LIMIT: usize = 240; @@ -40,8 +37,6 @@ pub enum Error { InvalidKeySeg(String), #[error("Error parsing key segment: {0}")] ParseKeySeg(String), - #[error("Error parsing block hash: {0}")] - ParseBlockHash(String), #[error("Error parsing tx index: {0}")] ParseTxIndex(String), #[error("The key is empty")] @@ -58,12 +53,8 @@ pub enum Error { /// Result for functions that may fail pub type Result = std::result::Result; -/// The length of the block's hash string -pub const BLOCK_HASH_LENGTH: usize = 32; /// The length of the transaction index pub const TX_INDEX_LENGTH: usize = 4; -/// The length of the block height -pub const BLOCK_HEIGHT_LENGTH: usize = 8; /// The length of the epoch type pub const EPOCH_TYPE_LENGTH: usize = 8; @@ -272,207 +263,6 @@ impl BlockResults { } } -/// Height of a block, i.e. the level. The `default` is the -/// [`BlockHeight::sentinel`] value, which doesn't correspond to any block. 
-#[derive( - Clone, - Copy, - BorshSerialize, - BorshDeserialize, - BorshDeserializer, - BorshSchema, - PartialEq, - Eq, - PartialOrd, - Ord, - Hash, - Debug, - Serialize, - Deserialize, -)] -pub struct BlockHeight(pub u64); - -impl Default for BlockHeight { - fn default() -> Self { - Self::sentinel() - } -} - -impl Display for BlockHeight { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", self.0) - } -} - -impl From for u64 { - fn from(height: BlockHeight) -> Self { - height.0 - } -} - -impl FromStr for BlockHeight { - type Err = ParseIntError; - - fn from_str(s: &str) -> std::result::Result { - Ok(Self(s.parse::()?)) - } -} - -/// Hash of a block as fixed-size byte array -#[derive( - Clone, - Default, - BorshSerialize, - BorshDeserialize, - BorshDeserializer, - PartialEq, - Eq, - PartialOrd, - Ord, - Hash, - Serialize, - Deserialize, -)] -pub struct BlockHash(pub [u8; BLOCK_HASH_LENGTH]); - -impl Display for BlockHash { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", HEXUPPER.encode(&self.0)) - } -} - -impl From for BlockHash { - fn from(hash: Hash) -> Self { - BlockHash(hash.0) - } -} - -impl From for BlockHeight { - fn from(height: u64) -> Self { - BlockHeight(height) - } -} - -impl From for BlockHeight { - fn from(height: tendermint::block::Height) -> Self { - Self(u64::from(height)) - } -} - -impl TryFrom for tendermint::block::Height { - type Error = tendermint::Error; - - fn try_from(height: BlockHeight) -> std::result::Result { - Self::try_from(height.0) - } -} - -impl TryFrom for BlockHeight { - type Error = String; - - fn try_from(value: i64) -> std::result::Result { - value - .try_into() - .map(BlockHeight) - .map_err(|e| format!("Unexpected height value {}, {}", value, e)) - } -} -impl BlockHeight { - /// The first block height 1. 
- pub const fn first() -> Self { - Self(1) - } - - /// A sentinel value block height 0 may be used before any block is - /// committed or in queries to read from the latest committed block. - pub const fn sentinel() -> Self { - Self(0) - } - - /// Get the height of the next block - pub fn next_height(&self) -> BlockHeight { - BlockHeight( - self.0 - .checked_add(1) - .expect("Block height must not overflow"), - ) - } - - /// Get the height of the previous block - pub fn prev_height(&self) -> Option { - Some(BlockHeight(self.0.checked_sub(1)?)) - } - - /// Checked block height addition. - #[must_use = "this returns the result of the operation, without modifying \ - the original"] - pub fn checked_add(self, rhs: impl Into) -> Option { - let BlockHeight(rhs) = rhs.into(); - Some(Self(self.0.checked_add(rhs)?)) - } - - /// Checked block height subtraction. - #[must_use = "this returns the result of the operation, without modifying \ - the original"] - pub fn checked_sub(self, rhs: impl Into) -> Option { - let BlockHeight(rhs) = rhs.into(); - Some(Self(self.0.checked_sub(rhs)?)) - } -} - -impl TryFrom<&[u8]> for BlockHash { - type Error = self::Error; - - fn try_from(value: &[u8]) -> Result { - if value.len() != BLOCK_HASH_LENGTH { - return Err(Error::ParseBlockHash(format!( - "Unexpected block hash length {}, expected {}", - value.len(), - BLOCK_HASH_LENGTH - ))); - } - let mut hash = [0; 32]; - hash.copy_from_slice(value); - Ok(BlockHash(hash)) - } -} - -impl TryFrom> for BlockHash { - type Error = self::Error; - - fn try_from(value: Vec) -> Result { - value.as_slice().try_into() - } -} - -impl core::fmt::Debug for BlockHash { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let hash = format!("{}", ByteBuf(&self.0)); - f.debug_tuple("BlockHash").field(&hash).finish() - } -} - -/// The data from Tendermint header -/// relevant for Namada storage -#[derive( - Clone, Debug, BorshSerialize, BorshDeserialize, BorshDeserializer, Default, -)] -pub 
struct Header { - /// Merkle root hash of block - pub hash: Hash, - /// Timestamp associated to block - pub time: DateTimeUtc, - /// Hash of the addresses of the next validator set - pub next_validators_hash: Hash, -} - -impl Header { - /// The number of bytes when this header is encoded - pub fn encoded_len(&self) -> usize { - self.serialize_to_vec().len() - } -} - /// A storage key is made of storage key segments [`DbKeySeg`], separated by /// [`KEY_SEGMENT_SEPARATOR`]. #[derive( @@ -1162,225 +952,6 @@ impl KeySeg for common::PublicKey { } } -/// Epoch identifier. Epochs are identified by consecutive numbers. -#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] -#[derive( - Clone, - Copy, - Default, - Debug, - PartialEq, - Eq, - PartialOrd, - Ord, - Hash, - BorshSerialize, - BorshDeserialize, - BorshDeserializer, - BorshSchema, - Serialize, - Deserialize, -)] -pub struct Epoch(pub u64); - -impl Display for Epoch { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", self.0) - } -} - -impl FromStr for Epoch { - type Err = ParseIntError; - - fn from_str(s: &str) -> std::result::Result { - let raw: u64 = u64::from_str(s)?; - Ok(Self(raw)) - } -} - -impl Epoch { - /// Change to the next epoch - pub fn next(&self) -> Self { - Self(self.0.checked_add(1).expect("Epoch shouldn't overflow")) - } - - /// Change to the previous epoch. - pub fn prev(&self) -> Option { - Some(Self(self.0.checked_sub(1)?)) - } - - /// Iterate a range of consecutive epochs starting from `self` of a given - /// length. Work-around for `Step` implementation pending on stabilization of . - pub fn iter_range(self, len: u64) -> impl Iterator + Clone { - let start_ix: u64 = self.into(); - let end_ix: u64 = start_ix.checked_add(len).unwrap_or(u64::MAX); - (start_ix..end_ix).map(Epoch::from) - } - - /// Iterate a range of epochs, inclusive of the start and end. 
- pub fn iter_bounds_inclusive( - start: Self, - end: Self, - ) -> impl Iterator + Clone { - let start_ix = start.0; - let end_ix = end.0; - (start_ix..=end_ix).map(Epoch::from) - } - - /// Checked epoch addition. - #[must_use = "this returns the result of the operation, without modifying \ - the original"] - pub fn checked_add(self, rhs: impl Into) -> Option { - let Epoch(rhs) = rhs.into(); - Some(Self(self.0.checked_add(rhs)?)) - } - - /// Unchecked epoch addition. - /// - /// # Panic - /// - /// Panics on overflow. Care must be taken to only use this with trusted - /// values that are known to be in a limited range (e.g. system parameters - /// but not e.g. transaction variables). - pub fn unchecked_add(self, rhs: impl Into) -> Self { - self.checked_add(rhs) - .expect("Epoch addition shouldn't overflow") - } - - /// Checked epoch subtraction. Computes self - rhs, returning None if - /// overflow occurred. - #[must_use = "this returns the result of the operation, without modifying \ - the original"] - pub fn checked_sub(self, rhs: impl Into) -> Option { - let Epoch(rhs) = rhs.into(); - Some(Self(self.0.checked_sub(rhs)?)) - } - - /// Checked epoch division. - #[must_use = "this returns the result of the operation, without modifying \ - the original"] - pub fn checked_div(self, rhs: impl Into) -> Option { - let Epoch(rhs) = rhs.into(); - Some(Self(self.0.checked_div(rhs)?)) - } - - /// Checked epoch multiplication. - #[must_use = "this returns the result of the operation, without modifying \ - the original"] - pub fn checked_mul(self, rhs: impl Into) -> Option { - let Epoch(rhs) = rhs.into(); - Some(Self(self.0.checked_mul(rhs)?)) - } - - /// Checked epoch integral reminder. - #[must_use = "this returns the result of the operation, without modifying \ - the original"] - pub fn checked_rem(self, rhs: impl Into) -> Option { - let Epoch(rhs) = rhs.into(); - Some(Self(self.0.checked_rem(rhs)?)) - } - - /// Checked epoch subtraction. 
Computes self - rhs, returning default - /// `Epoch(0)` if overflow occurred. - #[must_use = "this returns the result of the operation, without modifying \ - the original"] - pub fn sub_or_default(self, rhs: Epoch) -> Self { - self.checked_sub(rhs).unwrap_or_default() - } -} - -impl From for Epoch { - fn from(epoch: u64) -> Self { - Epoch(epoch) - } -} - -impl From for u64 { - fn from(epoch: Epoch) -> Self { - epoch.0 - } -} - -/// Predecessor block epochs -#[derive( - Clone, - Debug, - Default, - PartialEq, - Eq, - PartialOrd, - Ord, - Hash, - BorshSerialize, - BorshDeserialize, - BorshDeserializer, -)] -pub struct Epochs { - /// The block heights of the first block of each known epoch. - /// Invariant: the values must be sorted in ascending order. - pub first_block_heights: Vec, -} - -impl Epochs { - /// Record start of a new epoch at the given block height - pub fn new_epoch(&mut self, block_height: BlockHeight) { - self.first_block_heights.push(block_height); - } - - /// Look up the epoch of a given block height. If the given height is - /// greater than the current height, the current epoch will be returned even - /// though an epoch for a future block cannot be determined. - pub fn get_epoch(&self, block_height: BlockHeight) -> Option { - if let Some((_first_known_epoch_height, rest)) = - self.first_block_heights.split_first() - { - let mut epoch = Epoch::default(); - for next_block_height in rest { - if block_height < *next_block_height { - return Some(epoch); - } else { - epoch = epoch.next(); - } - } - return Some(epoch); - } - None - } - - /// Look up the starting block height of an epoch at or before a given - /// height. 
- pub fn get_epoch_start_height( - &self, - height: BlockHeight, - ) -> Option { - for start_height in self.first_block_heights.iter().rev() { - if *start_height <= height { - return Some(*start_height); - } - } - None - } - - /// Look up the starting block height of the given epoch - pub fn get_start_height_of_epoch( - &self, - epoch: Epoch, - ) -> Option { - if epoch.0 > self.first_block_heights.len() as u64 { - return None; - } - let idx = usize::try_from(epoch.0).ok()?; - self.first_block_heights.get(idx).copied() - } - - /// Return all starting block heights for each successive Epoch. - /// - /// __INVARIANT:__ The returned values are sorted in ascending order. - pub fn first_block_heights(&self) -> &[BlockHeight] { - &self.first_block_heights - } -} - /// A value of a storage prefix iterator. #[derive( Debug, @@ -1564,6 +1135,7 @@ pub mod tests { use super::*; use crate::address::testing::arb_address; + use crate::chain::Epoch; proptest! { /// Tests that any key that doesn't contain reserved prefixes is valid. 
@@ -1772,175 +1344,6 @@ pub mod tests { } } - #[test] - fn test_predecessor_epochs_and_heights() { - let mut epochs = Epochs { - first_block_heights: vec![BlockHeight::first()], - }; - println!("epochs {:#?}", epochs); - assert_eq!( - epochs.get_start_height_of_epoch(Epoch(0)), - Some(BlockHeight(1)) - ); - assert_eq!(epochs.get_epoch(BlockHeight(0)), Some(Epoch(0))); - - // epoch 1 - epochs.new_epoch(BlockHeight(10)); - println!("epochs {:#?}", epochs); - assert_eq!( - epochs.get_start_height_of_epoch(Epoch(1)), - Some(BlockHeight(10)) - ); - assert_eq!(epochs.get_epoch(BlockHeight(0)), Some(Epoch(0))); - assert_eq!(epochs.get_epoch_start_height(BlockHeight(0)), None); - assert_eq!( - epochs.get_epoch_start_height(BlockHeight(1)), - Some(BlockHeight(1)) - ); - assert_eq!(epochs.get_epoch(BlockHeight(9)), Some(Epoch(0))); - assert_eq!( - epochs.get_epoch_start_height(BlockHeight(9)), - Some(BlockHeight(1)) - ); - assert_eq!(epochs.get_epoch(BlockHeight(10)), Some(Epoch(1))); - assert_eq!( - epochs.get_epoch_start_height(BlockHeight(10)), - Some(BlockHeight(10)) - ); - assert_eq!(epochs.get_epoch(BlockHeight(11)), Some(Epoch(1))); - assert_eq!( - epochs.get_epoch_start_height(BlockHeight(11)), - Some(BlockHeight(10)) - ); - assert_eq!(epochs.get_epoch(BlockHeight(100)), Some(Epoch(1))); - assert_eq!( - epochs.get_epoch_start_height(BlockHeight(100)), - Some(BlockHeight(10)) - ); - - // epoch 2 - epochs.new_epoch(BlockHeight(20)); - println!("epochs {:#?}", epochs); - assert_eq!( - epochs.get_start_height_of_epoch(Epoch(2)), - Some(BlockHeight(20)) - ); - assert_eq!(epochs.get_epoch(BlockHeight(0)), Some(Epoch(0))); - assert_eq!(epochs.get_epoch(BlockHeight(9)), Some(Epoch(0))); - assert_eq!(epochs.get_epoch(BlockHeight(10)), Some(Epoch(1))); - assert_eq!(epochs.get_epoch(BlockHeight(11)), Some(Epoch(1))); - assert_eq!( - epochs.get_epoch_start_height(BlockHeight(11)), - Some(BlockHeight(10)) - ); - assert_eq!(epochs.get_epoch(BlockHeight(20)), Some(Epoch(2))); - 
assert_eq!( - epochs.get_epoch_start_height(BlockHeight(20)), - Some(BlockHeight(20)) - ); - assert_eq!(epochs.get_epoch(BlockHeight(100)), Some(Epoch(2))); - assert_eq!( - epochs.get_epoch_start_height(BlockHeight(100)), - Some(BlockHeight(20)) - ); - - // epoch 3 - epochs.new_epoch(BlockHeight(200)); - println!("epochs {:#?}", epochs); - assert_eq!( - epochs.get_start_height_of_epoch(Epoch(3)), - Some(BlockHeight(200)) - ); - assert_eq!(epochs.get_epoch(BlockHeight(0)), Some(Epoch(0))); - assert_eq!(epochs.get_epoch(BlockHeight(9)), Some(Epoch(0))); - assert_eq!(epochs.get_epoch(BlockHeight(10)), Some(Epoch(1))); - assert_eq!(epochs.get_epoch(BlockHeight(11)), Some(Epoch(1))); - assert_eq!(epochs.get_epoch(BlockHeight(20)), Some(Epoch(2))); - assert_eq!(epochs.get_epoch(BlockHeight(100)), Some(Epoch(2))); - assert_eq!( - epochs.get_epoch_start_height(BlockHeight(100)), - Some(BlockHeight(20)) - ); - assert_eq!(epochs.get_epoch(BlockHeight(200)), Some(Epoch(3))); - assert_eq!( - epochs.get_epoch_start_height(BlockHeight(200)), - Some(BlockHeight(200)) - ); - - // epoch 4 - epochs.new_epoch(BlockHeight(300)); - println!("epochs {:#?}", epochs); - assert_eq!( - epochs.get_start_height_of_epoch(Epoch(4)), - Some(BlockHeight(300)) - ); - assert_eq!(epochs.get_epoch(BlockHeight(20)), Some(Epoch(2))); - assert_eq!(epochs.get_epoch(BlockHeight(100)), Some(Epoch(2))); - assert_eq!(epochs.get_epoch(BlockHeight(200)), Some(Epoch(3))); - assert_eq!(epochs.get_epoch(BlockHeight(300)), Some(Epoch(4))); - - // epoch 5 - epochs.new_epoch(BlockHeight(499)); - println!("epochs {:#?}", epochs); - assert_eq!( - epochs.get_start_height_of_epoch(Epoch(5)), - Some(BlockHeight(499)) - ); - assert_eq!(epochs.get_epoch(BlockHeight(20)), Some(Epoch(2))); - assert_eq!(epochs.get_epoch(BlockHeight(100)), Some(Epoch(2))); - assert_eq!(epochs.get_epoch(BlockHeight(200)), Some(Epoch(3))); - assert_eq!(epochs.get_epoch(BlockHeight(300)), Some(Epoch(4))); - 
assert_eq!(epochs.get_epoch(BlockHeight(499)), Some(Epoch(5))); - - // epoch 6 - epochs.new_epoch(BlockHeight(500)); - println!("epochs {:#?}", epochs); - assert_eq!( - epochs.get_start_height_of_epoch(Epoch(6)), - Some(BlockHeight(500)) - ); - assert_eq!(epochs.get_epoch(BlockHeight(200)), Some(Epoch(3))); - assert_eq!(epochs.get_epoch(BlockHeight(300)), Some(Epoch(4))); - assert_eq!(epochs.get_epoch(BlockHeight(499)), Some(Epoch(5))); - assert_eq!(epochs.get_epoch(BlockHeight(500)), Some(Epoch(6))); - - // epoch 7 - epochs.new_epoch(BlockHeight(550)); - println!("epochs {:#?}", epochs); - assert_eq!( - epochs.get_start_height_of_epoch(Epoch(7)), - Some(BlockHeight(550)) - ); - assert_eq!(epochs.get_epoch(BlockHeight(300)), Some(Epoch(4))); - assert_eq!(epochs.get_epoch(BlockHeight(499)), Some(Epoch(5))); - assert_eq!(epochs.get_epoch(BlockHeight(500)), Some(Epoch(6))); - assert_eq!(epochs.get_epoch(BlockHeight(550)), Some(Epoch(7))); - - // epoch 8 - epochs.new_epoch(BlockHeight(600)); - println!("epochs {:#?}", epochs); - assert_eq!( - epochs.get_start_height_of_epoch(Epoch(7)), - Some(BlockHeight(550)) - ); - assert_eq!( - epochs.get_start_height_of_epoch(Epoch(8)), - Some(BlockHeight(600)) - ); - assert_eq!(epochs.get_epoch(BlockHeight(500)), Some(Epoch(6))); - assert_eq!(epochs.get_epoch(BlockHeight(550)), Some(Epoch(7))); - assert_eq!(epochs.get_epoch(BlockHeight(600)), Some(Epoch(8))); - - // try to fetch height values out of range - // at this point, the min known epoch is 7 - for e in [9, 10, 11, 12] { - assert!( - epochs.get_start_height_of_epoch(Epoch(e)).is_none(), - "Epoch: {e}" - ); - } - } - proptest! { /// Ensure that addresses in storage keys preserve the order of the /// addresses. @@ -1988,7 +1391,6 @@ pub mod tests { /// Helpers for testing with storage types. 
#[cfg(any(test, feature = "testing"))] pub mod testing { - use std::ops::{Add, AddAssign, Sub}; use proptest::collection; use proptest::prelude::*; @@ -1996,55 +1398,6 @@ pub mod testing { use super::*; use crate::address::testing::{arb_address, arb_non_internal_address}; - impl Add for BlockHeight - where - T: Into, - { - type Output = BlockHeight; - - fn add(self, rhs: T) -> Self::Output { - self.checked_add(rhs.into()).unwrap() - } - } - - impl AddAssign for BlockHeight - where - T: Into, - { - fn add_assign(&mut self, rhs: T) { - *self = self.checked_add(rhs.into()).unwrap() - } - } - - impl Add for Epoch - where - T: Into, - { - type Output = Epoch; - - fn add(self, rhs: T) -> Self::Output { - self.checked_add(rhs.into()).unwrap() - } - } - - impl Sub for Epoch - where - T: Into, - { - type Output = Epoch; - - fn sub(self, rhs: T) -> Self::Output { - self.checked_sub(rhs.into()).unwrap() - } - } - - prop_compose! { - /// Generate an arbitrary epoch - pub fn arb_epoch()(epoch: u64) -> Epoch { - Epoch(epoch) - } - } - /// Generate an arbitrary [`Key`]. 
pub fn arb_key() -> impl Strategy { prop_oneof![ @@ -2100,18 +1453,4 @@ pub mod testing { 1 => arb_address().prop_map(DbKeySeg::AddressSeg), ] } - - /// A dummy header used for testing - pub fn get_dummy_header() -> Header { - use crate::time::DurationSecs; - Header { - hash: Hash([0; 32]), - #[allow( - clippy::disallowed_methods, - clippy::arithmetic_side_effects - )] - time: DateTimeUtc::now() + DurationSecs(5), - next_validators_hash: Hash([0; 32]), - } - } } diff --git a/crates/encoding_spec/src/main.rs b/crates/encoding_spec/src/main.rs index 48a087485d..d6e85f9429 100644 --- a/crates/encoding_spec/src/main.rs +++ b/crates/encoding_spec/src/main.rs @@ -24,10 +24,11 @@ use itertools::Itertools; use lazy_static::lazy_static; use madato::types::TableRow; use namada_core::address::Address; +use namada_core::chain::Epoch; use namada_core::collections::HashSet; use namada_core::key::ed25519::{PublicKey, Signature}; use namada_core::parameters::Parameters; -use namada_core::storage::{self, Epoch}; +use namada_core::storage; use namada_tx::data::{pos, TxType, WrapperTx}; use {namada_account as account, namada_token as token}; diff --git a/crates/ethereum_bridge/src/protocol/transactions/bridge_pool_roots.rs b/crates/ethereum_bridge/src/protocol/transactions/bridge_pool_roots.rs index b71b5cd104..4b9ea71d97 100644 --- a/crates/ethereum_bridge/src/protocol/transactions/bridge_pool_roots.rs +++ b/crates/ethereum_bridge/src/protocol/transactions/bridge_pool_roots.rs @@ -2,10 +2,10 @@ use eyre::Result; use namada_core::address::Address; +use namada_core::chain::BlockHeight; use namada_core::collections::{HashMap, HashSet}; use namada_core::keccak::keccak_hash; use namada_core::key::{common, SignableEthMessage}; -use namada_core::storage::BlockHeight; use namada_core::token::Amount; use namada_state::{DBIter, StorageHasher, WlState, DB}; use namada_storage::{StorageRead, StorageWrite}; diff --git a/crates/ethereum_bridge/src/protocol/transactions/ethereum_events/eth_msgs.rs 
b/crates/ethereum_bridge/src/protocol/transactions/ethereum_events/eth_msgs.rs index 3a59d08f93..7b03812612 100644 --- a/crates/ethereum_bridge/src/protocol/transactions/ethereum_events/eth_msgs.rs +++ b/crates/ethereum_bridge/src/protocol/transactions/ethereum_events/eth_msgs.rs @@ -63,10 +63,10 @@ mod tests { use std::collections::BTreeSet; use namada_core::address; + use namada_core::chain::BlockHeight; use namada_core::ethereum_events::testing::{ arbitrary_nonce, arbitrary_single_transfer, }; - use namada_core::storage::BlockHeight; use super::*; diff --git a/crates/ethereum_bridge/src/protocol/transactions/ethereum_events/events.rs b/crates/ethereum_bridge/src/protocol/transactions/ethereum_events/events.rs index b112b6c509..f1f6e61efc 100644 --- a/crates/ethereum_bridge/src/protocol/transactions/ethereum_events/events.rs +++ b/crates/ethereum_bridge/src/protocol/transactions/ethereum_events/events.rs @@ -6,6 +6,7 @@ use std::str::FromStr; use borsh::BorshDeserialize; use eyre::{Result, WrapErr}; use namada_core::address::Address; +use namada_core::chain::BlockHeight; use namada_core::collections::HashSet; use namada_core::eth_abi::Encode; use namada_core::eth_bridge_pool::{ @@ -17,7 +18,7 @@ use namada_core::ethereum_events::{ TransfersToNamada, }; use namada_core::hints; -use namada_core::storage::{BlockHeight, Key, KeySeg}; +use namada_core::storage::{Key, KeySeg}; use namada_core::uint::Uint; use namada_parameters::read_epoch_duration_parameter; use namada_state::{DBIter, StorageHasher, WlState, DB}; diff --git a/crates/ethereum_bridge/src/protocol/transactions/ethereum_events/mod.rs b/crates/ethereum_bridge/src/protocol/transactions/ethereum_events/mod.rs index aeadccc617..c601f18587 100644 --- a/crates/ethereum_bridge/src/protocol/transactions/ethereum_events/mod.rs +++ b/crates/ethereum_bridge/src/protocol/transactions/ethereum_events/mod.rs @@ -9,10 +9,11 @@ use borsh::BorshDeserialize; use eth_msgs::EthMsgUpdate; use eyre::Result; use 
namada_core::address::Address; +use namada_core::chain::{BlockHeight, Epoch}; use namada_core::collections::{HashMap, HashSet}; use namada_core::ethereum_events::EthereumEvent; use namada_core::key::common; -use namada_core::storage::{BlockHeight, Epoch, Key}; +use namada_core::storage::Key; use namada_core::token::Amount; use namada_proof_of_stake::storage::read_owned_pos_params; use namada_state::tx_queue::ExpiredTx; diff --git a/crates/ethereum_bridge/src/protocol/transactions/utils.rs b/crates/ethereum_bridge/src/protocol/transactions/utils.rs index 8a012535c5..5180584ebd 100644 --- a/crates/ethereum_bridge/src/protocol/transactions/utils.rs +++ b/crates/ethereum_bridge/src/protocol/transactions/utils.rs @@ -3,8 +3,8 @@ use std::collections::{BTreeMap, BTreeSet}; use eyre::eyre; use itertools::Itertools; use namada_core::address::Address; +use namada_core::chain::BlockHeight; use namada_core::collections::{HashMap, HashSet}; -use namada_core::storage::BlockHeight; use namada_core::token; use namada_proof_of_stake::storage::read_consensus_validator_set_addresses_with_stake; use namada_proof_of_stake::types::WeightedValidator; diff --git a/crates/ethereum_bridge/src/protocol/transactions/validator_set_update/mod.rs b/crates/ethereum_bridge/src/protocol/transactions/validator_set_update/mod.rs index c79281e20a..2419e300c9 100644 --- a/crates/ethereum_bridge/src/protocol/transactions/validator_set_update/mod.rs +++ b/crates/ethereum_bridge/src/protocol/transactions/validator_set_update/mod.rs @@ -2,9 +2,9 @@ use eyre::Result; use namada_core::address::Address; +use namada_core::chain::{BlockHeight, Epoch}; use namada_core::collections::{HashMap, HashSet}; use namada_core::key::common; -use namada_core::storage::{BlockHeight, Epoch}; use namada_core::token::Amount; use namada_state::{DBIter, StorageHasher, WlState, DB}; use namada_systems::governance; diff --git a/crates/ethereum_bridge/src/protocol/transactions/votes.rs 
b/crates/ethereum_bridge/src/protocol/transactions/votes.rs index 6bde8acf13..8094f9f5ea 100644 --- a/crates/ethereum_bridge/src/protocol/transactions/votes.rs +++ b/crates/ethereum_bridge/src/protocol/transactions/votes.rs @@ -6,8 +6,8 @@ use std::collections::{BTreeMap, BTreeSet}; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; use eyre::{eyre, Result}; use namada_core::address::Address; +use namada_core::chain::{BlockHeight, Epoch}; use namada_core::collections::HashMap; -use namada_core::storage::{BlockHeight, Epoch}; use namada_core::token; use namada_core::voting_power::FractionalVotingPower; use namada_macros::BorshDeserializer; diff --git a/crates/ethereum_bridge/src/protocol/transactions/votes/update.rs b/crates/ethereum_bridge/src/protocol/transactions/votes/update.rs index 40820463bf..095632b71e 100644 --- a/crates/ethereum_bridge/src/protocol/transactions/votes/update.rs +++ b/crates/ethereum_bridge/src/protocol/transactions/votes/update.rs @@ -3,8 +3,8 @@ use std::collections::BTreeSet; use borsh::BorshDeserialize; use eyre::{eyre, Result}; use namada_core::address::Address; +use namada_core::chain::BlockHeight; use namada_core::collections::{HashMap, HashSet}; -use namada_core::storage::BlockHeight; use namada_core::token; use namada_state::{DBIter, StorageHasher, StorageRead, WlState, DB}; use namada_systems::governance; diff --git a/crates/ethereum_bridge/src/protocol/validation/bridge_pool_roots.rs b/crates/ethereum_bridge/src/protocol/validation/bridge_pool_roots.rs index a237e50959..7327b1b35b 100644 --- a/crates/ethereum_bridge/src/protocol/validation/bridge_pool_roots.rs +++ b/crates/ethereum_bridge/src/protocol/validation/bridge_pool_roots.rs @@ -1,7 +1,7 @@ //! Bridge pool roots validation. 
+use namada_core::chain::BlockHeight; use namada_core::keccak::keccak_hash; -use namada_core::storage::BlockHeight; use namada_proof_of_stake::queries::{ get_validator_eth_hot_key, get_validator_protocol_key, }; diff --git a/crates/ethereum_bridge/src/protocol/validation/ethereum_events.rs b/crates/ethereum_bridge/src/protocol/validation/ethereum_events.rs index acadffb55c..766f5f354f 100644 --- a/crates/ethereum_bridge/src/protocol/validation/ethereum_events.rs +++ b/crates/ethereum_bridge/src/protocol/validation/ethereum_events.rs @@ -1,6 +1,6 @@ //! Ethereum events validation. -use namada_core::storage::BlockHeight; +use namada_core::chain::BlockHeight; use namada_proof_of_stake::queries::get_validator_protocol_key; use namada_state::{DBIter, StorageHasher, StorageRead, WlState, DB}; use namada_systems::governance; diff --git a/crates/ethereum_bridge/src/protocol/validation/validator_set_update.rs b/crates/ethereum_bridge/src/protocol/validation/validator_set_update.rs index 9e61d1734f..4301c86fc0 100644 --- a/crates/ethereum_bridge/src/protocol/validation/validator_set_update.rs +++ b/crates/ethereum_bridge/src/protocol/validation/validator_set_update.rs @@ -1,6 +1,6 @@ //! Validator set update validation. 
-use namada_core::storage::Epoch; +use namada_core::chain::Epoch; use namada_proof_of_stake::queries::get_validator_eth_hot_key; use namada_state::{DBIter, StorageHasher, WlState, DB}; use namada_systems::governance; diff --git a/crates/ethereum_bridge/src/storage/eth_bridge_queries.rs b/crates/ethereum_bridge/src/storage/eth_bridge_queries.rs index 07eb6eadaa..a1a15d2bb3 100644 --- a/crates/ethereum_bridge/src/storage/eth_bridge_queries.rs +++ b/crates/ethereum_bridge/src/storage/eth_bridge_queries.rs @@ -2,13 +2,14 @@ use borsh::{BorshDeserialize, BorshSerialize}; use namada_core::address::Address; +use namada_core::chain::{BlockHeight, Epoch}; use namada_core::eth_abi::Encode; use namada_core::eth_bridge_pool::PendingTransfer; use namada_core::ethereum_events::{ EthAddress, EthereumEvent, GetEventNonce, TransferToEthereum, Uint, }; use namada_core::keccak::KeccakHash; -use namada_core::storage::{BlockHeight, Epoch, Key as StorageKey}; +use namada_core::storage::Key as StorageKey; use namada_core::voting_power::{EthBridgeVotingPower, FractionalVotingPower}; use namada_core::{hints, token}; use namada_macros::BorshDeserializer; diff --git a/crates/ethereum_bridge/src/storage/proof.rs b/crates/ethereum_bridge/src/storage/proof.rs index 32f4ad9977..71a38bebac 100644 --- a/crates/ethereum_bridge/src/storage/proof.rs +++ b/crates/ethereum_bridge/src/storage/proof.rs @@ -2,12 +2,12 @@ use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; use ethers::abi::Tokenizable; +use namada_core::chain::Epoch; use namada_core::collections::HashMap; use namada_core::eth_abi::Encode; use namada_core::ethereum_events::Uint; use namada_core::keccak::KeccakHash; use namada_core::key::{common, secp256k1}; -use namada_core::storage::Epoch; use namada_core::{eth_abi, ethereum_structs}; use namada_vote_ext::validator_set_update::{ valset_upd_toks_to_hashes, EthAddrBook, VotingPowersMap, VotingPowersMapExt, diff --git a/crates/ethereum_bridge/src/storage/vote_tallies.rs 
b/crates/ethereum_bridge/src/storage/vote_tallies.rs index b4cdb9b208..7e52019593 100644 --- a/crates/ethereum_bridge/src/storage/vote_tallies.rs +++ b/crates/ethereum_bridge/src/storage/vote_tallies.rs @@ -5,10 +5,11 @@ use std::str::FromStr; use borsh::{BorshDeserialize, BorshSerialize}; use namada_core::address::Address; +use namada_core::chain::{BlockHeight, Epoch}; use namada_core::ethereum_events::{EthereumEvent, Uint}; use namada_core::hash::Hash; use namada_core::keccak::{keccak_hash, KeccakHash}; -use namada_core::storage::{BlockHeight, DbKeySeg, Epoch, Key}; +use namada_core::storage::{DbKeySeg, Key}; use namada_macros::{BorshDeserializer, StorageKeys}; #[cfg(feature = "migrations")] use namada_migrations::*; diff --git a/crates/ethereum_bridge/src/test_utils.rs b/crates/ethereum_bridge/src/test_utils.rs index 5301137a24..3ddf39d80c 100644 --- a/crates/ethereum_bridge/src/test_utils.rs +++ b/crates/ethereum_bridge/src/test_utils.rs @@ -7,12 +7,13 @@ use std::num::NonZeroU64; use namada_account::protocol_pk_key; use namada_core::address::testing::wnam; use namada_core::address::{self, Address}; +use namada_core::chain::BlockHeight; use namada_core::collections::HashMap; use namada_core::dec::Dec; use namada_core::ethereum_events::EthAddress; use namada_core::keccak::KeccakHash; use namada_core::key::{self, RefTo}; -use namada_core::storage::{BlockHeight, Key}; +use namada_core::storage::Key; use namada_proof_of_stake::parameters::OwnedPosParams; use namada_proof_of_stake::types::GenesisValidator; use namada_proof_of_stake::{ diff --git a/crates/events/src/extend.rs b/crates/events/src/extend.rs index 5e68fd8126..ae68e66470 100644 --- a/crates/events/src/extend.rs +++ b/crates/events/src/extend.rs @@ -6,11 +6,12 @@ use std::ops::ControlFlow; use std::str::FromStr; use namada_core::address::Address; +use namada_core::chain::BlockHeight; use namada_core::collections::HashMap; use namada_core::hash::Hash; use namada_core::ibc::IbcTxDataRefs; use 
namada_core::masp::MaspTxRefs; -use namada_core::storage::{BlockHeight, TxIndex}; +use namada_core::storage::TxIndex; use serde::Deserializer; use super::*; diff --git a/crates/governance/src/cli/onchain.rs b/crates/governance/src/cli/onchain.rs index 5186009b48..9f67657ab4 100644 --- a/crates/governance/src/cli/onchain.rs +++ b/crates/governance/src/cli/onchain.rs @@ -3,7 +3,7 @@ use std::fmt::Display; use borsh::{BorshDeserialize, BorshSerialize}; use namada_core::address::Address; -use namada_core::storage::Epoch; +use namada_core::chain::Epoch; use namada_core::token; use namada_macros::BorshDeserializer; #[cfg(feature = "migrations")] diff --git a/crates/governance/src/cli/validation.rs b/crates/governance/src/cli/validation.rs index 37c8707fa8..c904cbfeca 100644 --- a/crates/governance/src/cli/validation.rs +++ b/crates/governance/src/cli/validation.rs @@ -2,7 +2,7 @@ use std::collections::BTreeMap; use namada_core::address::Address; use namada_core::arith::{self, checked}; -use namada_core::storage::Epoch; +use namada_core::chain::Epoch; use namada_core::token; use thiserror::Error; diff --git a/crates/governance/src/storage/mod.rs b/crates/governance/src/storage/mod.rs index fea46850ec..c4b59181a2 100644 --- a/crates/governance/src/storage/mod.rs +++ b/crates/governance/src/storage/mod.rs @@ -11,9 +11,9 @@ use std::collections::{BTreeMap, BTreeSet}; use namada_core::address::Address; use namada_core::borsh::BorshDeserialize; +use namada_core::chain::Epoch; use namada_core::collections::HashSet; use namada_core::hash::Hash; -use namada_core::storage::Epoch; use namada_core::token; use namada_state::{ iter_prefix, StorageError, StorageRead, StorageResult, StorageWrite, diff --git a/crates/governance/src/storage/proposal.rs b/crates/governance/src/storage/proposal.rs index 884a2c52f0..60e49c9150 100644 --- a/crates/governance/src/storage/proposal.rs +++ b/crates/governance/src/storage/proposal.rs @@ -4,9 +4,9 @@ use std::fmt::Display; use 
borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; use itertools::Itertools; use namada_core::address::Address; +use namada_core::chain::Epoch; use namada_core::hash::Hash; pub use namada_core::ibc::PGFIbcTarget; -use namada_core::storage::Epoch; use namada_core::token; use namada_macros::BorshDeserializer; #[cfg(feature = "migrations")] @@ -576,9 +576,9 @@ impl Display for StorageProposal { /// Testing helpers and and strategies for governance proposals pub mod testing { use namada_core::address::testing::arb_non_internal_address; + use namada_core::chain::testing::arb_epoch; use namada_core::hash::testing::arb_hash; use namada_core::ibc::core::host::types::identifiers::{ChannelId, PortId}; - use namada_core::storage::testing::arb_epoch; use namada_core::token::testing::arb_amount; use proptest::prelude::*; use proptest::{collection, prop_compose}; diff --git a/crates/governance/src/utils.rs b/crates/governance/src/utils.rs index d93ec8b23d..84c8256546 100644 --- a/crates/governance/src/utils.rs +++ b/crates/governance/src/utils.rs @@ -4,9 +4,9 @@ use std::str::FromStr; use namada_core::address::Address; use namada_core::arith::{self, checked}; use namada_core::borsh::{BorshDeserialize, BorshSerialize}; +use namada_core::chain::Epoch; use namada_core::collections::HashMap; use namada_core::dec::Dec; -use namada_core::storage::Epoch; use namada_core::token; use namada_macros::BorshDeserializer; #[cfg(feature = "migrations")] diff --git a/crates/governance/src/vp/mod.rs b/crates/governance/src/vp/mod.rs index 31865cc52b..dffe0a8626 100644 --- a/crates/governance/src/vp/mod.rs +++ b/crates/governance/src/vp/mod.rs @@ -9,8 +9,8 @@ use std::marker::PhantomData; use borsh::BorshDeserialize; use namada_core::arith::{self, checked}; use namada_core::booleans::{BoolResultUnitExt, ResultBoolExt}; +use namada_core::chain::Epoch; use namada_core::storage; -use namada_core::storage::Epoch; use namada_state::{StateRead, StorageRead}; use namada_systems::{proof_of_stake, 
trans_token as token}; use namada_tx::action::{Action, GovAction, Read}; @@ -1234,10 +1234,10 @@ mod test { }; use namada_core::address::Address; use namada_core::borsh::BorshSerializeExt; + use namada_core::chain::testing::get_dummy_header; use namada_core::key::testing::keypair_1; use namada_core::key::RefTo; use namada_core::parameters::Parameters; - use namada_core::storage::testing::get_dummy_header; use namada_core::time::DateTimeUtc; use namada_gas::{TxGasMeter, VpGasMeter}; use namada_proof_of_stake::bond_tokens; diff --git a/crates/ibc/src/actions.rs b/crates/ibc/src/actions.rs index e438eea3f2..74e5cd2bd2 100644 --- a/crates/ibc/src/actions.rs +++ b/crates/ibc/src/actions.rs @@ -18,8 +18,8 @@ use namada_core::tendermint::Time as TmTime; use namada_core::token::Amount; use namada_events::EmitEvents; use namada_state::{ - Epochs, ResultExt, State, StorageError, StorageRead, StorageResult, - StorageWrite, + BlockHeight, Epoch, Epochs, ResultExt, State, StorageError, StorageRead, + StorageResult, StorageWrite, }; use namada_systems::{parameters, trans_token}; @@ -72,18 +72,18 @@ where self.state.get_chain_id() } - fn get_block_height(&self) -> StorageResult { + fn get_block_height(&self) -> StorageResult { self.state.get_block_height() } fn get_block_header( &self, - height: namada_storage::BlockHeight, + height: BlockHeight, ) -> StorageResult> { StorageRead::get_block_header(self.state, height) } - fn get_block_epoch(&self) -> StorageResult { + fn get_block_epoch(&self) -> StorageResult { self.state.get_block_epoch() } diff --git a/crates/ibc/src/context/common.rs b/crates/ibc/src/context/common.rs index 659a7be072..8d04fb6f9c 100644 --- a/crates/ibc/src/context/common.rs +++ b/crates/ibc/src/context/common.rs @@ -20,7 +20,8 @@ use ibc::core::host::types::identifiers::{ use ibc::primitives::proto::{Any, Protobuf}; use ibc::primitives::Timestamp; use namada_core::address::Address; -use namada_core::storage::{BlockHeight, Key}; +use 
namada_core::chain::BlockHeight; +use namada_core::storage::Key; use namada_core::tendermint::Time as TmTime; use namada_core::token::Amount; use namada_state::{StorageError, StorageRead, StorageWrite}; diff --git a/crates/ibc/src/vp/context.rs b/crates/ibc/src/vp/context.rs index 1d5f88018e..95c5698e21 100644 --- a/crates/ibc/src/vp/context.rs +++ b/crates/ibc/src/vp/context.rs @@ -5,8 +5,9 @@ use std::marker::PhantomData; use namada_core::address::Address; use namada_core::arith::checked; +use namada_core::chain::{BlockHeight, Epoch, Epochs, Header}; use namada_core::collections::{HashMap, HashSet}; -use namada_core::storage::{BlockHeight, Epoch, Epochs, Header, Key, TxIndex}; +use namada_core::storage::{Key, TxIndex}; use namada_events::Event; use namada_gas::MEMORY_ACCESS_GAS_PER_BYTE; use namada_state::write_log::StorageModification; diff --git a/crates/ibc/src/vp/mod.rs b/crates/ibc/src/vp/mod.rs index 45d8a38cfc..2bead88e02 100644 --- a/crates/ibc/src/vp/mod.rs +++ b/crates/ibc/src/vp/mod.rs @@ -489,9 +489,10 @@ mod tests { }; use namada_core::address::InternalAddress; use namada_core::borsh::{BorshDeserialize, BorshSerializeExt}; + use namada_core::chain::testing::get_dummy_header; + use namada_core::chain::{BlockHeight, Epoch}; use namada_core::key::testing::keypair_1; - use namada_core::storage::testing::get_dummy_header; - use namada_core::storage::{BlockHeight, Epoch, TxIndex}; + use namada_core::storage::TxIndex; use namada_core::tendermint::time::Time as TmTime; use namada_core::time::DurationSecs; use namada_gas::{TxGasMeter, VpGasMeter}; diff --git a/crates/light_sdk/src/reading/asynchronous/pos.rs b/crates/light_sdk/src/reading/asynchronous/pos.rs index 046d3a143e..e95fedbe01 100644 --- a/crates/light_sdk/src/reading/asynchronous/pos.rs +++ b/crates/light_sdk/src/reading/asynchronous/pos.rs @@ -1,5 +1,6 @@ use std::collections::BTreeSet; +use namada_sdk::chain::{BlockHeight, Epoch}; use namada_sdk::collections::{HashMap, HashSet}; use 
namada_sdk::key::common; use namada_sdk::proof_of_stake::types::{ @@ -8,7 +9,6 @@ use namada_sdk::proof_of_stake::types::{ }; use namada_sdk::proof_of_stake::PosParams; use namada_sdk::queries::vp::pos::EnrichedBondsAndUnbondsDetails; -use namada_sdk::storage::{BlockHeight, Epoch}; use super::*; diff --git a/crates/light_sdk/src/reading/blocking/pos.rs b/crates/light_sdk/src/reading/blocking/pos.rs index 363035ccf0..b41cead01d 100644 --- a/crates/light_sdk/src/reading/blocking/pos.rs +++ b/crates/light_sdk/src/reading/blocking/pos.rs @@ -1,6 +1,7 @@ use std::collections::BTreeSet; use namada_sdk::address::Address; +use namada_sdk::chain::{BlockHeight, Epoch}; use namada_sdk::collections::{HashMap, HashSet}; use namada_sdk::key::common; use namada_sdk::proof_of_stake::types::{ @@ -8,7 +9,6 @@ use namada_sdk::proof_of_stake::types::{ }; use namada_sdk::proof_of_stake::PosParams; use namada_sdk::queries::vp::pos::EnrichedBondsAndUnbondsDetails; -use namada_sdk::storage::{BlockHeight, Epoch}; use super::*; diff --git a/crates/light_sdk/src/transaction/governance.rs b/crates/light_sdk/src/transaction/governance.rs index 4cfab99828..4f37e92575 100644 --- a/crates/light_sdk/src/transaction/governance.rs +++ b/crates/light_sdk/src/transaction/governance.rs @@ -1,8 +1,8 @@ use namada_sdk::address::Address; +use namada_sdk::chain::Epoch; use namada_sdk::governance::{ProposalType, ProposalVote}; use namada_sdk::hash::Hash; use namada_sdk::key::common; -use namada_sdk::storage::Epoch; use namada_sdk::token::DenominatedAmount; use namada_sdk::tx::data::GasLimit; use namada_sdk::tx::{Authorization, Tx, TxError}; diff --git a/crates/light_sdk/src/transaction/wrapper.rs b/crates/light_sdk/src/transaction/wrapper.rs index 562f141b4f..b8d9028e2e 100644 --- a/crates/light_sdk/src/transaction/wrapper.rs +++ b/crates/light_sdk/src/transaction/wrapper.rs @@ -1,6 +1,6 @@ use namada_sdk::hash::Hash; use namada_sdk::key::common; -use namada_sdk::storage::Epoch; +use 
namada_sdk::chain::Epoch; use namada_sdk::tx::data::{Fee, GasLimit}; use namada_sdk::tx::{Section, Signature, Signer, Tx, TxError}; diff --git a/crates/merkle_tree/src/eth_bridge_pool.rs b/crates/merkle_tree/src/eth_bridge_pool.rs index 27bdd7087c..588bba1019 100644 --- a/crates/merkle_tree/src/eth_bridge_pool.rs +++ b/crates/merkle_tree/src/eth_bridge_pool.rs @@ -4,12 +4,13 @@ use std::collections::{BTreeMap, BTreeSet}; use eyre::eyre; use namada_core::borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; +use namada_core::chain::BlockHeight; use namada_core::eth_abi::{Encode, Token}; use namada_core::eth_bridge_pool::PendingTransfer; use namada_core::hash::Hash; use namada_core::keccak::{keccak_hash, KeccakHash}; use namada_core::storage; -use namada_core::storage::{BlockHeight, DbKeySeg}; +use namada_core::storage::DbKeySeg; use namada_macros::BorshDeserializer; #[cfg(feature = "migrations")] use namada_migrations::*; diff --git a/crates/merkle_tree/src/lib.rs b/crates/merkle_tree/src/lib.rs index cab4ae92c4..b8aa33129c 100644 --- a/crates/merkle_tree/src/lib.rs +++ b/crates/merkle_tree/src/lib.rs @@ -35,12 +35,13 @@ use ics23_specs::ibc_leaf_spec; use namada_core::address::{Address, InternalAddress}; use namada_core::borsh::{BorshDeserialize, BorshSerialize, BorshSerializeExt}; use namada_core::bytes::ByteBuf; +use namada_core::chain::{BlockHeight, Epoch}; use namada_core::eth_bridge_pool::{is_pending_transfer_key, PendingTransfer}; use namada_core::hash::{Hash, StorageHasher}; use namada_core::keccak::KeccakHash; use namada_core::storage::{ - self, BlockHeight, DbKeySeg, Epoch, Error as StorageError, Key, KeySeg, - StringKey, TreeBytes, TreeKeyError, IBC_KEY_LIMIT, + self, DbKeySeg, Error as StorageError, Key, KeySeg, StringKey, TreeBytes, + TreeKeyError, IBC_KEY_LIMIT, }; use namada_core::{decode, DecodeError}; use namada_macros::BorshDeserializer; diff --git a/crates/node/src/bench_utils.rs b/crates/node/src/bench_utils.rs index e552d2a892..d6b6b8a256 
100644 --- a/crates/node/src/bench_utils.rs +++ b/crates/node/src/bench_utils.rs @@ -23,7 +23,8 @@ use namada_apps_lib::cli::Context; use namada_apps_lib::wallet::{defaults, CliWalletUtils}; use namada_sdk::address::{self, Address, InternalAddress, MASP}; use namada_sdk::args::ShieldedSync; -use namada_sdk::chain::ChainId; +use namada_sdk::chain::testing::get_dummy_header; +use namada_sdk::chain::{BlockHeight, ChainId, Epoch}; use namada_sdk::events::extend::{ ComposeEvent, MaspTxBatchRefs, MaspTxBlockIndex, }; @@ -80,8 +81,7 @@ use namada_sdk::queries::{ Client, EncodedResponseQuery, RequestCtx, RequestQuery, Router, RPC, }; use namada_sdk::state::StorageRead; -use namada_sdk::storage::testing::get_dummy_header; -use namada_sdk::storage::{BlockHeight, Epoch, Key, KeySeg, TxIndex}; +use namada_sdk::storage::{Key, KeySeg, TxIndex}; use namada_sdk::time::DateTimeUtc; use namada_sdk::token::{self, Amount, DenominatedAmount, Transfer}; use namada_sdk::tx::data::pos::Bond; diff --git a/crates/node/src/dry_run_tx.rs b/crates/node/src/dry_run_tx.rs index 20523d7b48..797718b5cc 100644 --- a/crates/node/src/dry_run_tx.rs +++ b/crates/node/src/dry_run_tx.rs @@ -138,6 +138,7 @@ where mod test { use borsh::BorshDeserialize; use borsh_ext::BorshSerializeExt; + use namada_sdk::chain::BlockHeight; use namada_sdk::events::log::EventLog; use namada_sdk::hash::Hash; use namada_sdk::queries::{ @@ -145,7 +146,7 @@ mod test { }; use namada_sdk::state::testing::TestState; use namada_sdk::state::StorageWrite; - use namada_sdk::storage::{BlockHeight, Key}; + use namada_sdk::storage::Key; use namada_sdk::tendermint_rpc::{Error as RpcError, Response}; use namada_sdk::tx::data::TxType; use namada_sdk::tx::{Code, Data, Tx}; diff --git a/crates/node/src/lib.rs b/crates/node/src/lib.rs index db33bdf666..38cdf78c6f 100644 --- a/crates/node/src/lib.rs +++ b/crates/node/src/lib.rs @@ -40,10 +40,11 @@ use namada_apps_lib::config::utils::{ convert_tm_addr_to_socket_addr, num_of_threads, }; use 
namada_apps_lib::{config, wasm_loader}; +use namada_sdk::chain::BlockHeight; use namada_sdk::eth_bridge::ethers::providers::{Http, Provider}; use namada_sdk::migrations::ScheduledMigration; use namada_sdk::state::{ProcessProposalCachedResult, StateRead, DB}; -use namada_sdk::storage::{BlockHeight, DbColFam}; +use namada_sdk::storage::DbColFam; use namada_sdk::tendermint::abci::request::CheckTxKind; use namada_sdk::tendermint::abci::response::ProcessProposal; use namada_sdk::time::DateTimeUtc; diff --git a/crates/node/src/protocol.rs b/crates/node/src/protocol.rs index a53cb32b97..b96b80545b 100644 --- a/crates/node/src/protocol.rs +++ b/crates/node/src/protocol.rs @@ -1450,6 +1450,7 @@ fn merge_vp_results( #[cfg(test)] mod tests { use eyre::Result; + use namada_sdk::chain::BlockHeight; use namada_sdk::collections::HashMap; use namada_sdk::eth_bridge::protocol::transactions::votes::{ EpochedVotingPower, Votes, @@ -1461,7 +1462,6 @@ mod tests { use namada_sdk::ethereum_events::testing::DAI_ERC20_ETH_ADDRESS; use namada_sdk::ethereum_events::{EthereumEvent, TransferToNamada}; use namada_sdk::keccak::keccak_hash; - use namada_sdk::storage::BlockHeight; use namada_sdk::tx::{SignableEthMessage, Signed}; use namada_sdk::voting_power::FractionalVotingPower; use namada_sdk::{address, key}; diff --git a/crates/node/src/shell/governance.rs b/crates/node/src/shell/governance.rs index cc4a3a3412..7b6a66e0b4 100644 --- a/crates/node/src/shell/governance.rs +++ b/crates/node/src/shell/governance.rs @@ -1,3 +1,4 @@ +use namada_sdk::chain::Epoch; use namada_sdk::collections::HashMap; use namada_sdk::events::extend::{ComposeEvent, Height, UserAccount}; use namada_sdk::events::{EmitEvents, EventLevel}; @@ -23,7 +24,6 @@ use namada_sdk::proof_of_stake::storage::{ }; use namada_sdk::proof_of_stake::types::{BondId, ValidatorState}; use namada_sdk::state::StorageWrite; -use namada_sdk::storage::Epoch; use namada_sdk::token::event::{TokenEvent, TokenOperation}; use 
namada_sdk::token::read_balance; use namada_sdk::tx::{Code, Data}; diff --git a/crates/node/src/shell/init_chain.rs b/crates/node/src/shell/init_chain.rs index 40ab7a6921..aa18d62c98 100644 --- a/crates/node/src/shell/init_chain.rs +++ b/crates/node/src/shell/init_chain.rs @@ -590,7 +590,7 @@ where genesis: &genesis::chain::Finalized, vp_cache: &mut HashMap>, params: &PosParams, - current_epoch: namada_sdk::storage::Epoch, + current_epoch: namada_sdk::chain::Epoch, ) -> ControlFlow<()> { if let Some(txs) = genesis.transactions.validator_account.as_ref() { for FinalizedValidatorAccountTx { diff --git a/crates/node/src/shell/mod.rs b/crates/node/src/shell/mod.rs index 2be5e16004..7330ebb692 100644 --- a/crates/node/src/shell/mod.rs +++ b/crates/node/src/shell/mod.rs @@ -37,7 +37,7 @@ use borsh::BorshDeserialize; use borsh_ext::BorshSerializeExt; use namada_apps_lib::wallet::{self, ValidatorData, ValidatorKeys}; use namada_sdk::address::Address; -use namada_sdk::chain::ChainId; +use namada_sdk::chain::{BlockHeight, ChainId}; use namada_sdk::eth_bridge::protocol::validation::bridge_pool_roots::validate_bp_roots_vext; use namada_sdk::eth_bridge::protocol::validation::ethereum_events::validate_eth_events_vext; use namada_sdk::eth_bridge::protocol::validation::validator_set_update::validate_valset_upd_vext; @@ -57,7 +57,7 @@ use namada_sdk::state::{ DBIter, FullAccessState, Sha256Hasher, StorageHasher, StorageRead, TempWlState, WlState, DB, EPOCH_SWITCH_BLOCKS_DELAY, }; -use namada_sdk::storage::{BlockHeight, Key, TxIndex}; +use namada_sdk::storage::{Key, TxIndex}; use namada_sdk::tendermint::AppHash; use namada_sdk::time::DateTimeUtc; pub use namada_sdk::tx::data::ResultCode; @@ -698,8 +698,8 @@ where /// Get the next epoch for which we can request validator set changed pub fn get_validator_set_update_epoch( &self, - current_epoch: namada_sdk::storage::Epoch, - ) -> namada_sdk::storage::Epoch { + current_epoch: namada_sdk::chain::Epoch, + ) -> namada_sdk::chain::Epoch { 
if let Some(delay) = self.state.in_mem().update_epoch_blocks_delay { if delay == EPOCH_SWITCH_BLOCKS_DELAY { // If we're about to update validator sets for the @@ -1990,7 +1990,7 @@ pub mod test_utils { mod shell_tests { use eth_bridge::storage::eth_bridge_queries::is_bridge_comptime_enabled; use namada_sdk::address; - use namada_sdk::storage::Epoch; + use namada_sdk::chain::Epoch; use namada_sdk::token::read_denom; use namada_sdk::tx::data::protocol::{ProtocolTx, ProtocolTxType}; use namada_sdk::tx::data::Fee; diff --git a/crates/node/src/shell/queries.rs b/crates/node/src/shell/queries.rs index 4387548d45..3a37a4ad45 100644 --- a/crates/node/src/shell/queries.rs +++ b/crates/node/src/shell/queries.rs @@ -80,11 +80,11 @@ where #[allow(clippy::cast_possible_truncation)] #[cfg(test)] mod test_queries { + use namada_sdk::chain::Epoch; use namada_sdk::eth_bridge::storage::eth_bridge_queries::is_bridge_comptime_enabled; use namada_sdk::eth_bridge::SendValsetUpd; use namada_sdk::proof_of_stake::storage::read_consensus_validator_set_addresses_with_stake; use namada_sdk::proof_of_stake::types::WeightedValidator; - use namada_sdk::storage::Epoch; use namada_sdk::tendermint::abci::types::VoteInfo; use super::*; diff --git a/crates/node/src/shell/testing/node.rs b/crates/node/src/shell/testing/node.rs index 1fdaeb250c..86af6d494d 100644 --- a/crates/node/src/shell/testing/node.rs +++ b/crates/node/src/shell/testing/node.rs @@ -11,6 +11,7 @@ use data_encoding::HEXUPPER; use itertools::Either; use lazy_static::lazy_static; use namada_sdk::address::Address; +use namada_sdk::chain::{BlockHeight, Epoch, Header}; use namada_sdk::collections::HashMap; use namada_sdk::control_flow::time::Duration; use namada_sdk::eth_bridge::oracle::config::Config as OracleConfig; @@ -31,7 +32,6 @@ use namada_sdk::queries::{ use namada_sdk::state::{ LastBlock, Sha256Hasher, StorageRead, EPOCH_SWITCH_BLOCKS_DELAY, }; -use namada_sdk::storage::{BlockHeight, Epoch, Header}; use 
namada_sdk::tendermint::abci::response::Info; use namada_sdk::tendermint::abci::types::VoteInfo; use namada_sdk::tendermint_proto::google::protobuf::Timestamp; diff --git a/crates/node/src/shell/vote_extensions/bridge_pool_vext.rs b/crates/node/src/shell/vote_extensions/bridge_pool_vext.rs index bedaea167b..b46dbd8a69 100644 --- a/crates/node/src/shell/vote_extensions/bridge_pool_vext.rs +++ b/crates/node/src/shell/vote_extensions/bridge_pool_vext.rs @@ -55,6 +55,7 @@ where #[cfg(test)] mod test_bp_vote_extensions { use namada_apps_lib::wallet::defaults::{bertha_address, bertha_keypair}; + use namada_sdk::chain::BlockHeight; use namada_sdk::eth_bridge::protocol::validation::bridge_pool_roots::validate_bp_roots_vext; use namada_sdk::eth_bridge::storage::bridge_pool::get_key_from_hash; use namada_sdk::eth_bridge::storage::eth_bridge_queries::{ @@ -74,7 +75,6 @@ mod test_bp_vote_extensions { become_validator, BecomeValidator, Epoch, }; use namada_sdk::state::StorageWrite; - use namada_sdk::storage::BlockHeight; use namada_sdk::tendermint::abci::types::VoteInfo; use namada_sdk::tx::Signed; use namada_sdk::{governance, token}; diff --git a/crates/node/src/shims/abcipp_shim.rs b/crates/node/src/shims/abcipp_shim.rs index 00b1c6c01b..14d3709f58 100644 --- a/crates/node/src/shims/abcipp_shim.rs +++ b/crates/node/src/shims/abcipp_shim.rs @@ -4,10 +4,10 @@ use std::pin::Pin; use std::task::{Context, Poll}; use futures::future::FutureExt; +use namada_sdk::chain::BlockHeight; use namada_sdk::hash::Hash; use namada_sdk::migrations::ScheduledMigration; use namada_sdk::state::{ProcessProposalCachedResult, DB}; -use namada_sdk::storage::BlockHeight; use namada_sdk::tendermint::abci::response::ProcessProposal; use namada_sdk::time::{DateTimeUtc, Utc}; use namada_sdk::tx::data::hash_tx; diff --git a/crates/node/src/storage/mod.rs b/crates/node/src/storage/mod.rs index 628cb47eb7..8466bd874a 100644 --- a/crates/node/src/storage/mod.rs +++ b/crates/node/src/storage/mod.rs @@ -55,7 
+55,7 @@ fn new_blake2b() -> Blake2b { mod tests { use borsh::BorshDeserialize; use itertools::Itertools; - use namada_sdk::chain::ChainId; + use namada_sdk::chain::{BlockHeight, ChainId}; use namada_sdk::collections::HashMap; use namada_sdk::eth_bridge::storage::bridge_pool; use namada_sdk::eth_bridge::storage::proof::BridgePoolRootProof; @@ -69,7 +69,7 @@ mod tests { use namada_sdk::state::{ self, StateRead, StorageRead, StorageWrite, StoreType, DB, }; - use namada_sdk::storage::{BlockHeight, Key, KeySeg}; + use namada_sdk::storage::{Key, KeySeg}; use namada_sdk::token::conversion::update_allowed_conversions; use namada_sdk::{ address, decode, encode, parameters, storage, token, validation, diff --git a/crates/node/src/tendermint_node.rs b/crates/node/src/tendermint_node.rs index f90ed014c3..e8df339eed 100644 --- a/crates/node/src/tendermint_node.rs +++ b/crates/node/src/tendermint_node.rs @@ -6,9 +6,8 @@ use std::str::FromStr; use namada_apps_lib::cli::namada_version; use namada_apps_lib::config; pub use namada_apps_lib::tendermint_node::*; -use namada_sdk::chain::ChainId; +use namada_sdk::chain::{BlockHeight, ChainId}; use namada_sdk::parameters::ProposalBytes; -use namada_sdk::storage::BlockHeight; use namada_sdk::time::DateTimeUtc; use thiserror::Error; use tokio::fs::{File, OpenOptions}; diff --git a/crates/parameters/src/lib.rs b/crates/parameters/src/lib.rs index df38f5ae12..5d644a7fbe 100644 --- a/crates/parameters/src/lib.rs +++ b/crates/parameters/src/lib.rs @@ -25,7 +25,7 @@ use std::marker::PhantomData; use namada_core::address::{Address, InternalAddress}; use namada_core::arith::checked; -use namada_core::storage::BlockHeight; +use namada_core::chain::BlockHeight; pub use namada_core::parameters::ProposalBytes; use namada_core::time::DurationSecs; use namada_core::{hints, token}; @@ -629,7 +629,7 @@ where #[cfg(test)] mod tests { - use namada_core::storage::Header; + use namada_core::chain::Header; use namada_core::time::DateTimeUtc; use 
namada_storage::testing::TestStorage; diff --git a/crates/proof_of_stake/src/epoched.rs b/crates/proof_of_stake/src/epoched.rs index dd37c92b1d..e4250676f9 100644 --- a/crates/proof_of_stake/src/epoched.rs +++ b/crates/proof_of_stake/src/epoched.rs @@ -8,7 +8,7 @@ use std::marker::PhantomData; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; use namada_core::arith::{checked, CheckedAdd}; use namada_core::collections::HashMap; -use namada_core::storage::{self, Epoch}; +use namada_core::storage; use namada_macros::BorshDeserializer; #[cfg(feature = "migrations")] use namada_migrations::*; @@ -18,7 +18,7 @@ use namada_storage::{StorageRead, StorageWrite}; use namada_systems::governance; use crate::parameters::PosParams; -use crate::read_pos_params; +use crate::{read_pos_params, Epoch}; /// Sub-key holding a lazy map in storage pub const LAZY_MAP_SUB_KEY: &str = "lazy_map"; diff --git a/crates/proof_of_stake/src/error.rs b/crates/proof_of_stake/src/error.rs index c8f2cfb1b0..9c74bedfcd 100644 --- a/crates/proof_of_stake/src/error.rs +++ b/crates/proof_of_stake/src/error.rs @@ -2,8 +2,8 @@ use std::num::TryFromIntError; use namada_core::address::Address; +use namada_core::chain::Epoch; use namada_core::dec::Dec; -use namada_core::storage::Epoch; use thiserror::Error; use crate::rewards; diff --git a/crates/proof_of_stake/src/lib.rs b/crates/proof_of_stake/src/lib.rs index 564cff8fc0..ff846b45d3 100644 --- a/crates/proof_of_stake/src/lib.rs +++ b/crates/proof_of_stake/src/lib.rs @@ -42,11 +42,12 @@ use epoched::EpochOffset; pub use error::*; use namada_core::address::{Address, InternalAddress}; use namada_core::arith::checked; +use namada_core::chain::BlockHeight; +pub use namada_core::chain::Epoch; use namada_core::collections::HashSet; pub use namada_core::dec::Dec; use namada_core::key::common; -use namada_core::storage::BlockHeight; -pub use namada_core::storage::{Epoch, Key, KeySeg}; +pub use namada_core::storage::{Key, KeySeg}; use 
namada_core::tendermint::abci::types::Misbehavior; use namada_core::token; use namada_events::EmitEvents; @@ -122,7 +123,7 @@ where fn is_delegator( storage: &S, address: &Address, - epoch: Option, + epoch: Option, ) -> Result { is_delegator(storage, address, epoch) } @@ -214,7 +215,7 @@ where pub fn is_delegator( storage: &S, address: &Address, - epoch: Option, + epoch: Option, ) -> Result where S: StorageRead, @@ -3113,7 +3114,7 @@ pub mod test_utils { storage: &mut S, params: &PosParams, validators: impl Iterator, - current_epoch: namada_core::storage::Epoch, + current_epoch: namada_core::chain::Epoch, ) -> Result<()> where S: StorageRead + StorageWrite, @@ -3181,7 +3182,7 @@ pub mod test_utils { storage: &mut S, owned: OwnedPosParams, validators: impl Iterator + Clone, - current_epoch: namada_core::storage::Epoch, + current_epoch: namada_core::chain::Epoch, ) -> Result where S: StorageRead + StorageWrite, diff --git a/crates/proof_of_stake/src/parameters.rs b/crates/proof_of_stake/src/parameters.rs index 6d63529f06..776cb6b159 100644 --- a/crates/proof_of_stake/src/parameters.rs +++ b/crates/proof_of_stake/src/parameters.rs @@ -4,8 +4,8 @@ use std::str::FromStr; use borsh::{BorshDeserialize, BorshSerialize}; use namada_core::arith::checked; +use namada_core::chain::Epoch; use namada_core::dec::Dec; -use namada_core::storage::Epoch; use namada_core::token; use namada_core::uint::Uint; #[cfg(test)] diff --git a/crates/proof_of_stake/src/queries.rs b/crates/proof_of_stake/src/queries.rs index 626adf87f3..e8454c2913 100644 --- a/crates/proof_of_stake/src/queries.rs +++ b/crates/proof_of_stake/src/queries.rs @@ -5,10 +5,10 @@ use std::collections::BTreeMap; use borsh::BorshDeserialize; use namada_core::address::Address; +use namada_core::chain::Epoch; use namada_core::collections::{HashMap, HashSet}; use namada_core::dec::Dec; use namada_core::key::common; -use namada_core::storage::Epoch; use namada_core::token; use 
namada_storage::collections::lazy_map::{NestedSubKey, SubKey}; use namada_storage::StorageRead; diff --git a/crates/proof_of_stake/src/rewards.rs b/crates/proof_of_stake/src/rewards.rs index 7634469c00..c225840cc6 100644 --- a/crates/proof_of_stake/src/rewards.rs +++ b/crates/proof_of_stake/src/rewards.rs @@ -3,9 +3,9 @@ use namada_controller::PDController; use namada_core::address::{self, Address}; use namada_core::arith::{self, checked}; +use namada_core::chain::{BlockHeight, Epoch}; use namada_core::collections::{HashMap, HashSet}; use namada_core::dec::Dec; -use namada_core::storage::{BlockHeight, Epoch}; use namada_core::token; use namada_core::uint::{Uint, I256}; use namada_storage::collections::lazy_map::NestedSubKey; diff --git a/crates/proof_of_stake/src/slashing.rs b/crates/proof_of_stake/src/slashing.rs index 5ce05e1f1d..4367561d10 100644 --- a/crates/proof_of_stake/src/slashing.rs +++ b/crates/proof_of_stake/src/slashing.rs @@ -6,10 +6,10 @@ use std::collections::{BTreeMap, BTreeSet}; use borsh::BorshDeserialize; use namada_core::address::Address; use namada_core::arith::{self, checked}; +use namada_core::chain::{BlockHeight, Epoch}; use namada_core::collections::HashMap; use namada_core::dec::Dec; use namada_core::key::tm_raw_hash_to_string; -use namada_core::storage::{BlockHeight, Epoch}; use namada_core::tendermint::abci::types::{Misbehavior, MisbehaviorKind}; use namada_core::token; use namada_events::EmitEvents; diff --git a/crates/proof_of_stake/src/storage.rs b/crates/proof_of_stake/src/storage.rs index 981506c30f..6457456f66 100644 --- a/crates/proof_of_stake/src/storage.rs +++ b/crates/proof_of_stake/src/storage.rs @@ -6,10 +6,10 @@ use std::collections::BTreeSet; use namada_account::protocol_pk_key; use namada_core::address::Address; use namada_core::arith::checked; +use namada_core::chain::Epoch; use namada_core::collections::HashSet; use namada_core::dec::Dec; use namada_core::key::{common, tm_consensus_key_raw_hash}; -use 
namada_core::storage::Epoch; use namada_core::token; use namada_storage::collections::lazy_map::NestedSubKey; use namada_storage::collections::{LazyCollection, LazySet}; @@ -475,7 +475,7 @@ where pub fn read_validator_deltas_value( storage: &S, validator: &Address, - epoch: &namada_core::storage::Epoch, + epoch: &namada_core::chain::Epoch, ) -> namada_storage::Result> where S: StorageRead, @@ -491,7 +491,7 @@ pub fn read_validator_stake( storage: &S, params: &PosParams, validator: &Address, - epoch: namada_core::storage::Epoch, + epoch: namada_core::chain::Epoch, ) -> namada_storage::Result where S: StorageRead, @@ -513,7 +513,7 @@ pub fn update_validator_deltas( params: &OwnedPosParams, validator: &Address, delta: token::Change, - current_epoch: namada_core::storage::Epoch, + current_epoch: namada_core::chain::Epoch, offset_opt: Option, ) -> namada_storage::Result<()> where @@ -539,7 +539,7 @@ where pub fn read_total_stake( storage: &S, params: &PosParams, - epoch: namada_core::storage::Epoch, + epoch: namada_core::chain::Epoch, ) -> namada_storage::Result where S: StorageRead, @@ -559,7 +559,7 @@ where pub fn read_total_active_stake( storage: &S, params: &PosParams, - epoch: namada_core::storage::Epoch, + epoch: namada_core::chain::Epoch, ) -> namada_storage::Result where S: StorageRead, @@ -578,7 +578,7 @@ where /// Read all addresses from consensus validator set. pub fn read_consensus_validator_set_addresses( storage: &S, - epoch: namada_core::storage::Epoch, + epoch: namada_core::chain::Epoch, ) -> namada_storage::Result> where S: StorageRead, @@ -593,7 +593,7 @@ where /// Read all addresses from below-capacity validator set. 
pub fn read_below_capacity_validator_set_addresses( storage: &S, - epoch: namada_core::storage::Epoch, + epoch: namada_core::chain::Epoch, ) -> namada_storage::Result> where S: StorageRead, @@ -608,7 +608,7 @@ where /// Read all addresses from the below-threshold set pub fn read_below_threshold_validator_set_addresses( storage: &S, - epoch: namada_core::storage::Epoch, + epoch: namada_core::chain::Epoch, ) -> namada_storage::Result> where S: StorageRead, @@ -631,7 +631,7 @@ where /// Read all addresses from consensus validator set with their stake. pub fn read_consensus_validator_set_addresses_with_stake( storage: &S, - epoch: namada_core::storage::Epoch, + epoch: namada_core::chain::Epoch, ) -> namada_storage::Result> where S: StorageRead, @@ -661,7 +661,7 @@ where /// Count the number of consensus validators pub fn get_num_consensus_validators( storage: &S, - epoch: namada_core::storage::Epoch, + epoch: namada_core::chain::Epoch, ) -> namada_storage::Result where S: StorageRead, @@ -675,7 +675,7 @@ where /// Read all addresses from below-capacity validator set with their stake. pub fn read_below_capacity_validator_set_addresses_with_stake( storage: &S, - epoch: namada_core::storage::Epoch, + epoch: namada_core::chain::Epoch, ) -> namada_storage::Result> where S: StorageRead, @@ -705,7 +705,7 @@ where /// Read all validator addresses. 
pub fn read_all_validator_addresses( storage: &S, - epoch: namada_core::storage::Epoch, + epoch: namada_core::chain::Epoch, ) -> namada_storage::Result> where S: StorageRead, @@ -722,7 +722,7 @@ pub fn update_total_deltas( storage: &mut S, params: &OwnedPosParams, delta: token::Change, - current_epoch: namada_core::storage::Epoch, + current_epoch: namada_core::chain::Epoch, offset_opt: Option, update_active_voting_power: bool, ) -> namada_storage::Result<()> diff --git a/crates/proof_of_stake/src/storage_key.rs b/crates/proof_of_stake/src/storage_key.rs index dfbaaa07f5..f590447ad6 100644 --- a/crates/proof_of_stake/src/storage_key.rs +++ b/crates/proof_of_stake/src/storage_key.rs @@ -1,12 +1,12 @@ //! Proof-of-Stake storage keys and storage integration. use namada_core::address::Address; -use namada_core::storage::{DbKeySeg, Epoch, Key, KeySeg}; +use namada_core::storage::DbKeySeg; use namada_storage::collections::{lazy_map, lazy_vec}; use super::ADDRESS; -use crate::epoched; use crate::types::BondId; +use crate::{epoched, Epoch, Key, KeySeg}; const PARAMS_STORAGE_KEY: &str = "params"; const VALIDATOR_ADDRESSES_KEY: &str = "validator_addresses"; diff --git a/crates/proof_of_stake/src/tests/helpers.rs b/crates/proof_of_stake/src/tests/helpers.rs index aa6dece63b..b199700723 100644 --- a/crates/proof_of_stake/src/tests/helpers.rs +++ b/crates/proof_of_stake/src/tests/helpers.rs @@ -4,12 +4,12 @@ use std::cmp::max; use std::ops::Range; use namada_core::address::testing::address_from_simple_seed; +use namada_core::chain::Epoch; use namada_core::dec::Dec; use namada_core::key::testing::{ common_sk_from_simple_seed, keypair_1, keypair_3, }; use namada_core::key::{self, RefTo}; -use namada_core::storage::Epoch; use namada_core::token; use namada_core::token::testing::arb_amount_non_zero_ceiled; use namada_state::testing::TestState; diff --git a/crates/proof_of_stake/src/tests/mod.rs b/crates/proof_of_stake/src/tests/mod.rs index 6bf69e74c1..f4b7e24c54 100644 --- 
a/crates/proof_of_stake/src/tests/mod.rs +++ b/crates/proof_of_stake/src/tests/mod.rs @@ -26,7 +26,7 @@ pub fn init_genesis_helper( storage: &mut S, params: &PosParams, validators: impl Iterator, - current_epoch: namada_core::storage::Epoch, + current_epoch: namada_core::chain::Epoch, ) -> Result<()> where S: StorageRead + StorageWrite, @@ -44,7 +44,7 @@ pub fn test_init_genesis( storage: &mut S, owned: OwnedPosParams, validators: impl Iterator + Clone, - current_epoch: namada_core::storage::Epoch, + current_epoch: namada_core::chain::Epoch, ) -> Result where S: StorageRead + StorageWrite, @@ -270,7 +270,7 @@ pub fn update_validator_deltas( params: &OwnedPosParams, validator: &Address, delta: token::Change, - current_epoch: namada_core::storage::Epoch, + current_epoch: namada_core::chain::Epoch, offset_opt: Option, ) -> Result<()> where @@ -289,7 +289,7 @@ where /// DI indirection pub fn read_below_threshold_validator_set_addresses( storage: &S, - epoch: namada_core::storage::Epoch, + epoch: namada_core::chain::Epoch, ) -> Result> where S: StorageRead, diff --git a/crates/proof_of_stake/src/tests/state_machine.rs b/crates/proof_of_stake/src/tests/state_machine.rs index bdccf10977..adb1f1f9a8 100644 --- a/crates/proof_of_stake/src/tests/state_machine.rs +++ b/crates/proof_of_stake/src/tests/state_machine.rs @@ -9,11 +9,11 @@ use std::ops::Deref; use assert_matches::assert_matches; use itertools::Itertools; use namada_core::address::{self, Address}; +use namada_core::chain::Epoch; use namada_core::collections::HashSet; use namada_core::dec::Dec; use namada_core::key; use namada_core::key::common::PublicKey; -use namada_core::storage::Epoch; use namada_core::token::Change; use namada_governance::parameters::GovernanceParameters; use namada_state::testing::TestState; diff --git a/crates/proof_of_stake/src/tests/state_machine_v2.rs b/crates/proof_of_stake/src/tests/state_machine_v2.rs index a7eac77ab5..b53d86919e 100644 --- 
a/crates/proof_of_stake/src/tests/state_machine_v2.rs +++ b/crates/proof_of_stake/src/tests/state_machine_v2.rs @@ -10,11 +10,11 @@ use assert_matches::assert_matches; use derivative::Derivative; use itertools::Itertools; use namada_core::address::{self, Address}; +use namada_core::chain::Epoch; use namada_core::collections::HashSet; use namada_core::dec::Dec; use namada_core::key; use namada_core::key::common::PublicKey; -use namada_core::storage::Epoch; use namada_core::token::Change; use namada_governance::parameters::GovernanceParameters; use namada_state::testing::TestState; diff --git a/crates/proof_of_stake/src/tests/test_helper_fns.rs b/crates/proof_of_stake/src/tests/test_helper_fns.rs index 830febed1c..651c4de7d5 100644 --- a/crates/proof_of_stake/src/tests/test_helper_fns.rs +++ b/crates/proof_of_stake/src/tests/test_helper_fns.rs @@ -5,8 +5,9 @@ use std::collections::{BTreeMap, BTreeSet}; use namada_core::address::testing::{ established_address_1, established_address_2, established_address_3, }; +use namada_core::chain::Epoch; use namada_core::dec::Dec; -use namada_core::storage::{Epoch, Key}; +use namada_core::storage::Key; use namada_core::token; use namada_state::testing::TestState; use namada_storage::collections::lazy_map::NestedMap; diff --git a/crates/proof_of_stake/src/tests/test_pos.rs b/crates/proof_of_stake/src/tests/test_pos.rs index d8829250e7..d6a430c796 100644 --- a/crates/proof_of_stake/src/tests/test_pos.rs +++ b/crates/proof_of_stake/src/tests/test_pos.rs @@ -6,11 +6,11 @@ use std::collections::BTreeMap; use assert_matches::assert_matches; use namada_core::address::Address; +use namada_core::chain::{BlockHeight, Epoch}; use namada_core::collections::HashSet; use namada_core::dec::Dec; use namada_core::key::testing::{common_sk_from_simple_seed, gen_keypair}; use namada_core::key::RefTo; -use namada_core::storage::{BlockHeight, Epoch}; use namada_core::{address, key}; use namada_state::testing::TestState; use 
namada_storage::collections::lazy_map::Collectable; diff --git a/crates/proof_of_stake/src/tests/test_slash_and_redel.rs b/crates/proof_of_stake/src/tests/test_slash_and_redel.rs index c374408a72..4306a016e4 100644 --- a/crates/proof_of_stake/src/tests/test_slash_and_redel.rs +++ b/crates/proof_of_stake/src/tests/test_slash_and_redel.rs @@ -9,10 +9,10 @@ use namada_core::address::testing::{ established_address_1, established_address_2, }; use namada_core::address::{self, Address}; +use namada_core::chain::{BlockHeight, Epoch}; use namada_core::dec::Dec; use namada_core::key::testing::{keypair_1, keypair_2, keypair_3}; use namada_core::key::RefTo; -use namada_core::storage::{BlockHeight, Epoch}; use namada_core::token::NATIVE_MAX_DECIMAL_PLACES; use namada_state::testing::TestState; use namada_storage::collections::lazy_map::Collectable; diff --git a/crates/proof_of_stake/src/tests/test_validator.rs b/crates/proof_of_stake/src/tests/test_validator.rs index 62d41910e0..8988d1d115 100644 --- a/crates/proof_of_stake/src/tests/test_validator.rs +++ b/crates/proof_of_stake/src/tests/test_validator.rs @@ -4,12 +4,12 @@ use std::cmp::min; use namada_core::address::testing::arb_established_address; use namada_core::address::{self, Address, EstablishedAddressGen}; +use namada_core::chain::Epoch; use namada_core::dec::Dec; use namada_core::key::testing::{ arb_common_keypair, common_sk_from_simple_seed, }; use namada_core::key::{self, common, RefTo}; -use namada_core::storage::Epoch; use namada_core::token; use namada_state::testing::TestState; use namada_storage::collections::lazy_map; diff --git a/crates/proof_of_stake/src/types/mod.rs b/crates/proof_of_stake/src/types/mod.rs index 6be5b78fdb..8274c40637 100644 --- a/crates/proof_of_stake/src/types/mod.rs +++ b/crates/proof_of_stake/src/types/mod.rs @@ -12,7 +12,6 @@ use namada_core::address::Address; use namada_core::collections::HashMap; use namada_core::dec::Dec; use namada_core::key::common; -use 
namada_core::storage::{Epoch, KeySeg}; use namada_core::token; use namada_core::token::Amount; use namada_macros::BorshDeserializer; @@ -24,6 +23,7 @@ pub use rev_order::ReverseOrdTokenAmount; use serde::{Deserialize, Serialize}; use crate::parameters::PosParams; +use crate::{Epoch, KeySeg}; /// Stored positions of validators in validator sets pub type ValidatorSetPositions = crate::epoched::NestedEpoched< diff --git a/crates/proof_of_stake/src/validator_set_update.rs b/crates/proof_of_stake/src/validator_set_update.rs index d46b89b3cb..5271074d95 100644 --- a/crates/proof_of_stake/src/validator_set_update.rs +++ b/crates/proof_of_stake/src/validator_set_update.rs @@ -2,9 +2,9 @@ use namada_core::address::Address; use namada_core::arith::checked; +use namada_core::chain::Epoch; use namada_core::collections::{HashMap, HashSet}; use namada_core::key::PublicKeyTmRawHash; -use namada_core::storage::Epoch; use namada_core::token; use namada_storage::collections::lazy_map::{NestedSubKey, SubKey}; use namada_storage::{StorageRead, StorageWrite}; diff --git a/crates/sdk/src/args.rs b/crates/sdk/src/args.rs index 59845d5c23..c68344c090 100644 --- a/crates/sdk/src/args.rs +++ b/crates/sdk/src/args.rs @@ -6,14 +6,13 @@ use std::str::FromStr; use std::time::Duration as StdDuration; use namada_core::address::Address; -use namada_core::chain::ChainId; +use namada_core::chain::{BlockHeight, ChainId, Epoch}; use namada_core::collections::HashMap; use namada_core::dec::Dec; use namada_core::ethereum_events::EthAddress; use namada_core::keccak::KeccakHash; use namada_core::key::{common, SchemeType}; use namada_core::masp::{MaspEpoch, PaymentAddress}; -use namada_core::storage::{BlockHeight, Epoch}; use namada_core::time::DateTimeUtc; use namada_core::{storage, token}; use namada_governance::cli::onchain::{ diff --git a/crates/sdk/src/error.rs b/crates/sdk/src/error.rs index fc93e2c94f..8ab27fba80 100644 --- a/crates/sdk/src/error.rs +++ b/crates/sdk/src/error.rs @@ -1,9 +1,9 @@ //! 
Generic Error Type for all of the Shared Crate use namada_core::address::Address; +use namada_core::chain::Epoch; use namada_core::dec::Dec; use namada_core::ethereum_events::EthAddress; -use namada_core::storage::Epoch; use namada_core::{arith, storage}; use namada_events::EventError; use namada_tx::Tx; diff --git a/crates/sdk/src/eth_bridge/bridge_pool.rs b/crates/sdk/src/eth_bridge/bridge_pool.rs index 90c4686e44..6f2383426d 100644 --- a/crates/sdk/src/eth_bridge/bridge_pool.rs +++ b/crates/sdk/src/eth_bridge/bridge_pool.rs @@ -741,8 +741,8 @@ mod recommendations { use std::collections::BTreeSet; use borsh::BorshDeserialize; + use namada_core::chain::BlockHeight; use namada_core::ethereum_events::Uint as EthUint; - use namada_core::storage::BlockHeight; use namada_core::uint::{self, Uint, I256}; use namada_ethereum_bridge::storage::proof::BridgePoolRootProof; use namada_vote_ext::validator_set_update::{ diff --git a/crates/sdk/src/eth_bridge/validator_set.rs b/crates/sdk/src/eth_bridge/validator_set.rs index d79aac1b14..a2aac05857 100644 --- a/crates/sdk/src/eth_bridge/validator_set.rs +++ b/crates/sdk/src/eth_bridge/validator_set.rs @@ -9,10 +9,10 @@ use data_encoding::HEXLOWER; use ethbridge_bridge_contract::Bridge; use ethers::providers::Middleware; use futures::future::FutureExt; +use namada_core::chain::Epoch; use namada_core::eth_abi::EncodeCell; use namada_core::ethereum_events::EthAddress; use namada_core::hints; -use namada_core::storage::Epoch; use namada_ethereum_bridge::storage::proof::EthereumProof; use namada_vote_ext::validator_set_update::{ ValidatorSetArgs, VotingPowersMap, diff --git a/crates/sdk/src/events/log/dumb_queries.rs b/crates/sdk/src/events/log/dumb_queries.rs index f0c770ce8d..99bd072d90 100644 --- a/crates/sdk/src/events/log/dumb_queries.rs +++ b/crates/sdk/src/events/log/dumb_queries.rs @@ -1,9 +1,9 @@ //! Silly simple event matcher. 
+use namada_core::chain::BlockHeight; use namada_core::collections::HashMap; use namada_core::hash::Hash; use namada_core::keccak::KeccakHash; -use namada_core::storage::BlockHeight; use namada_ethereum_bridge::event::types::{ BRIDGE_POOL_EXPIRED, BRIDGE_POOL_RELAYED, }; diff --git a/crates/sdk/src/masp.rs b/crates/sdk/src/masp.rs index 2ae0b48ffa..561d56c407 100644 --- a/crates/sdk/src/masp.rs +++ b/crates/sdk/src/masp.rs @@ -40,11 +40,12 @@ use masp_primitives::zip32::{ use masp_proofs::prover::LocalTxProver; use namada_core::address::Address; use namada_core::arith::CheckedAdd; +use namada_core::chain::BlockHeight; use namada_core::collections::{HashMap, HashSet}; use namada_core::dec::Dec; use namada_core::ibc::IbcTxDataRefs; pub use namada_core::masp::*; -use namada_core::storage::{BlockHeight, TxIndex}; +use namada_core::storage::TxIndex; use namada_core::time::DateTimeUtc; use namada_core::uint::Uint; use namada_events::extend::{ diff --git a/crates/sdk/src/masp/shielded_sync/dispatcher.rs b/crates/sdk/src/masp/shielded_sync/dispatcher.rs index add16c84c8..ac60c011d9 100644 --- a/crates/sdk/src/masp/shielded_sync/dispatcher.rs +++ b/crates/sdk/src/masp/shielded_sync/dispatcher.rs @@ -13,10 +13,10 @@ use futures::task::AtomicWaker; use masp_primitives::merkle_tree::{CommitmentTree, IncrementalWitness}; use masp_primitives::sapling::{Node, ViewingKey}; use masp_primitives::transaction::Transaction; +use namada_core::chain::BlockHeight; use namada_core::collections::HashMap; use namada_core::control_flow::time::{Duration, LinearBackoff, Sleep}; use namada_core::hints; -use namada_core::storage::BlockHeight; use namada_tx::IndexedTx; use super::utils::{IndexedNoteEntry, MaspClient}; @@ -862,7 +862,8 @@ mod dispatcher_tests { use std::hint::spin_loop; use futures::join; - use namada_core::storage::{BlockHeight, TxIndex}; + use namada_core::chain::BlockHeight; + use namada_core::storage::TxIndex; use namada_tx::IndexedTx; use tempfile::tempdir; diff --git 
a/crates/sdk/src/masp/shielded_sync/utils.rs b/crates/sdk/src/masp/shielded_sync/utils.rs index 3ab499eadb..7b4387fd4e 100644 --- a/crates/sdk/src/masp/shielded_sync/utils.rs +++ b/crates/sdk/src/masp/shielded_sync/utils.rs @@ -8,8 +8,9 @@ use masp_primitives::memo::MemoBytes; use masp_primitives::merkle_tree::{CommitmentTree, IncrementalWitness}; use masp_primitives::sapling::{Node, Note, PaymentAddress, ViewingKey}; use masp_primitives::transaction::Transaction; +use namada_core::chain::BlockHeight; use namada_core::collections::HashMap; -use namada_core::storage::{BlockHeight, TxIndex}; +use namada_core::storage::TxIndex; use namada_tx::{IndexedTx, IndexedTxRange, Tx}; #[cfg(not(target_family = "wasm"))] use tokio::sync::Semaphore; diff --git a/crates/sdk/src/masp/test_utils.rs b/crates/sdk/src/masp/test_utils.rs index 7ed4e14734..ede69e923d 100644 --- a/crates/sdk/src/masp/test_utils.rs +++ b/crates/sdk/src/masp/test_utils.rs @@ -6,9 +6,9 @@ use masp_primitives::merkle_tree::{CommitmentTree, IncrementalWitness}; use masp_primitives::sapling::{Node, ViewingKey}; use masp_primitives::transaction::Transaction; use masp_primitives::zip32::ExtendedFullViewingKey; +use namada_core::chain::BlockHeight; use namada_core::collections::HashMap; use namada_core::masp::ExtendedViewingKey; -use namada_core::storage::BlockHeight; use namada_tx::IndexedTx; use crate::error::Error; diff --git a/crates/sdk/src/migrations.rs b/crates/sdk/src/migrations.rs index b66a111f4e..ac81f69452 100644 --- a/crates/sdk/src/migrations.rs +++ b/crates/sdk/src/migrations.rs @@ -9,8 +9,9 @@ use borsh::{BorshDeserialize, BorshSerialize}; use borsh_ext::BorshSerializeExt; use data_encoding::HEXUPPER; use eyre::eyre; +use namada_core::chain::BlockHeight; use namada_core::hash::Hash; -use namada_core::storage::{BlockHeight, Key}; +use namada_core::storage::Key; use namada_macros::{derive_borshdeserializer, typehash}; use namada_migrations::{TypeHash, *}; use namada_storage::{DbColFam, DbMigration, 
DB}; diff --git a/crates/sdk/src/queries/mod.rs b/crates/sdk/src/queries/mod.rs index d91ffd72dc..f7782570ac 100644 --- a/crates/sdk/src/queries/mod.rs +++ b/crates/sdk/src/queries/mod.rs @@ -2,7 +2,7 @@ //! defined via `router!` macro. // Re-export to show in rustdoc! -use namada_core::storage::BlockHeight; +use namada_core::chain::BlockHeight; use namada_state::{DBIter, StorageHasher, DB}; pub use shell::Shell; use shell::SHELL; diff --git a/crates/sdk/src/queries/router.rs b/crates/sdk/src/queries/router.rs index 43e109849b..bb09b6ec2f 100644 --- a/crates/sdk/src/queries/router.rs +++ b/crates/sdk/src/queries/router.rs @@ -404,7 +404,7 @@ macro_rules! pattern_and_handler_to_method { `storage_value` and `storage_prefix`) from `storage_value`."] pub async fn storage_value(&self, client: &CLIENT, data: Option>, - height: Option, + height: Option, prove: bool, $( $param: &$param_ty ),* ) @@ -456,7 +456,7 @@ macro_rules! pattern_and_handler_to_method { `storage_value` and `storage_prefix`) from `" $handle "`."] pub async fn $handle(&self, client: &CLIENT, data: Option>, - height: Option, + height: Option, prove: bool, $( $param: &$param_ty ),* ) @@ -855,7 +855,7 @@ macro_rules! 
router { #[cfg(test)] mod test_rpc_handlers { use borsh_ext::BorshSerializeExt; - use namada_core::storage::Epoch; + use namada_core::chain::Epoch; use namada_core::token; use namada_state::{DBIter, StorageHasher, DB}; @@ -982,7 +982,7 @@ mod test_rpc_handlers { /// ``` #[cfg(test)] mod test_rpc { - use namada_core::storage::Epoch; + use namada_core::chain::Epoch; use namada_core::token; use super::test_rpc_handlers::*; @@ -1020,7 +1020,7 @@ mod test_rpc { #[cfg(test)] mod test { - use namada_core::storage::Epoch; + use namada_core::chain::Epoch; use namada_core::tendermint::block; use namada_core::token; use namada_core::token::NATIVE_MAX_DECIMAL_PLACES; diff --git a/crates/sdk/src/queries/shell.rs b/crates/sdk/src/queries/shell.rs index 5a74ca98a5..001c155853 100644 --- a/crates/sdk/src/queries/shell.rs +++ b/crates/sdk/src/queries/shell.rs @@ -10,13 +10,12 @@ use masp_primitives::sapling::Node; use namada_account::{Account, AccountPublicKeysMap}; use namada_core::address::Address; use namada_core::arith::checked; +use namada_core::chain::{BlockHeight, Epoch, Header}; use namada_core::dec::Dec; use namada_core::hash::Hash; use namada_core::hints; use namada_core::masp::{MaspEpoch, TokenMap}; -use namada_core::storage::{ - self, BlockHeight, BlockResults, Epoch, Header, KeySeg, PrefixValue, -}; +use namada_core::storage::{self, BlockResults, KeySeg, PrefixValue}; use namada_core::time::DurationSecs; use namada_core::token::{Denomination, MaspDigitPos}; use namada_core::uint::Uint; diff --git a/crates/sdk/src/queries/shell/eth_bridge.rs b/crates/sdk/src/queries/shell/eth_bridge.rs index d3550c306d..3f2a19d979 100644 --- a/crates/sdk/src/queries/shell/eth_bridge.rs +++ b/crates/sdk/src/queries/shell/eth_bridge.rs @@ -7,6 +7,7 @@ use borsh::{BorshDeserialize, BorshSerialize}; use borsh_ext::BorshSerializeExt; use namada_core::address::Address; use namada_core::arith::checked; +use namada_core::chain::{BlockHeight, Epoch}; use namada_core::collections::{HashMap, 
HashSet}; use namada_core::eth_abi::{Encode, EncodeCell}; use namada_core::eth_bridge_pool::{PendingTransfer, PendingTransferAppendix}; @@ -14,7 +15,7 @@ use namada_core::ethereum_events::{ EthAddress, EthereumEvent, TransferToEthereum, }; use namada_core::keccak::KeccakHash; -use namada_core::storage::{BlockHeight, DbKeySeg, Epoch, Key}; +use namada_core::storage::{DbKeySeg, Key}; use namada_core::token::Amount; use namada_core::voting_power::FractionalVotingPower; use namada_core::{ethereum_structs, hints}; diff --git a/crates/sdk/src/queries/types.rs b/crates/sdk/src/queries/types.rs index 8764a130ce..35970ab1fa 100644 --- a/crates/sdk/src/queries/types.rs +++ b/crates/sdk/src/queries/types.rs @@ -1,6 +1,6 @@ use std::fmt::Debug; -use namada_core::storage::BlockHeight; +use namada_core::chain::BlockHeight; use namada_state::{DBIter, StorageHasher, WlState, DB}; use thiserror::Error; diff --git a/crates/sdk/src/queries/vp/pos.rs b/crates/sdk/src/queries/vp/pos.rs index 208a2ad6f7..0c2568f973 100644 --- a/crates/sdk/src/queries/vp/pos.rs +++ b/crates/sdk/src/queries/vp/pos.rs @@ -5,9 +5,9 @@ use std::collections::{BTreeMap, BTreeSet}; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; use namada_core::address::Address; use namada_core::arith::{self, checked}; +use namada_core::chain::Epoch; use namada_core::collections::{HashMap, HashSet}; use namada_core::key::common; -use namada_core::storage::Epoch; use namada_core::token; use namada_proof_of_stake::parameters::PosParams; use namada_proof_of_stake::queries::{ diff --git a/crates/sdk/src/rpc.rs b/crates/sdk/src/rpc.rs index 9888fdf13a..44a2ab554a 100644 --- a/crates/sdk/src/rpc.rs +++ b/crates/sdk/src/rpc.rs @@ -11,14 +11,13 @@ use masp_primitives::sapling::Node; use namada_account::Account; use namada_core::address::{Address, InternalAddress}; use namada_core::arith::checked; +use namada_core::chain::{BlockHeight, Epoch}; use namada_core::collections::{HashMap, HashSet}; use namada_core::hash::Hash; 
use namada_core::ibc::IbcTokenHash; use namada_core::key::common; use namada_core::masp::MaspEpoch; -use namada_core::storage::{ - BlockHeight, BlockResults, Epoch, Key, PrefixValue, -}; +use namada_core::storage::{BlockResults, Key, PrefixValue}; use namada_core::time::DurationSecs; use namada_core::token::{ Amount, DenominatedAmount, Denomination, MaspDigitPos, diff --git a/crates/sdk/src/tx.rs b/crates/sdk/src/tx.rs index 5562748b8f..5700f38cd3 100644 --- a/crates/sdk/src/tx.rs +++ b/crates/sdk/src/tx.rs @@ -21,6 +21,7 @@ use masp_primitives::transaction::{builder, Transaction as MaspTransaction}; use namada_account::{InitAccount, UpdateAccount}; use namada_core::address::{Address, IBC, MASP}; use namada_core::arith::checked; +use namada_core::chain::Epoch; use namada_core::collections::HashSet; use namada_core::dec::Dec; use namada_core::hash::Hash; @@ -39,7 +40,6 @@ use namada_core::masp::{ AssetData, ExtendedSpendingKey, MaspEpoch, TransferSource, TransferTarget, }; use namada_core::storage; -use namada_core::storage::Epoch; use namada_core::time::DateTimeUtc; use namada_governance::cli::onchain::{ DefaultProposal, OnChainProposal, PgfFundingProposal, PgfStewardProposal, diff --git a/crates/sdk/src/wallet/keys.rs b/crates/sdk/src/wallet/keys.rs index 4da7a1e584..85ed6e4c96 100644 --- a/crates/sdk/src/wallet/keys.rs +++ b/crates/sdk/src/wallet/keys.rs @@ -7,8 +7,8 @@ use std::str::FromStr; use borsh::{BorshDeserialize, BorshSerialize}; use borsh_ext::BorshSerializeExt; use data_encoding::HEXLOWER; +use namada_core::chain::BlockHeight; use namada_core::masp::{ExtendedSpendingKey, ExtendedViewingKey}; -use namada_core::storage::BlockHeight; use orion::{aead, kdf}; use serde::{Deserialize, Serialize}; use thiserror::Error; diff --git a/crates/sdk/src/wallet/mod.rs b/crates/sdk/src/wallet/mod.rs index e3d1c3f5a1..727bc02ddb 100644 --- a/crates/sdk/src/wallet/mod.rs +++ b/crates/sdk/src/wallet/mod.rs @@ -16,12 +16,12 @@ use bip39::{Language, Mnemonic, MnemonicType, 
Seed}; use borsh::{BorshDeserialize, BorshSerialize}; use namada_core::address::{Address, ImplicitAddress}; use namada_core::arith::checked; +use namada_core::chain::BlockHeight; use namada_core::collections::{HashMap, HashSet}; use namada_core::key::*; use namada_core::masp::{ ExtendedSpendingKey, ExtendedViewingKey, PaymentAddress, }; -use namada_core::storage::BlockHeight; use namada_core::time::DateTimeUtc; use namada_ibc::trace::is_ibc_denom; pub use pre_genesis::gen_key_to_store; diff --git a/crates/sdk/src/wallet/store.rs b/crates/sdk/src/wallet/store.rs index bf071b13df..4ddd27bbbd 100644 --- a/crates/sdk/src/wallet/store.rs +++ b/crates/sdk/src/wallet/store.rs @@ -10,12 +10,12 @@ use bimap::BiBTreeMap; use itertools::Itertools; use masp_primitives::zip32; use namada_core::address::{Address, ImplicitAddress}; +use namada_core::chain::BlockHeight; use namada_core::collections::HashSet; use namada_core::key::*; use namada_core::masp::{ ExtendedSpendingKey, ExtendedViewingKey, PaymentAddress, }; -use namada_core::storage::BlockHeight; use serde::{Deserialize, Serialize}; use zeroize::Zeroizing; diff --git a/crates/state/src/in_memory.rs b/crates/state/src/in_memory.rs index 752b378f9d..d8d58406d3 100644 --- a/crates/state/src/in_memory.rs +++ b/crates/state/src/in_memory.rs @@ -3,7 +3,7 @@ use std::num::NonZeroUsize; use clru::CLruCache; use namada_core::address::{Address, EstablishedAddressGen, InternalAddress}; use namada_core::borsh::{BorshDeserialize, BorshSerialize}; -use namada_core::chain::{ChainId, CHAIN_ID_LENGTH}; +use namada_core::chain::{ChainId, BLOCK_HEIGHT_LENGTH, CHAIN_ID_LENGTH}; use namada_core::hash::Hash; use namada_core::parameters::{EpochDuration, Parameters}; use namada_core::time::DateTimeUtc; @@ -18,7 +18,7 @@ use namada_storage::tx_queue::ExpiredTxsQueue; use namada_storage::types::CommitOnlyData; use namada_storage::{ BlockHeight, BlockResults, Epoch, Epochs, EthEventsQueue, Header, Key, - KeySeg, StorageHasher, TxIndex, 
BLOCK_HEIGHT_LENGTH, EPOCH_TYPE_LENGTH, + KeySeg, StorageHasher, TxIndex, EPOCH_TYPE_LENGTH, }; use crate::{Error, Result}; diff --git a/crates/state/src/lib.rs b/crates/state/src/lib.rs index b063447d42..616573b0e2 100644 --- a/crates/state/src/lib.rs +++ b/crates/state/src/lib.rs @@ -32,13 +32,15 @@ pub use in_memory::{ }; use namada_core::address::Address; use namada_core::arith::{self, checked}; +pub use namada_core::chain::{ + BlockHash, BlockHeight, Epoch, Epochs, Header, BLOCK_HASH_LENGTH, + BLOCK_HEIGHT_LENGTH, +}; use namada_core::eth_bridge_pool::is_pending_transfer_key; pub use namada_core::hash::Sha256Hasher; use namada_core::hash::{Error as HashError, Hash}; pub use namada_core::storage::{ - BlockHash, BlockHeight, BlockResults, Epoch, Epochs, EthEventsQueue, - Header, Key, KeySeg, TxIndex, BLOCK_HASH_LENGTH, BLOCK_HEIGHT_LENGTH, - EPOCH_TYPE_LENGTH, + BlockResults, EthEventsQueue, Key, KeySeg, TxIndex, EPOCH_TYPE_LENGTH, }; use namada_core::tendermint::merkle::proof::ProofOps; use namada_gas::{MEMORY_ACCESS_GAS_PER_BYTE, STORAGE_ACCESS_GAS_PER_BYTE}; @@ -302,7 +304,7 @@ macro_rules! impl_storage_read { fn get_block_height( &self, - ) -> std::result::Result { + ) -> std::result::Result { let (height, gas) = self.in_mem().get_block_height(); self.charge_gas(gas).into_storage_result()?; Ok(height) @@ -310,7 +312,7 @@ macro_rules! impl_storage_read { fn get_block_header( &self, - height: storage::BlockHeight, + height: BlockHeight, ) -> std::result::Result, namada_storage::Error> { let (header, gas) = @@ -321,7 +323,7 @@ macro_rules! 
impl_storage_read { fn get_block_epoch( &self, - ) -> std::result::Result { + ) -> std::result::Result { let (epoch, gas) = self.in_mem().get_current_epoch(); self.charge_gas(gas).into_storage_result()?; Ok(epoch) diff --git a/crates/state/src/wl_state.rs b/crates/state/src/wl_state.rs index 60ed93a0c4..df43a057df 100644 --- a/crates/state/src/wl_state.rs +++ b/crates/state/src/wl_state.rs @@ -4,7 +4,7 @@ use std::ops::{Deref, DerefMut}; use namada_core::address::Address; use namada_core::arith::checked; use namada_core::borsh::BorshSerializeExt; -use namada_core::chain::ChainId; +use namada_core::chain::{ChainId, Header}; use namada_core::masp::MaspEpoch; use namada_core::parameters::{EpochDuration, Parameters}; use namada_core::storage; @@ -567,7 +567,7 @@ where #[cfg(any(test, feature = "testing", feature = "benches"))] { if self.in_mem.header.is_none() { - self.in_mem.header = Some(storage::Header { + self.in_mem.header = Some(Header { hash: Hash::default(), #[allow(clippy::disallowed_methods)] time: DateTimeUtc::now(), diff --git a/crates/storage/src/db.rs b/crates/storage/src/db.rs index 60bb530e86..f69fa51386 100644 --- a/crates/storage/src/db.rs +++ b/crates/storage/src/db.rs @@ -2,11 +2,9 @@ use std::fmt::Debug; use std::num::TryFromIntError; use namada_core::address::EstablishedAddressGen; +use namada_core::chain::{BlockHeight, Epoch, Epochs, Header}; use namada_core::hash::{Error as HashError, Hash}; -use namada_core::storage::{ - BlockHeight, BlockResults, DbColFam, Epoch, Epochs, EthEventsQueue, Header, - Key, -}; +use namada_core::storage::{BlockResults, DbColFam, EthEventsQueue, Key}; use namada_core::time::DateTimeUtc; use namada_core::{arith, ethereum_events, ethereum_structs}; use namada_merkle_tree::{ diff --git a/crates/storage/src/lib.rs b/crates/storage/src/lib.rs index 5ccc6692bd..93ca831a73 100644 --- a/crates/storage/src/lib.rs +++ b/crates/storage/src/lib.rs @@ -30,6 +30,7 @@ pub use db::{Error as DbError, Result as DbResult, *}; pub use 
error::{CustomError, Error, OptionExt, Result, ResultExt}; use namada_core::address::Address; use namada_core::borsh::{BorshDeserialize, BorshSerialize, BorshSerializeExt}; +pub use namada_core::chain::{BlockHash, BlockHeight, Epoch, Epochs, Header}; pub use namada_core::hash::StorageHasher; pub use namada_core::storage::*; diff --git a/crates/storage/src/mockdb.rs b/crates/storage/src/mockdb.rs index 1de9122307..782b88701f 100644 --- a/crates/storage/src/mockdb.rs +++ b/crates/storage/src/mockdb.rs @@ -8,10 +8,9 @@ use std::path::Path; use itertools::Either; use namada_core::borsh::{BorshDeserialize, BorshSerialize}; +use namada_core::chain::{BlockHeight, Epoch, Header}; use namada_core::hash::Hash; -use namada_core::storage::{ - BlockHeight, DbColFam, Epoch, Header, Key, KeySeg, KEY_SEGMENT_SEPARATOR, -}; +use namada_core::storage::{DbColFam, Key, KeySeg, KEY_SEGMENT_SEPARATOR}; use namada_core::{decode, encode, ethereum_events}; use namada_merkle_tree::{ tree_key_prefix_with_epoch, tree_key_prefix_with_height, diff --git a/crates/systems/src/parameters.rs b/crates/systems/src/parameters.rs index 6dc746a051..a8fd89e409 100644 --- a/crates/systems/src/parameters.rs +++ b/crates/systems/src/parameters.rs @@ -1,8 +1,8 @@ //! Parameters abstract interfaces +use namada_core::chain::BlockHeight; pub use namada_core::parameters::*; use namada_core::storage; -use namada_core::storage::BlockHeight; use namada_core::time::DurationSecs; pub use namada_storage::Result; diff --git a/crates/systems/src/proof_of_stake.rs b/crates/systems/src/proof_of_stake.rs index 170bc49862..d81957054d 100644 --- a/crates/systems/src/proof_of_stake.rs +++ b/crates/systems/src/proof_of_stake.rs @@ -1,7 +1,7 @@ //! 
Proof-of-Stake abstract interfaces use namada_core::address::Address; -use namada_core::storage; +use namada_core::chain::Epoch; pub use namada_storage::Result; /// Abstract PoS storage read interface @@ -14,7 +14,7 @@ pub trait Read { fn is_delegator( storage: &S, address: &Address, - epoch: Option, + epoch: Option, ) -> Result; /// Read PoS pipeline length parameter diff --git a/crates/tests/src/e2e/helpers.rs b/crates/tests/src/e2e/helpers.rs index e722dc3ba8..a437992b07 100644 --- a/crates/tests/src/e2e/helpers.rs +++ b/crates/tests/src/e2e/helpers.rs @@ -22,9 +22,9 @@ use namada_apps_lib::config::utils::convert_tm_addr_to_socket_addr; use namada_apps_lib::config::{Config, TendermintMode}; use namada_core::token::NATIVE_MAX_DECIMAL_PLACES; use namada_sdk::address::Address; +use namada_sdk::chain::Epoch; use namada_sdk::key::*; use namada_sdk::queries::{Rpc, RPC}; -use namada_sdk::storage::Epoch; use namada_sdk::tendermint_rpc::HttpClient; use namada_sdk::token; use namada_sdk::wallet::fs::FsWalletUtils; diff --git a/crates/tests/src/e2e/ibc_tests.rs b/crates/tests/src/e2e/ibc_tests.rs index 0c1abdac72..992f911586 100644 --- a/crates/tests/src/e2e/ibc_tests.rs +++ b/crates/tests/src/e2e/ibc_tests.rs @@ -29,6 +29,7 @@ use namada_apps_lib::facade::tendermint::merkle::proof::ProofOps as TmProof; use namada_apps_lib::facade::tendermint_rpc::{Client, HttpClient, Url}; use namada_core::string_encoding::StringEncoded; use namada_sdk::address::{Address, InternalAddress, MASP}; +use namada_sdk::chain::{BlockHeight, Epoch}; use namada_sdk::events::extend::ReadFromEventAttributes; use namada_sdk::governance::cli::onchain::PgfFunding; use namada_sdk::governance::pgf::ADDRESS as PGF_ADDRESS; @@ -78,7 +79,7 @@ use namada_sdk::parameters::{storage as param_storage, EpochDuration}; use namada_sdk::queries::RPC; use namada_sdk::state::ics23_specs::ibc_proof_specs; use namada_sdk::state::Sha256Hasher; -use namada_sdk::storage::{BlockHeight, Epoch, Key}; +use 
namada_sdk::storage::Key; use namada_sdk::tendermint::abci::Event as AbciEvent; use namada_sdk::tendermint::block::Height as TmHeight; use namada_sdk::token::Amount; diff --git a/crates/tests/src/e2e/ledger_tests.rs b/crates/tests/src/e2e/ledger_tests.rs index 13900958b1..07dd87e101 100644 --- a/crates/tests/src/e2e/ledger_tests.rs +++ b/crates/tests/src/e2e/ledger_tests.rs @@ -28,7 +28,7 @@ use namada_apps_lib::wallet; use namada_core::chain::ChainId; use namada_core::token::NATIVE_MAX_DECIMAL_PLACES; use namada_sdk::address::Address; -use namada_sdk::storage::Epoch; +use namada_sdk::chain::Epoch; use namada_sdk::time::DateTimeUtc; use namada_sdk::token; use namada_test_utils::TestWasms; diff --git a/crates/tests/src/integration/ledger_tests.rs b/crates/tests/src/integration/ledger_tests.rs index dcd163acfd..236f4f6aa2 100644 --- a/crates/tests/src/integration/ledger_tests.rs +++ b/crates/tests/src/integration/ledger_tests.rs @@ -9,9 +9,10 @@ use data_encoding::HEXLOWER; use namada_apps_lib::wallet::defaults::{ self, get_unencrypted_keypair, is_use_device, }; +use namada_core::chain::Epoch; use namada_core::dec::Dec; use namada_core::hash::Hash; -use namada_core::storage::{DbColFam, Epoch, Key}; +use namada_core::storage::{DbColFam, Key}; use namada_core::token::NATIVE_MAX_DECIMAL_PLACES; use namada_node::shell::testing::client::run; use namada_node::shell::testing::node::NodeResults; diff --git a/crates/tests/src/native_vp/pos.rs b/crates/tests/src/native_vp/pos.rs index 59b7ca736f..4244e456dc 100644 --- a/crates/tests/src/native_vp/pos.rs +++ b/crates/tests/src/native_vp/pos.rs @@ -95,10 +95,10 @@ //! - add slashes //! 
- add rewards +use namada_sdk::chain::Epoch; use namada_sdk::proof_of_stake::parameters::{OwnedPosParams, PosParams}; use namada_sdk::proof_of_stake::test_utils::test_init_genesis as init_genesis; use namada_sdk::proof_of_stake::types::GenesisValidator; -use namada_sdk::storage::Epoch; use crate::tx::tx_host_env; @@ -590,6 +590,7 @@ pub mod testing { use derivative::Derivative; use itertools::Either; + use namada_sdk::chain::Epoch; use namada_sdk::dec::Dec; use namada_sdk::gas::TxGasMeter; use namada_sdk::key::common::PublicKey; @@ -602,7 +603,6 @@ pub mod testing { }; use namada_sdk::proof_of_stake::types::{BondId, ValidatorState}; use namada_sdk::proof_of_stake::ADDRESS as POS_ADDRESS; - use namada_sdk::storage::Epoch; use namada_sdk::token::{Amount, Change}; use namada_sdk::{address, governance, key, token}; use namada_tx_prelude::{Address, StorageRead, StorageWrite}; diff --git a/crates/tests/src/vm_host_env/ibc.rs b/crates/tests/src/vm_host_env/ibc.rs index 35a186a58c..2465339e3d 100644 --- a/crates/tests/src/vm_host_env/ibc.rs +++ b/crates/tests/src/vm_host_env/ibc.rs @@ -6,8 +6,8 @@ use ibc_testkit::testapp::ibc::clients::mock::client_state::{ }; use ibc_testkit::testapp::ibc::clients::mock::consensus_state::MockConsensusState; use ibc_testkit::testapp::ibc::clients::mock::header::MockHeader; +use namada_core::chain::testing::get_dummy_header; use namada_core::collections::HashMap; -use namada_core::storage::testing::get_dummy_header; use namada_sdk::address::{self, Address, InternalAddress}; use namada_sdk::gas::{TxGasMeter, VpGasMeter}; use namada_sdk::governance::parameters::GovernanceParameters; diff --git a/crates/tests/src/vm_host_env/mod.rs b/crates/tests/src/vm_host_env/mod.rs index 2dd015cc07..b958a63ab5 100644 --- a/crates/tests/src/vm_host_env/mod.rs +++ b/crates/tests/src/vm_host_env/mod.rs @@ -24,7 +24,7 @@ mod tests { use borsh_ext::BorshSerializeExt; use itertools::Itertools; - use namada_core::storage::testing::get_dummy_header; + use 
namada_core::chain::testing::get_dummy_header; use namada_sdk::account::pks_handle; use namada_sdk::hash::Hash; use namada_sdk::ibc::context::nft_transfer_mod::testing::DummyNftTransferModule; diff --git a/crates/tx/src/types.rs b/crates/tx/src/types.rs index bf0aca6f40..b3947cf9f4 100644 --- a/crates/tx/src/types.rs +++ b/crates/tx/src/types.rs @@ -16,11 +16,11 @@ use namada_core::borsh::schema::{add_definition, Declaration, Definition}; use namada_core::borsh::{ self, BorshDeserialize, BorshSchema, BorshSerialize, BorshSerializeExt, }; -use namada_core::chain::ChainId; +use namada_core::chain::{BlockHeight, ChainId}; use namada_core::collections::{HashMap, HashSet}; use namada_core::key::*; use namada_core::masp::{AssetData, TxId}; -use namada_core::storage::{BlockHeight, TxIndex}; +use namada_core::storage::TxIndex; use namada_core::time::DateTimeUtc; use namada_macros::BorshDeserializer; #[cfg(feature = "migrations")] diff --git a/crates/tx_prelude/src/lib.rs b/crates/tx_prelude/src/lib.rs index 8019053522..ff1c25a0c6 100644 --- a/crates/tx_prelude/src/lib.rs +++ b/crates/tx_prelude/src/lib.rs @@ -30,14 +30,14 @@ pub use namada_core::borsh::{ BorshDeserialize, BorshSerialize, BorshSerializeExt, }; use namada_core::chain::CHAIN_ID_LENGTH; +pub use namada_core::chain::{ + BlockHash, BlockHeight, Epoch, Header, BLOCK_HASH_LENGTH, +}; pub use namada_core::ethereum_events::EthAddress; use namada_core::internal::HostEnvResult; use namada_core::key::common; use namada_core::storage::TxIndex; -pub use namada_core::storage::{ - self, BlockHash, BlockHeight, Epoch, Header, BLOCK_HASH_LENGTH, -}; -pub use namada_core::{address, encode, eth_bridge_pool, *}; +pub use namada_core::{address, encode, eth_bridge_pool, storage, *}; use namada_events::{EmitEvents, Event, EventToEmit, EventType}; pub use namada_governance::storage as gov_storage; pub use namada_macros::transaction; @@ -193,11 +193,11 @@ impl StorageRead for Ctx { } } - fn get_block_epoch(&self) -> Result { + fn 
get_block_epoch(&self) -> Result { Ok(Epoch(unsafe { namada_tx_get_block_epoch() })) } - fn get_pred_epochs(&self) -> Result { + fn get_pred_epochs(&self) -> Result { let read_result = unsafe { namada_tx_get_pred_epochs() }; let bytes = read_from_buffer(read_result, namada_tx_result_buffer) .ok_or(Error::SimpleMessage( diff --git a/crates/vm/src/host_env.rs b/crates/vm/src/host_env.rs index 9c762c7fa7..134ebb26a7 100644 --- a/crates/vm/src/host_env.rs +++ b/crates/vm/src/host_env.rs @@ -10,9 +10,10 @@ use namada_account::AccountPublicKeysMap; use namada_core::address::{self, Address, ESTABLISHED_ADDRESS_BYTES_LEN}; use namada_core::arith::{self, checked}; use namada_core::borsh::{BorshDeserialize, BorshSerializeExt}; +use namada_core::chain::BlockHeight; use namada_core::hash::Hash; use namada_core::internal::{HostEnvResult, KeyVal}; -use namada_core::storage::{BlockHeight, Key, TxIndex, TX_INDEX_LENGTH}; +use namada_core::storage::{Key, TxIndex, TX_INDEX_LENGTH}; use namada_events::{Event, EventTypeBuilder}; use namada_gas::{ self as gas, GasMetering, TxGasMeter, VpGasMeter, diff --git a/crates/vote_ext/src/bridge_pool_roots.rs b/crates/vote_ext/src/bridge_pool_roots.rs index 586cfcaf12..f5c72255bf 100644 --- a/crates/vote_ext/src/bridge_pool_roots.rs +++ b/crates/vote_ext/src/bridge_pool_roots.rs @@ -6,10 +6,10 @@ use std::ops::{Deref, DerefMut}; use namada_core::address::Address; use namada_core::borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; +use namada_core::chain::BlockHeight; use namada_core::collections::HashSet; use namada_core::key::common; use namada_core::key::common::Signature; -use namada_core::storage::BlockHeight; use namada_macros::BorshDeserializer; #[cfg(feature = "migrations")] use namada_migrations::*; diff --git a/crates/vote_ext/src/ethereum_events.rs b/crates/vote_ext/src/ethereum_events.rs index 0dc94c4a28..56db6606ed 100644 --- a/crates/vote_ext/src/ethereum_events.rs +++ b/crates/vote_ext/src/ethereum_events.rs @@ -6,10 +6,10 @@ 
use std::ops::Deref; use namada_core::address::Address; use namada_core::borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; +use namada_core::chain::BlockHeight; use namada_core::collections::HashMap; use namada_core::ethereum_events::EthereumEvent; use namada_core::key::common::{self, Signature}; -use namada_core::storage::BlockHeight; use namada_macros::BorshDeserializer; #[cfg(feature = "migrations")] use namada_migrations::*; diff --git a/crates/vote_ext/src/validator_set_update.rs b/crates/vote_ext/src/validator_set_update.rs index a5ae7116f5..c04b7a9b02 100644 --- a/crates/vote_ext/src/validator_set_update.rs +++ b/crates/vote_ext/src/validator_set_update.rs @@ -5,12 +5,12 @@ use std::ops::Deref; use namada_core::address::Address; use namada_core::borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; +use namada_core::chain::Epoch; use namada_core::collections::HashMap; use namada_core::eth_abi::{AbiEncode, Encode, Token}; use namada_core::ethereum_events::EthAddress; use namada_core::keccak::KeccakHash; use namada_core::key::common::{self, Signature}; -use namada_core::storage::Epoch; use namada_core::voting_power::{EthBridgeVotingPower, FractionalVotingPower}; use namada_core::{ethereum_structs, token}; use namada_macros::BorshDeserializer; diff --git a/crates/vp/src/native_vp.rs b/crates/vp/src/native_vp.rs index ce2c95264c..93ee12a23c 100644 --- a/crates/vp/src/native_vp.rs +++ b/crates/vp/src/native_vp.rs @@ -9,8 +9,8 @@ use std::marker::PhantomData; use namada_core::address::Address; use namada_core::borsh::BorshDeserialize; +use namada_core::chain::Epochs; use namada_core::hash::Hash; -use namada_core::storage::Epochs; use namada_core::{borsh, storage}; use namada_events::{Event, EventType}; use namada_gas::{GasMetering, VpGasMeter}; diff --git a/crates/vp/src/vp_host_fns.rs b/crates/vp/src/vp_host_fns.rs index bac8f03c7a..9634611c26 100644 --- a/crates/vp/src/vp_host_fns.rs +++ b/crates/vp/src/vp_host_fns.rs @@ -6,10 +6,9 @@ use 
std::num::TryFromIntError; use namada_core::address::{Address, ESTABLISHED_ADDRESS_BYTES_LEN}; use namada_core::arith::{self, checked}; +use namada_core::chain::{BlockHeight, Epoch, Epochs, Header}; use namada_core::hash::{Hash, HASH_LENGTH}; -use namada_core::storage::{ - BlockHeight, Epoch, Epochs, Header, Key, TxIndex, TX_INDEX_LENGTH, -}; +use namada_core::storage::{Key, TxIndex, TX_INDEX_LENGTH}; use namada_events::{Event, EventTypeBuilder}; use namada_gas as gas; use namada_gas::{GasMetering, VpGasMeter, MEMORY_ACCESS_GAS_PER_BYTE}; diff --git a/crates/vp_env/src/lib.rs b/crates/vp_env/src/lib.rs index 7751632299..d750e8dbdf 100644 --- a/crates/vp_env/src/lib.rs +++ b/crates/vp_env/src/lib.rs @@ -22,8 +22,9 @@ pub mod collection_validation; use namada_core::address::Address; use namada_core::borsh::BorshDeserialize; +use namada_core::chain::{BlockHeight, Epoch, Epochs, Header}; use namada_core::hash::Hash; -use namada_core::storage::{BlockHeight, Epoch, Epochs, Header, Key, TxIndex}; +use namada_core::storage::{Key, TxIndex}; use namada_events::{Event, EventType}; use namada_storage::StorageRead; use namada_tx::BatchedTxRef; diff --git a/crates/vp_prelude/src/lib.rs b/crates/vp_prelude/src/lib.rs index 2c045467a6..9d48f9827c 100644 --- a/crates/vp_prelude/src/lib.rs +++ b/crates/vp_prelude/src/lib.rs @@ -28,11 +28,11 @@ pub use namada_core::address::Address; pub use namada_core::borsh::{ BorshDeserialize, BorshSerialize, BorshSerializeExt, }; -use namada_core::chain::CHAIN_ID_LENGTH; +use namada_core::chain::{BlockHeight, Epoch, Epochs, Header, CHAIN_ID_LENGTH}; pub use namada_core::collections::HashSet; use namada_core::hash::{Hash, HASH_LENGTH}; use namada_core::internal::HostEnvResult; -use namada_core::storage::{BlockHeight, Epoch, Epochs, Header, TxIndex}; +use namada_core::storage::TxIndex; pub use namada_core::validity_predicate::{VpError, VpErrorExtResult}; pub use namada_core::*; use namada_events::{Event, EventType}; @@ -318,7 +318,7 @@ impl<'view> 
VpEnv<'view> for Ctx { get_block_epoch() } - fn get_pred_epochs(&self) -> namada_storage::Result { + fn get_pred_epochs(&self) -> namada_storage::Result { // Both `CtxPreStorageRead` and `CtxPostStorageRead` have the same impl get_pred_epochs() } @@ -471,7 +471,7 @@ impl StorageRead for CtxPreStorageRead<'_> { get_block_epoch() } - fn get_pred_epochs(&self) -> namada_storage::Result { + fn get_pred_epochs(&self) -> namada_storage::Result { get_pred_epochs() } @@ -544,7 +544,7 @@ impl StorageRead for CtxPostStorageRead<'_> { get_block_epoch() } - fn get_pred_epochs(&self) -> namada_storage::Result { + fn get_pred_epochs(&self) -> namada_storage::Result { get_pred_epochs() } diff --git a/wasm/vp_implicit/src/lib.rs b/wasm/vp_implicit/src/lib.rs index c04a1c8127..cffc14fe06 100644 --- a/wasm/vp_implicit/src/lib.rs +++ b/wasm/vp_implicit/src/lib.rs @@ -258,11 +258,11 @@ mod tests { }; use namada_tests::vp::vp_host_env::storage::Key; use namada_tests::vp::*; + use namada_tx_prelude::chain::Epoch; // Use this as `#[test]` annotation to enable logging use namada_tx_prelude::dec::Dec; use namada_tx_prelude::proof_of_stake::parameters::OwnedPosParams; use namada_tx_prelude::proof_of_stake::types::GenesisValidator; - use namada_tx_prelude::storage::Epoch; use namada_tx_prelude::{StorageWrite, TxEnv}; use namada_vp_prelude::account::AccountPublicKeysMap; use namada_vp_prelude::key::RefTo; diff --git a/wasm/vp_user/src/lib.rs b/wasm/vp_user/src/lib.rs index ef577eac20..78544afca6 100644 --- a/wasm/vp_user/src/lib.rs +++ b/wasm/vp_user/src/lib.rs @@ -229,12 +229,12 @@ mod tests { }; use namada_tests::vp::vp_host_env::storage::Key; use namada_tests::vp::*; + use namada_tx_prelude::chain::Epoch; use namada_tx_prelude::dec::Dec; use namada_tx_prelude::proof_of_stake::parameters::{ OwnedPosParams, PosParams, }; use namada_tx_prelude::proof_of_stake::types::GenesisValidator; - use namada_tx_prelude::storage::Epoch; use namada_tx_prelude::{StorageWrite, TxEnv}; use 
namada_vp_prelude::account::AccountPublicKeysMap; use namada_vp_prelude::key::RefTo; From cac41c48c4a35cf5d4a3d63760f8ad0e11e16c9f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Mon, 19 Aug 2024 16:41:50 +0100 Subject: [PATCH 08/73] core: rename s/Header/BlockHeader --- crates/core/src/chain.rs | 8 ++++---- crates/ibc/src/actions.rs | 2 +- crates/ibc/src/vp/context.rs | 12 +++++++++--- crates/node/src/shell/finalize_block.rs | 4 ++-- crates/node/src/shell/mod.rs | 6 +++--- crates/node/src/shell/testing/node.rs | 6 +++--- crates/node/src/shims/abcipp_shim_types.rs | 6 +++--- crates/node/src/storage/rocksdb.rs | 7 +++++-- crates/parameters/src/lib.rs | 12 ++++++------ crates/sdk/src/queries/shell.rs | 6 +++--- crates/state/src/in_memory.rs | 6 +++--- crates/state/src/lib.rs | 6 +++--- crates/state/src/wl_state.rs | 4 ++-- crates/storage/src/db.rs | 9 ++++++--- crates/storage/src/lib.rs | 15 ++++++++++----- crates/storage/src/mockdb.rs | 7 +++++-- crates/test_utils/src/ibc.rs | 4 ++-- crates/tx_prelude/src/lib.rs | 6 +++--- crates/vp/src/native_vp.rs | 10 +++++----- crates/vp/src/vp_host_fns.rs | 4 ++-- crates/vp_env/src/lib.rs | 4 ++-- crates/vp_prelude/src/lib.rs | 14 ++++++++------ 22 files changed, 90 insertions(+), 68 deletions(-) diff --git a/crates/core/src/chain.rs b/crates/core/src/chain.rs index 22774dff61..419089d14d 100644 --- a/crates/core/src/chain.rs +++ b/crates/core/src/chain.rs @@ -523,7 +523,7 @@ impl Epochs { #[derive( Clone, Debug, BorshSerialize, BorshDeserialize, BorshDeserializer, Default, )] -pub struct Header { +pub struct BlockHeader { /// Merkle root hash of block pub hash: Hash, /// Timestamp associated to block @@ -532,7 +532,7 @@ pub struct Header { pub next_validators_hash: Hash, } -impl Header { +impl BlockHeader { /// The number of bytes when this header is encoded pub fn encoded_len(&self) -> usize { self.serialize_to_vec().len() @@ -729,9 +729,9 @@ pub mod testing { } /// A dummy header used for 
testing - pub fn get_dummy_header() -> Header { + pub fn get_dummy_header() -> BlockHeader { use crate::time::DurationSecs; - Header { + BlockHeader { hash: Hash([0; 32]), #[allow( clippy::disallowed_methods, diff --git a/crates/ibc/src/actions.rs b/crates/ibc/src/actions.rs index 74e5cd2bd2..51400163a7 100644 --- a/crates/ibc/src/actions.rs +++ b/crates/ibc/src/actions.rs @@ -79,7 +79,7 @@ where fn get_block_header( &self, height: BlockHeight, - ) -> StorageResult> { + ) -> StorageResult> { StorageRead::get_block_header(self.state, height) } diff --git a/crates/ibc/src/vp/context.rs b/crates/ibc/src/vp/context.rs index 95c5698e21..e196b3ee9b 100644 --- a/crates/ibc/src/vp/context.rs +++ b/crates/ibc/src/vp/context.rs @@ -5,7 +5,7 @@ use std::marker::PhantomData; use namada_core::address::Address; use namada_core::arith::checked; -use namada_core::chain::{BlockHeight, Epoch, Epochs, Header}; +use namada_core::chain::{BlockHeader, BlockHeight, Epoch, Epochs}; use namada_core::collections::{HashMap, HashSet}; use namada_core::storage::{Key, TxIndex}; use namada_events::Event; @@ -152,7 +152,10 @@ Self: 'iter; self.ctx.get_block_height() } - fn get_block_header(&self, height: BlockHeight) -> Result> { + fn get_block_header( + &self, + height: BlockHeight, + ) -> Result> { self.ctx.get_block_header(height) } @@ -339,7 +342,10 @@ where self.ctx.get_block_height() } - fn get_block_header(&self, height: BlockHeight) -> Result> { + fn get_block_header( + &self, + height: BlockHeight, + ) -> Result> { self.ctx.get_block_header(height) } diff --git a/crates/node/src/shell/finalize_block.rs b/crates/node/src/shell/finalize_block.rs index ae72f4cc05..50eda190c8 100644 --- a/crates/node/src/shell/finalize_block.rs +++ b/crates/node/src/shell/finalize_block.rs @@ -20,7 +20,7 @@ use namada_sdk::state::write_log::StorageModification; use namada_sdk::state::{ ResultExt, StorageResult, StorageWrite, EPOCH_SWITCH_BLOCKS_DELAY, }; -use namada_sdk::storage::{BlockResults, Epoch, 
Header}; +use namada_sdk::storage::{BlockHeader, BlockResults, Epoch}; use namada_sdk::tx::data::protocol::ProtocolTxType; use namada_sdk::tx::data::VpStatusFlags; use namada_sdk::tx::event::{Batch, Code}; @@ -215,7 +215,7 @@ where /// validator changes, and evidence of byzantine behavior. Applies slashes /// if necessary. Returns a boolean indicating if a new epoch and the height /// of the new block. - fn update_state(&mut self, header: Header) -> (BlockHeight, bool) { + fn update_state(&mut self, header: BlockHeader) -> (BlockHeight, bool) { let height = self.state.in_mem().get_last_block_height().next_height(); self.state diff --git a/crates/node/src/shell/mod.rs b/crates/node/src/shell/mod.rs index 7330ebb692..fa29d58052 100644 --- a/crates/node/src/shell/mod.rs +++ b/crates/node/src/shell/mod.rs @@ -1462,7 +1462,7 @@ pub mod test_utils { use namada_sdk::proof_of_stake::storage::validator_consensus_key_handle; use namada_sdk::state::mockdb::MockDB; use namada_sdk::state::{LastBlock, StorageWrite}; - use namada_sdk::storage::{Epoch, Header}; + use namada_sdk::storage::{BlockHeader, Epoch}; use namada_sdk::tendermint::abci::types::VoteInfo; use tempfile::tempdir; use tokio::sync::mpsc::{Sender, UnboundedReceiver}; @@ -1904,7 +1904,7 @@ pub mod test_utils { impl Default for FinalizeBlock { fn default() -> Self { FinalizeBlock { - header: Header { + header: BlockHeader { hash: Hash([0; 32]), #[allow(clippy::disallowed_methods)] time: DateTimeUtc::now(), @@ -1965,7 +1965,7 @@ pub mod test_utils { byzantine_validators: Option>, ) { // Let the header time be always ahead of the next epoch min start time - let header = Header { + let header = BlockHeader { time: shell.state.in_mem().next_epoch_min_start_time.next_second(), ..Default::default() }; diff --git a/crates/node/src/shell/testing/node.rs b/crates/node/src/shell/testing/node.rs index 86af6d494d..24e324441a 100644 --- a/crates/node/src/shell/testing/node.rs +++ b/crates/node/src/shell/testing/node.rs @@ -11,7 
+11,7 @@ use data_encoding::HEXUPPER; use itertools::Either; use lazy_static::lazy_static; use namada_sdk::address::Address; -use namada_sdk::chain::{BlockHeight, Epoch, Header}; +use namada_sdk::chain::{BlockHeader, BlockHeight, Epoch}; use namada_sdk::collections::HashMap; use namada_sdk::control_flow::time::Duration; use namada_sdk::eth_bridge::oracle::config::Config as OracleConfig; @@ -483,7 +483,7 @@ impl MockNode { }; // build finalize block abci request let req = FinalizeBlock { - header: Header { + header: BlockHeader { hash: Hash([0; 32]), #[allow(clippy::disallowed_methods)] time: DateTimeUtc::now(), @@ -602,7 +602,7 @@ impl MockNode { // process proposal succeeded, now run finalize block let req = FinalizeBlock { - header: Header { + header: BlockHeader { hash: Hash([0; 32]), #[allow(clippy::disallowed_methods)] time: DateTimeUtc::now(), diff --git a/crates/node/src/shims/abcipp_shim_types.rs b/crates/node/src/shims/abcipp_shim_types.rs index e6b847aa13..e7cde20221 100644 --- a/crates/node/src/shims/abcipp_shim_types.rs +++ b/crates/node/src/shims/abcipp_shim_types.rs @@ -170,7 +170,7 @@ pub mod shim { use bytes::Bytes; use namada_sdk::hash::Hash; - use namada_sdk::storage::Header; + use namada_sdk::storage::BlockHeader; use namada_sdk::tendermint::abci::types::CommitInfo; use namada_sdk::tendermint::account::Id; use namada_sdk::tendermint::block::Height; @@ -193,7 +193,7 @@ pub mod shim { #[derive(Debug, Clone)] pub struct FinalizeBlock { - pub header: Header, + pub header: BlockHeader, pub block_hash: Hash, pub byzantine_validators: Vec, pub txs: Vec, @@ -217,7 +217,7 @@ pub mod shim { fn from(req: tm_request::BeginBlock) -> FinalizeBlock { let header = req.header; FinalizeBlock { - header: Header { + header: BlockHeader { hash: Hash::try_from(header.app_hash.as_bytes()) .unwrap_or_default(), time: DateTimeUtc::try_from(header.time).unwrap(), diff --git a/crates/node/src/storage/rocksdb.rs b/crates/node/src/storage/rocksdb.rs index 
facb1f79f7..b7395e07c1 100644 --- a/crates/node/src/storage/rocksdb.rs +++ b/crates/node/src/storage/rocksdb.rs @@ -71,7 +71,7 @@ use namada_sdk::state::{ StoreType, DB, }; use namada_sdk::storage::{ - BlockHeight, DbColFam, Epoch, Header, Key, KeySeg, BLOCK_CF, DIFFS_CF, + BlockHeader, BlockHeight, DbColFam, Epoch, Key, KeySeg, BLOCK_CF, DIFFS_CF, REPLAY_PROTECTION_CF, ROLLBACK_CF, STATE_CF, SUBSPACE_CF, }; use namada_sdk::{decode, encode, ethereum_events}; @@ -1265,7 +1265,10 @@ impl DB for RocksDB { Ok(()) } - fn read_block_header(&self, height: BlockHeight) -> Result> { + fn read_block_header( + &self, + height: BlockHeight, + ) -> Result> { let block_cf = self.get_column_family(BLOCK_CF)?; let header_key = format!("{}/{BLOCK_HEADER_KEY_SEGMENT}", height.raw()); self.read_value(block_cf, header_key) diff --git a/crates/parameters/src/lib.rs b/crates/parameters/src/lib.rs index 5d644a7fbe..e5b68ac6cc 100644 --- a/crates/parameters/src/lib.rs +++ b/crates/parameters/src/lib.rs @@ -629,7 +629,7 @@ where #[cfg(test)] mod tests { - use namada_core::chain::Header; + use namada_core::chain::BlockHeader; use namada_core::time::DateTimeUtc; use namada_storage::testing::TestStorage; @@ -721,7 +721,7 @@ mod tests { storage.set_mock_block_header( height, - Header { + BlockHeader { time: DateTimeUtc::from_unix_timestamp(timestamp).unwrap(), ..Default::default() }, @@ -744,7 +744,7 @@ mod tests { storage.set_mock_block_header( height, - Header { + BlockHeader { time: DateTimeUtc::from_unix_timestamp(timestamp).unwrap(), ..Default::default() }, @@ -777,7 +777,7 @@ mod tests { storage.set_mock_block_header( height, - Header { + BlockHeader { time: DateTimeUtc::from_unix_timestamp(timestamp).unwrap(), ..Default::default() }, @@ -801,7 +801,7 @@ mod tests { for height in 1u64..=2 { storage.set_mock_block_header( BlockHeight(height), - Header { + BlockHeader { time: DateTimeUtc::unix_epoch(), ..Default::default() }, @@ -824,7 +824,7 @@ mod tests { storage.set_mock_block_header( 
height, - Header { + BlockHeader { time: DateTimeUtc::from_unix_timestamp(timestamp).unwrap(), ..Default::default() }, diff --git a/crates/sdk/src/queries/shell.rs b/crates/sdk/src/queries/shell.rs index 001c155853..52132f703c 100644 --- a/crates/sdk/src/queries/shell.rs +++ b/crates/sdk/src/queries/shell.rs @@ -10,7 +10,7 @@ use masp_primitives::sapling::Node; use namada_account::{Account, AccountPublicKeysMap}; use namada_core::address::Address; use namada_core::arith::checked; -use namada_core::chain::{BlockHeight, Epoch, Header}; +use namada_core::chain::{BlockHeader, BlockHeight, Epoch}; use namada_core::dec::Dec; use namada_core::hash::Hash; use namada_core::hints; @@ -120,7 +120,7 @@ router! {SHELL, ( "ibc_packet" / [event_type: IbcEventType] / [source_port: PortId] / [source_channel: ChannelId] / [destination_port: PortId] / [destination_channel: ChannelId] / [sequence: Sequence]) -> Option = ibc_packet, // Get the block header associated with the requested height - ( "block_header" / [height: BlockHeight] ) -> Option
= block_header, + ( "block_header" / [height: BlockHeight] ) -> Option = block_header, // Return an estimate of the maximum time taken to decide a block ( "max_block_time" ) -> DurationSecs = max_block_time, @@ -161,7 +161,7 @@ where fn block_header( ctx: RequestCtx<'_, D, H, V, T>, height: BlockHeight, -) -> namada_storage::Result> +) -> namada_storage::Result> where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, diff --git a/crates/state/src/in_memory.rs b/crates/state/src/in_memory.rs index d8d58406d3..a2807d520d 100644 --- a/crates/state/src/in_memory.rs +++ b/crates/state/src/in_memory.rs @@ -17,7 +17,7 @@ use namada_storage::conversion_state::ConversionState; use namada_storage::tx_queue::ExpiredTxsQueue; use namada_storage::types::CommitOnlyData; use namada_storage::{ - BlockHeight, BlockResults, Epoch, Epochs, EthEventsQueue, Header, Key, + BlockHeader, BlockHeight, BlockResults, Epoch, Epochs, EthEventsQueue, Key, KeySeg, StorageHasher, TxIndex, EPOCH_TYPE_LENGTH, }; @@ -39,7 +39,7 @@ where /// During `FinalizeBlock`, this is the header of the block that is /// going to be committed. After a block is committed, this is reset to /// `None` until the next `FinalizeBlock` phase is reached. - pub header: Option
, + pub header: Option, /// The most recently committed block, if any. pub last_block: Option, /// The epoch of the most recently committed block. If it is `Epoch(0)`, @@ -189,7 +189,7 @@ where /// Set the block header. /// The header is not in the Merkle tree as it's tracked by Tendermint. /// Hence, we don't update the tree when this is set. - pub fn set_header(&mut self, header: Header) -> Result<()> { + pub fn set_header(&mut self, header: BlockHeader) -> Result<()> { self.header = Some(header); Ok(()) } diff --git a/crates/state/src/lib.rs b/crates/state/src/lib.rs index 616573b0e2..f591dd13f3 100644 --- a/crates/state/src/lib.rs +++ b/crates/state/src/lib.rs @@ -33,7 +33,7 @@ pub use in_memory::{ use namada_core::address::Address; use namada_core::arith::{self, checked}; pub use namada_core::chain::{ - BlockHash, BlockHeight, Epoch, Epochs, Header, BLOCK_HASH_LENGTH, + BlockHash, BlockHeader, BlockHeight, Epoch, Epochs, BLOCK_HASH_LENGTH, BLOCK_HEIGHT_LENGTH, }; use namada_core::eth_bridge_pool::is_pending_transfer_key; @@ -167,7 +167,7 @@ pub trait StateRead: StorageRead + Debug { fn get_block_header( &self, height: Option, - ) -> Result<(Option
, u64)> { + ) -> Result<(Option, u64)> { match height { Some(h) if h == self.in_mem().get_block_height().0 => { let header = self.in_mem().header.clone(); @@ -313,7 +313,7 @@ macro_rules! impl_storage_read { fn get_block_header( &self, height: BlockHeight, - ) -> std::result::Result, namada_storage::Error> + ) -> std::result::Result, namada_storage::Error> { let (header, gas) = StateRead::get_block_header(self, Some(height)).into_storage_result()?; diff --git a/crates/state/src/wl_state.rs b/crates/state/src/wl_state.rs index df43a057df..5d498ffc6a 100644 --- a/crates/state/src/wl_state.rs +++ b/crates/state/src/wl_state.rs @@ -4,7 +4,7 @@ use std::ops::{Deref, DerefMut}; use namada_core::address::Address; use namada_core::arith::checked; use namada_core::borsh::BorshSerializeExt; -use namada_core::chain::{ChainId, Header}; +use namada_core::chain::ChainId; use namada_core::masp::MaspEpoch; use namada_core::parameters::{EpochDuration, Parameters}; use namada_core::storage; @@ -567,7 +567,7 @@ where #[cfg(any(test, feature = "testing", feature = "benches"))] { if self.in_mem.header.is_none() { - self.in_mem.header = Some(Header { + self.in_mem.header = Some(namada_core::chain::BlockHeader { hash: Hash::default(), #[allow(clippy::disallowed_methods)] time: DateTimeUtc::now(), diff --git a/crates/storage/src/db.rs b/crates/storage/src/db.rs index f69fa51386..ce8c36e644 100644 --- a/crates/storage/src/db.rs +++ b/crates/storage/src/db.rs @@ -2,7 +2,7 @@ use std::fmt::Debug; use std::num::TryFromIntError; use namada_core::address::EstablishedAddressGen; -use namada_core::chain::{BlockHeight, Epoch, Epochs, Header}; +use namada_core::chain::{BlockHeader, BlockHeight, Epoch, Epochs}; use namada_core::hash::{Error as HashError, Hash}; use namada_core::storage::{BlockResults, DbColFam, EthEventsQueue, Key}; use namada_core::time::DateTimeUtc; @@ -85,7 +85,7 @@ pub struct BlockStateWrite<'a> { /// Merkle tree stores pub merkle_tree_stores: MerkleTreeStoresWrite<'a>, /// 
Header of the block - pub header: Option<&'a Header>, + pub header: Option<&'a BlockHeader>, /// Height of the block pub height: BlockHeight, /// Time of the block @@ -154,7 +154,10 @@ pub trait DB: Debug { ) -> Result<()>; /// Read the block header with the given height from the DB - fn read_block_header(&self, height: BlockHeight) -> Result>; + fn read_block_header( + &self, + height: BlockHeight, + ) -> Result>; /// Read the merkle tree stores with the given epoch. If a store_type is /// given, it reads only the specified tree. Otherwise, it reads all diff --git a/crates/storage/src/lib.rs b/crates/storage/src/lib.rs index 93ca831a73..07379c5dbd 100644 --- a/crates/storage/src/lib.rs +++ b/crates/storage/src/lib.rs @@ -30,7 +30,9 @@ pub use db::{Error as DbError, Result as DbResult, *}; pub use error::{CustomError, Error, OptionExt, Result, ResultExt}; use namada_core::address::Address; use namada_core::borsh::{BorshDeserialize, BorshSerialize, BorshSerializeExt}; -pub use namada_core::chain::{BlockHash, BlockHeight, Epoch, Epochs, Header}; +pub use namada_core::chain::{ + BlockHash, BlockHeader, BlockHeight, Epoch, Epochs, +}; pub use namada_core::hash::StorageHasher; pub use namada_core::storage::*; @@ -97,7 +99,10 @@ pub trait StorageRead { fn get_block_height(&self) -> Result; /// Getting the block header. - fn get_block_header(&self, height: BlockHeight) -> Result>; + fn get_block_header( + &self, + height: BlockHeight, + ) -> Result>; /// Getting the block epoch. The epoch is that of the block to which the /// current transaction is being applied. 
@@ -324,7 +329,7 @@ pub mod testing { native_token: Address, conversion_state: ConversionState, merkle_tree_key_filter: fn(&Key) -> bool, - mock_block_headers: HashMap, + mock_block_headers: HashMap, } fn merklize_all_keys(_key: &Key) -> bool { @@ -353,7 +358,7 @@ pub mod testing { pub fn set_mock_block_header( &mut self, height: BlockHeight, - header: Header, + header: BlockHeader, ) { self.mock_block_headers.insert(height, header); } @@ -398,7 +403,7 @@ pub mod testing { fn get_block_header( &self, height: BlockHeight, - ) -> Result> { + ) -> Result> { Ok(self.mock_block_headers.get(&height).cloned()) } diff --git a/crates/storage/src/mockdb.rs b/crates/storage/src/mockdb.rs index 782b88701f..00d38d662b 100644 --- a/crates/storage/src/mockdb.rs +++ b/crates/storage/src/mockdb.rs @@ -8,7 +8,7 @@ use std::path::Path; use itertools::Either; use namada_core::borsh::{BorshDeserialize, BorshSerialize}; -use namada_core::chain::{BlockHeight, Epoch, Header}; +use namada_core::chain::{BlockHeader, BlockHeight, Epoch}; use namada_core::hash::Hash; use namada_core::storage::{DbColFam, Key, KeySeg, KEY_SEGMENT_SEPARATOR}; use namada_core::{decode, encode, ethereum_events}; @@ -281,7 +281,10 @@ impl DB for MockDB { Ok(()) } - fn read_block_header(&self, height: BlockHeight) -> Result> { + fn read_block_header( + &self, + height: BlockHeight, + ) -> Result> { let header_key = format!("{}/{BLOCK_HEADER_KEY_SEGMENT}", height.raw()); self.read_value(header_key) } diff --git a/crates/test_utils/src/ibc.rs b/crates/test_utils/src/ibc.rs index d9820ef2b1..50fc8c99a5 100644 --- a/crates/test_utils/src/ibc.rs +++ b/crates/test_utils/src/ibc.rs @@ -11,7 +11,7 @@ use namada_core::ibc::clients::tendermint::types::{ use namada_core::ibc::core::client::types::Height; use namada_core::ibc::primitives::proto::Any; use namada_state::ics23_specs::ibc_proof_specs; -use namada_state::{Header, Sha256Hasher}; +use namada_state::{BlockHeader, Sha256Hasher}; use prost::Message; pub fn 
make_new_client_state_bytes(height: u64) -> Vec { @@ -41,7 +41,7 @@ pub fn make_new_client_state_bytes(height: u64) -> Vec { Any::from(client_state).encode_to_vec() } -pub fn make_new_consensus_state_bytes(header: Header) -> Vec { +pub fn make_new_consensus_state_bytes(header: BlockHeader) -> Vec { let consensus_state: TmConsensusState = TmConsensusStateType { timestamp: header .time diff --git a/crates/tx_prelude/src/lib.rs b/crates/tx_prelude/src/lib.rs index ff1c25a0c6..8b8d9d409a 100644 --- a/crates/tx_prelude/src/lib.rs +++ b/crates/tx_prelude/src/lib.rs @@ -31,7 +31,7 @@ pub use namada_core::borsh::{ }; use namada_core::chain::CHAIN_ID_LENGTH; pub use namada_core::chain::{ - BlockHash, BlockHeight, Epoch, Header, BLOCK_HASH_LENGTH, + BlockHash, BlockHeader, BlockHeight, Epoch, BLOCK_HASH_LENGTH, }; pub use namada_core::ethereum_events::EthAddress; use namada_core::internal::HostEnvResult; @@ -182,11 +182,11 @@ impl StorageRead for Ctx { fn get_block_header( &self, height: BlockHeight, - ) -> Result, Error> { + ) -> Result, Error> { let read_result = unsafe { namada_tx_get_block_header(height.0) }; match read_from_buffer(read_result, namada_tx_result_buffer) { Some(value) => Ok(Some( - Header::try_from_slice(&value[..]) + BlockHeader::try_from_slice(&value[..]) .expect("The conversion shouldn't fail"), )), None => Ok(None), diff --git a/crates/vp/src/native_vp.rs b/crates/vp/src/native_vp.rs index 93ee12a23c..6ab01f7817 100644 --- a/crates/vp/src/native_vp.rs +++ b/crates/vp/src/native_vp.rs @@ -17,8 +17,8 @@ use namada_gas::{GasMetering, VpGasMeter}; use namada_state as state; use namada_state::prefix_iter::PrefixIterators; use namada_state::{ - BlockHeight, Epoch, Header, Key, ResultExt, StorageRead, StorageResult, - TxIndex, + BlockHeader, BlockHeight, Epoch, Key, ResultExt, StorageRead, + StorageResult, TxIndex, }; use namada_tx::{BatchedTxRef, Tx, TxCommitments}; pub use namada_vp_env::VpEnv; @@ -228,7 +228,7 @@ where fn get_block_header( &self, height: 
BlockHeight, - ) -> Result, state::StorageError> { + ) -> Result, state::StorageError> { self.ctx.get_block_header(height) } @@ -306,7 +306,7 @@ where fn get_block_header( &self, height: BlockHeight, - ) -> Result, state::StorageError> { + ) -> Result, state::StorageError> { self.ctx.get_block_header(height) } @@ -375,7 +375,7 @@ where fn get_block_header( &self, height: BlockHeight, - ) -> Result, state::StorageError> { + ) -> Result, state::StorageError> { vp_host_fns::get_block_header(self.gas_meter, self.state, height) .into_storage_result() } diff --git a/crates/vp/src/vp_host_fns.rs b/crates/vp/src/vp_host_fns.rs index 9634611c26..35cb825825 100644 --- a/crates/vp/src/vp_host_fns.rs +++ b/crates/vp/src/vp_host_fns.rs @@ -6,7 +6,7 @@ use std::num::TryFromIntError; use namada_core::address::{Address, ESTABLISHED_ADDRESS_BYTES_LEN}; use namada_core::arith::{self, checked}; -use namada_core::chain::{BlockHeight, Epoch, Epochs, Header}; +use namada_core::chain::{BlockHeader, BlockHeight, Epoch, Epochs}; use namada_core::hash::{Hash, HASH_LENGTH}; use namada_core::storage::{Key, TxIndex, TX_INDEX_LENGTH}; use namada_events::{Event, EventTypeBuilder}; @@ -244,7 +244,7 @@ pub fn get_block_header( gas_meter: &RefCell, state: &S, height: BlockHeight, -) -> EnvResult> +) -> EnvResult> where S: StateRead + Debug, { diff --git a/crates/vp_env/src/lib.rs b/crates/vp_env/src/lib.rs index d750e8dbdf..36cc0f1575 100644 --- a/crates/vp_env/src/lib.rs +++ b/crates/vp_env/src/lib.rs @@ -22,7 +22,7 @@ pub mod collection_validation; use namada_core::address::Address; use namada_core::borsh::BorshDeserialize; -use namada_core::chain::{BlockHeight, Epoch, Epochs, Header}; +use namada_core::chain::{BlockHeader, BlockHeight, Epoch, Epochs}; use namada_core::hash::Hash; use namada_core::storage::{Key, TxIndex}; use namada_events::{Event, EventType}; @@ -77,7 +77,7 @@ where fn get_block_header( &self, height: BlockHeight, - ) -> Result, namada_storage::Error>; + ) -> Result, 
namada_storage::Error>; /// Getting the block epoch. The epoch is that of the block to which the /// current transaction is being applied. diff --git a/crates/vp_prelude/src/lib.rs b/crates/vp_prelude/src/lib.rs index 9d48f9827c..8fd7e70e0a 100644 --- a/crates/vp_prelude/src/lib.rs +++ b/crates/vp_prelude/src/lib.rs @@ -28,7 +28,9 @@ pub use namada_core::address::Address; pub use namada_core::borsh::{ BorshDeserialize, BorshSerialize, BorshSerializeExt, }; -use namada_core::chain::{BlockHeight, Epoch, Epochs, Header, CHAIN_ID_LENGTH}; +use namada_core::chain::{ + BlockHeader, BlockHeight, Epoch, Epochs, CHAIN_ID_LENGTH, +}; pub use namada_core::collections::HashSet; use namada_core::hash::{Hash, HASH_LENGTH}; use namada_core::internal::HostEnvResult; @@ -308,7 +310,7 @@ impl<'view> VpEnv<'view> for Ctx { fn get_block_header( &self, height: BlockHeight, - ) -> Result, StorageError> { + ) -> Result, StorageError> { // Both `CtxPreStorageRead` and `CtxPostStorageRead` have the same impl get_block_header(height) } @@ -463,7 +465,7 @@ impl StorageRead for CtxPreStorageRead<'_> { fn get_block_header( &self, height: BlockHeight, - ) -> Result, StorageError> { + ) -> Result, StorageError> { get_block_header(height) } @@ -536,7 +538,7 @@ impl StorageRead for CtxPostStorageRead<'_> { fn get_block_header( &self, height: BlockHeight, - ) -> Result, StorageError> { + ) -> Result, StorageError> { get_block_header(height) } @@ -596,11 +598,11 @@ fn get_block_height() -> Result { fn get_block_header( height: BlockHeight, -) -> Result, StorageError> { +) -> Result, StorageError> { let read_result = unsafe { namada_vp_get_block_header(height.0) }; match read_from_buffer(read_result, namada_vp_result_buffer) { Some(value) => Ok(Some( - Header::try_from_slice(&value[..]) + BlockHeader::try_from_slice(&value[..]) .expect("The conversion shouldn't fail"), )), None => Ok(None), From de9eac80dfa27bb660e77d64864717c1ac1769f1 Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Mon, 19 Aug 2024 16:55:53 +0100 Subject: [PATCH 09/73] core: update crate description --- crates/core/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/core/src/lib.rs b/crates/core/src/lib.rs index 2ea47ec655..377d30737d 100644 --- a/crates/core/src/lib.rs +++ b/crates/core/src/lib.rs @@ -1,4 +1,4 @@ -//! The core public types, storage_api, VpEnv and TxEnv. +//! The core Namada types, helpers and re-exported dependencies. #![doc(html_favicon_url = "https://dev.namada.net/master/favicon.png")] #![doc(html_logo_url = "https://dev.namada.net/master/rustdoc-logo.png")] From 23af704b41f5550b6401736905ae560640d6fd1f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Mon, 19 Aug 2024 17:06:50 +0100 Subject: [PATCH 10/73] replay_protection: re-export storage Key --- crates/replay_protection/src/lib.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/crates/replay_protection/src/lib.rs b/crates/replay_protection/src/lib.rs index 5772e2cb15..7b554f0261 100644 --- a/crates/replay_protection/src/lib.rs +++ b/crates/replay_protection/src/lib.rs @@ -19,7 +19,8 @@ use namada_core::address::{Address, InternalAddress}; use namada_core::hash::Hash; -use namada_core::storage::{DbKeySeg, Key}; +use namada_core::storage::DbKeySeg; +pub use namada_core::storage::Key; const ERROR_MSG: &str = "Cannot obtain a valid db key"; From 7756c4afe4113042df373820d15e7d04e0cf1661 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Mon, 19 Aug 2024 17:17:24 +0100 Subject: [PATCH 11/73] merkle_tree: re-export types in pub api --- crates/merkle_tree/src/eth_bridge_pool.rs | 6 ++++-- crates/merkle_tree/src/lib.rs | 15 +++++++++------ 2 files changed, 13 insertions(+), 8 deletions(-) diff --git a/crates/merkle_tree/src/eth_bridge_pool.rs b/crates/merkle_tree/src/eth_bridge_pool.rs index 588bba1019..e4bfed3c96 100644 --- 
a/crates/merkle_tree/src/eth_bridge_pool.rs +++ b/crates/merkle_tree/src/eth_bridge_pool.rs @@ -6,15 +6,17 @@ use eyre::eyre; use namada_core::borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; use namada_core::chain::BlockHeight; use namada_core::eth_abi::{Encode, Token}; -use namada_core::eth_bridge_pool::PendingTransfer; +pub use namada_core::eth_bridge_pool::PendingTransfer; use namada_core::hash::Hash; -use namada_core::keccak::{keccak_hash, KeccakHash}; +use namada_core::keccak::keccak_hash; use namada_core::storage; use namada_core::storage::DbKeySeg; use namada_macros::BorshDeserializer; #[cfg(feature = "migrations")] use namada_migrations::*; +use crate::KeccakHash; + #[derive(thiserror::Error, Debug)] #[error(transparent)] /// Generic error that may be returned by the validity predicate diff --git a/crates/merkle_tree/src/lib.rs b/crates/merkle_tree/src/lib.rs index b8aa33129c..f9f2f37e49 100644 --- a/crates/merkle_tree/src/lib.rs +++ b/crates/merkle_tree/src/lib.rs @@ -25,22 +25,25 @@ use std::str::FromStr; use arse_merkle_tree::default_store::DefaultStore; use arse_merkle_tree::error::Error as MtError; +pub use arse_merkle_tree::H256; use arse_merkle_tree::{ - Hash as SmtHash, Key as TreeKey, SparseMerkleTree as ArseMerkleTree, H256, + Hash as SmtHash, Key as TreeKey, SparseMerkleTree as ArseMerkleTree, }; use eth_bridge_pool::{BridgePoolProof, BridgePoolTree}; use ics23::commitment_proof::Proof as Ics23Proof; -use ics23::{CommitmentProof, ExistenceProof, NonExistenceProof}; +pub use ics23::CommitmentProof; +use ics23::{ExistenceProof, NonExistenceProof}; use ics23_specs::ibc_leaf_spec; use namada_core::address::{Address, InternalAddress}; use namada_core::borsh::{BorshDeserialize, BorshSerialize, BorshSerializeExt}; use namada_core::bytes::ByteBuf; -use namada_core::chain::{BlockHeight, Epoch}; +pub use namada_core::chain::{BlockHeight, Epoch}; use namada_core::eth_bridge_pool::{is_pending_transfer_key, PendingTransfer}; -use 
namada_core::hash::{Hash, StorageHasher}; -use namada_core::keccak::KeccakHash; +pub use namada_core::hash::{Hash, StorageHasher}; +pub use namada_core::keccak::KeccakHash; +pub use namada_core::storage::Key; use namada_core::storage::{ - self, DbKeySeg, Error as StorageError, Key, KeySeg, StringKey, TreeBytes, + self, DbKeySeg, Error as StorageError, KeySeg, StringKey, TreeBytes, TreeKeyError, IBC_KEY_LIMIT, }; use namada_core::{decode, DecodeError}; From d68a5bfac9103c3bd7842b2299a931b7c12eac40 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Mon, 19 Aug 2024 17:26:54 +0100 Subject: [PATCH 12/73] storage: re-export types in pub api --- crates/storage/src/conversion_state.rs | 16 ++++++++-------- crates/storage/src/lib.rs | 2 +- crates/storage/src/tx_queue.rs | 2 +- crates/storage/src/types.rs | 2 +- 4 files changed, 11 insertions(+), 11 deletions(-) diff --git a/crates/storage/src/conversion_state.rs b/crates/storage/src/conversion_state.rs index 3031783c72..3f86dde72e 100644 --- a/crates/storage/src/conversion_state.rs +++ b/crates/storage/src/conversion_state.rs @@ -2,14 +2,14 @@ use std::collections::BTreeMap; -use namada_core::address::Address; +pub use namada_core::address::Address; use namada_core::borsh::{BorshDeserialize, BorshSerialize}; -use namada_core::masp::MaspEpoch; -use namada_core::masp_primitives::asset_type::AssetType; -use namada_core::masp_primitives::convert::AllowedConversion; -use namada_core::masp_primitives::merkle_tree::FrozenCommitmentTree; -use namada_core::masp_primitives::sapling; -use namada_core::token::{Denomination, MaspDigitPos}; +pub use namada_core::masp::MaspEpoch; +pub use namada_core::masp_primitives::asset_type::AssetType; +pub use namada_core::masp_primitives::convert::AllowedConversion; +pub use namada_core::masp_primitives::merkle_tree::FrozenCommitmentTree; +pub use namada_core::masp_primitives::sapling::Node as SaplingNode; +pub use namada_core::token::{Denomination, MaspDigitPos}; 
use namada_macros::BorshDeserializer; #[cfg(feature = "migrations")] use namada_migrations::*; @@ -39,7 +39,7 @@ pub struct ConversionState { /// The last amount of the native token distributed pub normed_inflation: Option, /// The tree currently containing all the conversions - pub tree: FrozenCommitmentTree, + pub tree: FrozenCommitmentTree, /// Map assets to their latest conversion and position in Merkle tree pub assets: BTreeMap, } diff --git a/crates/storage/src/lib.rs b/crates/storage/src/lib.rs index 07379c5dbd..c8fd5a9b36 100644 --- a/crates/storage/src/lib.rs +++ b/crates/storage/src/lib.rs @@ -33,7 +33,7 @@ use namada_core::borsh::{BorshDeserialize, BorshSerialize, BorshSerializeExt}; pub use namada_core::chain::{ BlockHash, BlockHeader, BlockHeight, Epoch, Epochs, }; -pub use namada_core::hash::StorageHasher; +pub use namada_core::hash::{Hash, StorageHasher}; pub use namada_core::storage::*; /// Common storage read interface diff --git a/crates/storage/src/tx_queue.rs b/crates/storage/src/tx_queue.rs index 1ee71134f6..547dfc4742 100644 --- a/crates/storage/src/tx_queue.rs +++ b/crates/storage/src/tx_queue.rs @@ -1,7 +1,7 @@ //! 
Transaction queue use namada_core::borsh::{BorshDeserialize, BorshSerialize}; -use namada_core::ethereum_events::EthereumEvent; +pub use namada_core::ethereum_events::EthereumEvent; use namada_macros::BorshDeserializer; #[cfg(feature = "migrations")] use namada_migrations::*; diff --git a/crates/storage/src/types.rs b/crates/storage/src/types.rs index e5f323bf09..741485e198 100644 --- a/crates/storage/src/types.rs +++ b/crates/storage/src/types.rs @@ -5,7 +5,7 @@ use std::collections::BTreeMap; use borsh::{BorshDeserialize, BorshSerialize}; use namada_core::borsh::BorshSerializeExt; use namada_core::hash::Hash; -use regex::Regex; +pub use regex::Regex; /// A key-value pair as raw bytes pub type KVBytes = (Box<[u8]>, Box<[u8]>); From 3139dd33bdaa7316da1fe862418d7730f8e62fcc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Mon, 19 Aug 2024 17:39:53 +0100 Subject: [PATCH 13/73] storage: update rustdocs --- crates/storage/src/lib.rs | 23 ++++++++--------------- 1 file changed, 8 insertions(+), 15 deletions(-) diff --git a/crates/storage/src/lib.rs b/crates/storage/src/lib.rs index c8fd5a9b36..fad41bdd21 100644 --- a/crates/storage/src/lib.rs +++ b/crates/storage/src/lib.rs @@ -1,5 +1,11 @@ -//! The common storage read trait is implemented in the storage, client RPC, tx -//! and VPs (both native and WASM). +//! This crate provides +//! +//! - [`StorageRead`] and [`StorageWrite`] (high-level) and [`DB`] (low-level) +//! traits +//! - `MockDB` [`DB`] implementation for testing +//! - [`collections`] with generic lazy collections for storage +//! - [`conversion_state`] for shielded token rewards +//! 
- helpers for storage iteration #![doc(html_favicon_url = "https://dev.namada.net/master/favicon.png")] #![doc(html_logo_url = "https://dev.namada.net/master/rustdoc-logo.png")] @@ -37,19 +43,6 @@ pub use namada_core::hash::{Hash, StorageHasher}; pub use namada_core::storage::*; /// Common storage read interface -/// -/// If you're using this trait and having compiler complaining about needing an -/// explicit lifetime parameter, simply use trait bounds with the following -/// syntax: -/// -/// ```rust,ignore -/// where -/// S: StorageRead -/// ``` -/// -/// If you want to know why this is needed, see the to-do task below. The -/// syntax for this relies on higher-rank lifetimes, see e.g. -/// . pub trait StorageRead { /// Storage read prefix iterator type PrefixIter<'iter> From c6b309ea31a4b04c32f80b7df77cb3f07c65ab2a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Mon, 19 Aug 2024 17:55:28 +0100 Subject: [PATCH 14/73] gas: re-export types in pub api --- crates/gas/src/storage.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/gas/src/storage.rs b/crates/gas/src/storage.rs index e86a08c8a8..088cc92b14 100644 --- a/crates/gas/src/storage.rs +++ b/crates/gas/src/storage.rs @@ -1,6 +1,6 @@ //! 
Gas storage keys -use namada_core::storage::Key; +pub use namada_core::storage::Key; const ERROR_MSG: &str = "Cannot obtain a valid db key"; From 39784a3b7466c6abb4fa7d42d7b82c13a9a93320 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Mon, 19 Aug 2024 17:57:47 +0100 Subject: [PATCH 15/73] tx_env: re-export types in pub api --- crates/tx_env/src/lib.rs | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/crates/tx_env/src/lib.rs b/crates/tx_env/src/lib.rs index 78bdf32717..7eb99caa85 100644 --- a/crates/tx_env/src/lib.rs +++ b/crates/tx_env/src/lib.rs @@ -18,11 +18,13 @@ clippy::print_stderr )] -use namada_core::address::Address; -use namada_core::borsh::{BorshDeserialize, BorshSerialize, BorshSerializeExt}; -use namada_core::storage; -use namada_events::{Event, EventToEmit, EventType}; -use namada_storage::{Result, ResultExt, StorageRead, StorageWrite}; +pub use namada_core::address::Address; +pub use namada_core::borsh::{ + BorshDeserialize, BorshSerialize, BorshSerializeExt, +}; +pub use namada_core::storage; +pub use namada_events::{Event, EventToEmit, EventType}; +pub use namada_storage::{Result, ResultExt, StorageRead, StorageWrite}; /// Transaction host functions pub trait TxEnv: StorageRead + StorageWrite { From 8d440c394929591e4bf8f39b97a0e419c059878c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Tue, 20 Aug 2024 14:10:42 +0100 Subject: [PATCH 16/73] tx: remove unnecessary "salt" feature --- crates/tx/Cargo.toml | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/crates/tx/Cargo.toml b/crates/tx/Cargo.toml index 6321b51434..d3a94971fa 100644 --- a/crates/tx/Cargo.toml +++ b/crates/tx/Cargo.toml @@ -13,9 +13,8 @@ repository.workspace = true version.workspace = true [features] -default = ["salt"] +default = [] testing = ["proptest", "namada_account/testing", "namada_core/testing"] -salt = ["rand_core"] migrations = [ "namada_migrations", 
"linkme", @@ -44,7 +43,7 @@ num-traits.workspace = true proptest = { workspace = true, optional = true } prost-types.workspace = true prost.workspace = true -rand_core = { workspace = true, optional = true, features = ["getrandom"] } +rand_core = { workspace = true, features = ["getrandom"] } serde.workspace = true serde_json.workspace = true sha2.workspace = true From 496e10e1bfdf726dd4cef705b49acb464167882a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Tue, 20 Aug 2024 14:13:38 +0100 Subject: [PATCH 17/73] vp_env: re-export types in pub api --- crates/vp_env/src/lib.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/vp_env/src/lib.rs b/crates/vp_env/src/lib.rs index 36cc0f1575..4a957a7064 100644 --- a/crates/vp_env/src/lib.rs +++ b/crates/vp_env/src/lib.rs @@ -22,11 +22,11 @@ pub mod collection_validation; use namada_core::address::Address; use namada_core::borsh::BorshDeserialize; -use namada_core::chain::{BlockHeader, BlockHeight, Epoch, Epochs}; +pub use namada_core::chain::{BlockHeader, BlockHeight, Epoch, Epochs}; use namada_core::hash::Hash; -use namada_core::storage::{Key, TxIndex}; +pub use namada_core::storage::{Key, TxIndex}; use namada_events::{Event, EventType}; -use namada_storage::StorageRead; +pub use namada_storage::StorageRead; use namada_tx::BatchedTxRef; /// Validity predicate's environment is available for native VPs and WASM VPs From c72d752d67799427c694d6023ab61c94c99e20d4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Tue, 20 Aug 2024 14:39:36 +0100 Subject: [PATCH 18/73] account: re-export types in pub api --- crates/account/src/lib.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/crates/account/src/lib.rs b/crates/account/src/lib.rs index 1916e3d976..6e49abf7ac 100644 --- a/crates/account/src/lib.rs +++ b/crates/account/src/lib.rs @@ -26,8 +26,10 @@ mod types; pub use auth::AccountPublicKeysMap; use 
borsh::{BorshDeserialize, BorshSerialize}; -use namada_core::address::Address; -use namada_core::key::common; +pub use namada_core::address::Address; +pub use namada_core::hash::Hash; +pub use namada_core::key::common; +pub use namada_core::storage::Key; use namada_macros::BorshDeserializer; #[cfg(feature = "migrations")] use namada_migrations::*; From 4c7363c5c0a37359edb2e0faffaf9bdd09e65188 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Tue, 20 Aug 2024 14:40:51 +0100 Subject: [PATCH 19/73] controller: re-export types in pub api --- crates/controller/src/lib.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/crates/controller/src/lib.rs b/crates/controller/src/lib.rs index 8dd5573808..4db54fc321 100644 --- a/crates/controller/src/lib.rs +++ b/crates/controller/src/lib.rs @@ -17,9 +17,10 @@ clippy::print_stderr )] -use namada_core::arith::{self, checked}; -use namada_core::dec::Dec; -use namada_core::uint::Uint; +pub use namada_core::arith; +use namada_core::arith::checked; +pub use namada_core::dec::Dec; +pub use namada_core::uint::Uint; use thiserror::Error; #[allow(missing_docs)] From d5e5baa7565459f1e138fc9cb1bfd00ad14340ae Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Tue, 20 Aug 2024 15:04:30 +0100 Subject: [PATCH 20/73] vp: re-export types in pub api --- crates/vp/src/lib.rs | 4 ++++ crates/vp/src/native_vp.rs | 31 +++++++++++++------------------ 2 files changed, 17 insertions(+), 18 deletions(-) diff --git a/crates/vp/src/lib.rs b/crates/vp/src/lib.rs index 554e045082..f9faf301e8 100644 --- a/crates/vp/src/lib.rs +++ b/crates/vp/src/lib.rs @@ -21,4 +21,8 @@ pub mod native_vp; pub mod vp_host_fns; +pub use namada_core::address::Address; +pub use namada_core::hash::Hash; +pub use namada_events::{Event, EventType}; +pub use namada_state as state; pub use namada_vp_env::VpEnv; diff --git a/crates/vp/src/native_vp.rs b/crates/vp/src/native_vp.rs index 
6ab01f7817..513acd59b3 100644 --- a/crates/vp/src/native_vp.rs +++ b/crates/vp/src/native_vp.rs @@ -7,24 +7,19 @@ use std::collections::BTreeSet; use std::fmt::Debug; use std::marker::PhantomData; -use namada_core::address::Address; +use namada_core::borsh; use namada_core::borsh::BorshDeserialize; use namada_core::chain::Epochs; -use namada_core::hash::Hash; -use namada_core::{borsh, storage}; -use namada_events::{Event, EventType}; use namada_gas::{GasMetering, VpGasMeter}; -use namada_state as state; -use namada_state::prefix_iter::PrefixIterators; -use namada_state::{ - BlockHeader, BlockHeight, Epoch, Key, ResultExt, StorageRead, +use namada_tx::{BatchedTxRef, Tx, TxCommitments}; +use state::prefix_iter::PrefixIterators; +use state::{ + BlockHeader, BlockHeight, Epoch, Key, ResultExt, StateRead, StorageRead, StorageResult, TxIndex, }; -use namada_tx::{BatchedTxRef, Tx, TxCommitments}; -pub use namada_vp_env::VpEnv; -use state::StateRead; use super::vp_host_fns; +use crate::{state, Address, Event, EventType, Hash, VpEnv}; /// Possible error in a native VP host function call /// The `state::StorageError` may wrap the `vp_host_fns::RuntimeError` @@ -182,20 +177,20 @@ where fn read_bytes( &self, - key: &storage::Key, + key: &Key, ) -> Result>, state::StorageError> { vp_host_fns::read_pre(self.ctx.gas_meter, self.ctx.state, key) .into_storage_result() } - fn has_key(&self, key: &storage::Key) -> Result { + fn has_key(&self, key: &Key) -> Result { vp_host_fns::has_key_pre(self.ctx.gas_meter, self.ctx.state, key) .into_storage_result() } fn iter_prefix<'iter>( &'iter self, - prefix: &storage::Key, + prefix: &Key, ) -> Result, state::StorageError> { vp_host_fns::iter_prefix_pre( self.ctx.gas_meter, @@ -260,20 +255,20 @@ where fn read_bytes( &self, - key: &storage::Key, + key: &Key, ) -> Result>, state::StorageError> { vp_host_fns::read_post(self.ctx.gas_meter, self.ctx.state, key) .into_storage_result() } - fn has_key(&self, key: &storage::Key) -> Result { + fn 
has_key(&self, key: &Key) -> Result { vp_host_fns::has_key_post(self.ctx.gas_meter, self.ctx.state, key) .into_storage_result() } fn iter_prefix<'iter>( &'iter self, - prefix: &storage::Key, + prefix: &Key, ) -> Result, state::StorageError> { vp_host_fns::iter_prefix_post( self.ctx.gas_meter, @@ -494,7 +489,7 @@ where fn read_temp( &self, - key: &storage::Key, + key: &Key, ) -> Result, Self::Err> { VpEnv::read_temp(self, key) } From af68c80b0b534238d86796b6c38dfe1b3c687a2a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Tue, 20 Aug 2024 15:24:11 +0100 Subject: [PATCH 21/73] proof_of_stake: remove direct `namada_storage` dep --- Cargo.lock | 1 - crates/proof_of_stake/Cargo.toml | 1 - crates/proof_of_stake/src/epoched.rs | 83 ++++++------- crates/proof_of_stake/src/error.rs | 24 ++-- crates/proof_of_stake/src/lib.rs | 29 +++-- crates/proof_of_stake/src/queries.rs | 47 ++++---- crates/proof_of_stake/src/rewards.rs | 24 ++-- crates/proof_of_stake/src/slashing.rs | 60 +++++----- crates/proof_of_stake/src/storage.rs | 111 +++++++++--------- crates/proof_of_stake/src/storage_key.rs | 3 +- .../proof_of_stake/src/tests/state_machine.rs | 7 +- .../src/tests/state_machine_v2.rs | 5 +- .../src/tests/test_helper_fns.rs | 7 +- crates/proof_of_stake/src/tests/test_pos.rs | 5 +- .../src/tests/test_slash_and_redel.rs | 7 +- .../src/tests/test_validator.rs | 4 +- crates/proof_of_stake/src/types/mod.rs | 5 +- .../src/validator_set_update.rs | 35 +++--- wasm/Cargo.lock | 1 - wasm_for_tests/Cargo.lock | 1 - 20 files changed, 216 insertions(+), 244 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5df288af56..4b42df069a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5159,7 +5159,6 @@ dependencies = [ "namada_migrations", "namada_parameters", "namada_state", - "namada_storage", "namada_systems", "namada_trans_token", "namada_tx", diff --git a/crates/proof_of_stake/Cargo.toml b/crates/proof_of_stake/Cargo.toml index c53a567b33..09eadcbc56 100644 --- 
a/crates/proof_of_stake/Cargo.toml +++ b/crates/proof_of_stake/Cargo.toml @@ -29,7 +29,6 @@ namada_events = { path = "../events", default-features = false } namada_macros = { path = "../macros" } namada_migrations = { path = "../migrations", optional = true } namada_state = { path = "../state" } -namada_storage = { path = "../storage" } namada_systems = { path = "../systems" } namada_tx = { path = "../tx" } namada_vp = { path = "../vp" } diff --git a/crates/proof_of_stake/src/epoched.rs b/crates/proof_of_stake/src/epoched.rs index e4250676f9..02cb014ef7 100644 --- a/crates/proof_of_stake/src/epoched.rs +++ b/crates/proof_of_stake/src/epoched.rs @@ -12,13 +12,14 @@ use namada_core::storage; use namada_macros::BorshDeserializer; #[cfg(feature = "migrations")] use namada_migrations::*; -use namada_storage::collections::lazy_map::{LazyMap, NestedMap}; -use namada_storage::collections::{self, LazyCollection}; -use namada_storage::{StorageRead, StorageWrite}; +use namada_state::collections::{self, LazyCollection}; use namada_systems::governance; use crate::parameters::PosParams; -use crate::{read_pos_params, Epoch}; +use crate::{ + read_pos_params, Epoch, LazyMap, NestedMap, StorageRead, StorageResult, + StorageWrite, +}; /// Sub-key holding a lazy map in storage pub const LAZY_MAP_SUB_KEY: &str = "lazy_map"; @@ -82,7 +83,7 @@ where storage: &mut S, value: Data, current_epoch: Epoch, - ) -> namada_storage::Result<()> + ) -> StorageResult<()> where S: StorageWrite + StorageRead, { @@ -98,7 +99,7 @@ where storage: &S, epoch: Epoch, params: &PosParams, - ) -> namada_storage::Result> + ) -> StorageResult> where S: StorageRead, { @@ -140,7 +141,7 @@ where value: Data, current_epoch: Epoch, offset: u64, - ) -> namada_storage::Result<()> + ) -> StorageResult<()> where S: StorageWrite + StorageRead, Gov: governance::Read, @@ -156,7 +157,7 @@ where value: Data, current_epoch: Epoch, offset: u64, - ) -> namada_storage::Result<()> + ) -> StorageResult<()> where S: StorageWrite + 
StorageRead, { @@ -176,7 +177,7 @@ where storage: &mut S, params: &PosParams, current_epoch: Epoch, - ) -> namada_storage::Result<()> + ) -> StorageResult<()> where S: StorageWrite + StorageRead, { @@ -248,10 +249,7 @@ where .unwrap() } - fn get_last_update( - &self, - storage: &S, - ) -> namada_storage::Result> + fn get_last_update(&self, storage: &S) -> StorageResult> where S: StorageRead, { @@ -279,10 +277,7 @@ where .unwrap() } - fn get_oldest_epoch( - &self, - storage: &S, - ) -> namada_storage::Result> + fn get_oldest_epoch(&self, storage: &S) -> StorageResult> where S: StorageRead, { @@ -294,7 +289,7 @@ where &self, storage: &mut S, new_oldest_epoch: Epoch, - ) -> namada_storage::Result<()> + ) -> StorageResult<()> where S: StorageRead + StorageWrite, { @@ -325,11 +320,7 @@ where } /// Initialize new nested data at the given epoch. - pub fn init( - &self, - storage: &mut S, - epoch: Epoch, - ) -> namada_storage::Result<()> + pub fn init(&self, storage: &mut S, epoch: Epoch) -> StorageResult<()> where S: StorageWrite + StorageRead, { @@ -348,7 +339,7 @@ where pub fn get_last_update( &self, storage: &S, - ) -> namada_storage::Result> + ) -> StorageResult> where S: StorageRead, { @@ -361,7 +352,7 @@ where &self, storage: &mut S, current_epoch: Epoch, - ) -> namada_storage::Result<()> + ) -> StorageResult<()> where S: StorageWrite + StorageRead, { @@ -379,7 +370,7 @@ where pub fn get_oldest_epoch( &self, storage: &S, - ) -> namada_storage::Result> + ) -> StorageResult> where S: StorageRead, { @@ -391,7 +382,7 @@ where &self, storage: &mut S, new_oldest_epoch: Epoch, - ) -> namada_storage::Result<()> + ) -> StorageResult<()> where S: StorageRead + StorageWrite, { @@ -411,7 +402,7 @@ where storage: &mut S, params: &PosParams, current_epoch: Epoch, - ) -> namada_storage::Result<()> + ) -> StorageResult<()> where S: StorageRead + StorageWrite, { @@ -489,7 +480,7 @@ where storage: &mut S, value: Data, current_epoch: Epoch, - ) -> namada_storage::Result<()> + ) -> 
StorageResult<()> where S: StorageWrite + StorageRead, { @@ -504,7 +495,7 @@ where &self, storage: &S, epoch: Epoch, - ) -> namada_storage::Result> + ) -> StorageResult> where S: StorageRead, { @@ -517,7 +508,7 @@ where storage: &S, epoch: Epoch, params: &PosParams, - ) -> namada_storage::Result> + ) -> StorageResult> where S: StorageRead, { @@ -559,7 +550,7 @@ where value: Data, current_epoch: Epoch, offset: u64, - ) -> namada_storage::Result<()> + ) -> StorageResult<()> where S: StorageWrite + StorageRead, Gov: governance::Read, @@ -581,7 +572,7 @@ where value: Data, current_epoch: Epoch, offset: u64, - ) -> namada_storage::Result<()> + ) -> StorageResult<()> where S: StorageWrite + StorageRead, Gov: governance::Read, @@ -597,7 +588,7 @@ where value: Data, current_epoch: Epoch, offset: u64, - ) -> namada_storage::Result<()> + ) -> StorageResult<()> where S: StorageWrite + StorageRead, { @@ -615,7 +606,7 @@ where storage: &mut S, params: &PosParams, current_epoch: Epoch, - ) -> namada_storage::Result<()> + ) -> StorageResult<()> where S: StorageWrite + StorageRead, { @@ -699,7 +690,7 @@ where pub fn get_last_update( &self, storage: &S, - ) -> namada_storage::Result> + ) -> StorageResult> where S: StorageRead, { @@ -720,7 +711,7 @@ where pub fn to_hashmap( &self, storage: &S, - ) -> namada_storage::Result> + ) -> StorageResult> where S: StorageRead, { @@ -740,10 +731,7 @@ where .unwrap() } - fn get_oldest_epoch( - &self, - storage: &S, - ) -> namada_storage::Result> + fn get_oldest_epoch(&self, storage: &S) -> StorageResult> where S: StorageRead, { @@ -755,7 +743,7 @@ where &self, storage: &mut S, new_oldest_epoch: Epoch, - ) -> namada_storage::Result<()> + ) -> StorageResult<()> where S: StorageRead + StorageWrite, { @@ -1128,7 +1116,7 @@ mod test { use crate::types::GenesisValidator; #[test] - fn test_epoched_data_trimming() -> namada_storage::Result<()> { + fn test_epoched_data_trimming() -> StorageResult<()> { let mut s = init_storage()?; let key_prefix = 
storage::Key::parse("test").unwrap(); @@ -1199,7 +1187,7 @@ mod test { } #[test] - fn test_epoched_without_data_trimming() -> namada_storage::Result<()> { + fn test_epoched_without_data_trimming() -> StorageResult<()> { let mut s = init_storage()?; let key_prefix = storage::Key::parse("test").unwrap(); @@ -1267,7 +1255,7 @@ mod test { } #[test] - fn test_epoched_delta_data_trimming() -> namada_storage::Result<()> { + fn test_epoched_delta_data_trimming() -> StorageResult<()> { let mut s = init_storage()?; let key_prefix = storage::Key::parse("test").unwrap(); @@ -1340,8 +1328,7 @@ mod test { } #[test] - fn test_epoched_delta_without_data_trimming() -> namada_storage::Result<()> - { + fn test_epoched_delta_without_data_trimming() -> StorageResult<()> { let mut s = init_storage()?; // Nothing should ever get trimmed @@ -1425,7 +1412,7 @@ mod test { Ok(()) } - fn init_storage() -> namada_storage::Result { + fn init_storage() -> StorageResult { let mut s = TestState::default(); let gov_params = namada_governance::parameters::GovernanceParameters::default(); diff --git a/crates/proof_of_stake/src/error.rs b/crates/proof_of_stake/src/error.rs index 9c74bedfcd..ea4529d88a 100644 --- a/crates/proof_of_stake/src/error.rs +++ b/crates/proof_of_stake/src/error.rs @@ -6,8 +6,8 @@ use namada_core::chain::Epoch; use namada_core::dec::Dec; use thiserror::Error; -use crate::rewards; use crate::types::ValidatorState; +use crate::{rewards, StorageError}; #[allow(missing_docs)] #[derive(Error, Debug)] @@ -167,67 +167,67 @@ pub enum ConsensusKeyChangeError { MustBeEd25519, } -impl From for namada_storage::Error { +impl From for StorageError { fn from(err: BecomeValidatorError) -> Self { Self::new(err) } } -impl From for namada_storage::Error { +impl From for StorageError { fn from(err: BondError) -> Self { Self::new(err) } } -impl From for namada_storage::Error { +impl From for StorageError { fn from(err: UnbondError) -> Self { Self::new(err) } } -impl From for namada_storage::Error { 
+impl From for StorageError { fn from(err: CommissionRateChangeError) -> Self { Self::new(err) } } -impl From for namada_storage::Error { +impl From for StorageError { fn from(err: InflationError) -> Self { Self::new(err) } } -impl From for namada_storage::Error { +impl From for StorageError { fn from(err: UnjailValidatorError) -> Self { Self::new(err) } } -impl From for namada_storage::Error { +impl From for StorageError { fn from(err: RedelegationError) -> Self { Self::new(err) } } -impl From for namada_storage::Error { +impl From for StorageError { fn from(err: DeactivationError) -> Self { Self::new(err) } } -impl From for namada_storage::Error { +impl From for StorageError { fn from(err: ReactivationError) -> Self { Self::new(err) } } -impl From for namada_storage::Error { +impl From for StorageError { fn from(err: MetadataError) -> Self { Self::new(err) } } -impl From for namada_storage::Error { +impl From for StorageError { fn from(err: ConsensusKeyChangeError) -> Self { Self::new(err) } diff --git a/crates/proof_of_stake/src/lib.rs b/crates/proof_of_stake/src/lib.rs index ff846b45d3..89f520be66 100644 --- a/crates/proof_of_stake/src/lib.rs +++ b/crates/proof_of_stake/src/lib.rs @@ -47,12 +47,19 @@ pub use namada_core::chain::Epoch; use namada_core::collections::HashSet; pub use namada_core::dec::Dec; use namada_core::key::common; -pub use namada_core::storage::{Key, KeySeg}; use namada_core::tendermint::abci::types::Misbehavior; use namada_core::token; use namada_events::EmitEvents; -use namada_storage::collections::lazy_map::{self, Collectable, LazyMap}; -use namada_storage::{OptionExt, StorageRead, StorageWrite}; +pub use namada_state::collections::lazy_map::{ + self, Collectable, LazyMap, NestedMap, +}; +pub use namada_state::collections::lazy_set::{self, LazySet}; +pub use namada_state::collections::lazy_vec::{self, LazyVec}; +pub use namada_state::collections::LazyCollection; +pub use namada_state::{ + iter_prefix_bytes, Key, KeySeg, OptionExt, 
ResultExt, StorageError, + StorageRead, StorageResult, StorageWrite, +}; pub use namada_systems::proof_of_stake::*; use namada_systems::{governance, trans_token}; pub use parameters::{OwnedPosParams, PosParams}; @@ -223,7 +230,7 @@ where let prefix = bonds_for_source_prefix(address); match epoch { Some(epoch) => { - let iter = namada_storage::iter_prefix_bytes(storage, &prefix)?; + let iter = iter_prefix_bytes(storage, &prefix)?; for res in iter { let (key, _) = res?; if let Some((bond_id, bond_epoch)) = is_bond_key(&key) { @@ -237,7 +244,7 @@ where Ok(false) } None => { - let iter = namada_storage::iter_prefix_bytes(storage, &prefix)?; + let iter = iter_prefix_bytes(storage, &prefix)?; for res in iter { let (key, _) = res?; if let Some((bond_id, _epoch)) = is_bond_key(&key) { @@ -1207,7 +1214,7 @@ where .or_default() .insert(epoch, amount); } - Ok::<_, namada_storage::Error>((start, rbonds)) + Ok::<_, StorageError>((start, rbonds)) } else { for src_validator in &modified.validators_to_remove { if modified @@ -1319,14 +1326,14 @@ where let offset = offset_opt.unwrap_or(params.pipeline_len); if !address.is_established() { - return Err(namada_storage::Error::new_const( + return Err(StorageError::new_const( "The given address {address} is not established. Only an \ established address can become a validator.", )); } if is_validator(storage, address)? { - return Err(namada_storage::Error::new_const( + return Err(StorageError::new_const( "The given address is already a validator", )); } @@ -1334,7 +1341,7 @@ where // The address may not have any bonds if it is going to be initialized as a // validator if has_bonds::(storage, address)? { - return Err(namada_storage::Error::new_const( + return Err(StorageError::new_const( "The given address has delegations and therefore cannot become a \ validator. Unbond first.", )); @@ -2630,9 +2637,7 @@ where )? 
.to_uint() .ok_or_else(|| { - namada_storage::Error::SimpleMessage( - "Found negative liveness threshold", - ) + StorageError::SimpleMessage("Found negative liveness threshold") })? .as_u64(); diff --git a/crates/proof_of_stake/src/queries.rs b/crates/proof_of_stake/src/queries.rs index e8454c2913..e9e6a03f1e 100644 --- a/crates/proof_of_stake/src/queries.rs +++ b/crates/proof_of_stake/src/queries.rs @@ -10,10 +10,9 @@ use namada_core::collections::{HashMap, HashSet}; use namada_core::dec::Dec; use namada_core::key::common; use namada_core::token; -use namada_storage::collections::lazy_map::{NestedSubKey, SubKey}; -use namada_storage::StorageRead; use namada_systems::governance; +use crate::lazy_map::{NestedSubKey, SubKey}; use crate::slashing::{find_validator_slashes, get_slashed_amount}; use crate::storage::{ bond_handle, delegation_targets_handle, @@ -24,7 +23,10 @@ use crate::types::{ BondDetails, BondId, BondsAndUnbondsDetail, BondsAndUnbondsDetails, DelegationEpochs, Slash, UnbondDetails, }; -use crate::{raw_bond_amount, storage_key, PosParams}; +use crate::{ + iter_prefix_bytes, raw_bond_amount, storage_key, PosParams, StorageError, + StorageRead, StorageResult, +}; /// Find all validators to which a given bond `owner` (or source) has a /// delegation @@ -32,7 +34,7 @@ pub fn find_delegation_validators( storage: &S, owner: &Address, epoch: &Epoch, -) -> namada_storage::Result> +) -> StorageResult> where S: StorageRead, { @@ -85,7 +87,7 @@ pub fn find_delegations( storage: &S, owner: &Address, epoch: &Epoch, -) -> namada_storage::Result> +) -> StorageResult> where S: StorageRead, Gov: governance::Read, @@ -143,10 +145,7 @@ where } /// Find if the given source address has any bonds. 
-pub fn has_bonds( - storage: &S, - source: &Address, -) -> namada_storage::Result +pub fn has_bonds(storage: &S, source: &Address) -> StorageResult where S: StorageRead, Gov: governance::Read, @@ -161,7 +160,7 @@ pub fn find_bonds( storage: &S, source: &Address, validator: &Address, -) -> namada_storage::Result> +) -> StorageResult> where S: StorageRead, { @@ -176,7 +175,7 @@ pub fn find_unbonds( storage: &S, source: &Address, validator: &Address, -) -> namada_storage::Result> +) -> StorageResult> where S: StorageRead, { @@ -202,7 +201,7 @@ pub fn bonds_and_unbonds( storage: &S, source: Option
, validator: Option
, -) -> namada_storage::Result +) -> StorageResult where S: StorageRead, Gov: governance::Read, @@ -224,7 +223,7 @@ fn get_multiple_bonds_and_unbonds( params: &PosParams, source: Option
, validator: Option
, -) -> namada_storage::Result +) -> StorageResult where S: StorageRead, { @@ -245,8 +244,8 @@ where }; // We have to iterate raw bytes, cause the epoched data `last_update` field // gets matched here too - let mut raw_bonds = namada_storage::iter_prefix_bytes(storage, &prefix)? - .filter_map(|result| { + let mut raw_bonds = + iter_prefix_bytes(storage, &prefix)?.filter_map(|result| { if let Ok((key, val_bytes)) = result { if let Some((bond_id, start)) = storage_key::is_bond_key(&key) { if source.is_some() @@ -274,8 +273,8 @@ where Some(source) => storage_key::unbonds_for_source_prefix(source), None => storage_key::unbonds_prefix(), }; - let mut raw_unbonds = namada_storage::iter_prefix_bytes(storage, &prefix)? - .filter_map(|result| { + let mut raw_unbonds = + iter_prefix_bytes(storage, &prefix)?.filter_map(|result| { if let Ok((key, val_bytes)) = result { if let Some((bond_id, start, withdraw)) = storage_key::is_unbond_key(&key) @@ -332,7 +331,7 @@ where slashes, &mut applied_slashes, )); - Ok::<_, namada_storage::Error>(()) + Ok::<_, StorageError>(()) })?; raw_unbonds.try_for_each(|(bond_id, start, withdraw, amount)| { @@ -353,7 +352,7 @@ where slashes, &mut applied_slashes, )); - Ok::<_, namada_storage::Error>(()) + Ok::<_, StorageError>(()) })?; Ok(bonds_and_unbonds @@ -377,7 +376,7 @@ fn find_bonds_and_unbonds_details( params: &PosParams, source: Address, validator: Address, -) -> namada_storage::Result +) -> StorageResult where S: StorageRead, { @@ -555,7 +554,7 @@ pub fn get_validator_protocol_key( storage: &S, addr: &Address, epoch: Epoch, -) -> namada_storage::Result> +) -> StorageResult> where S: StorageRead, Gov: governance::Read, @@ -571,7 +570,7 @@ pub fn get_validator_eth_hot_key( storage: &S, validator: &Address, epoch: Epoch, -) -> namada_storage::Result> +) -> StorageResult> where S: StorageRead, Gov: governance::Read, @@ -588,7 +587,7 @@ pub fn read_validator_stake( storage: &S, validator: &Address, epoch: Epoch, -) -> namada_storage::Result +) 
-> StorageResult where S: StorageRead, Gov: governance::Read, @@ -603,7 +602,7 @@ pub fn get_consensus_validator_from_protocol_pk( storage: &S, pk: &common::PublicKey, epoch: Option, -) -> namada_storage::Result> +) -> StorageResult> where S: StorageRead, Gov: governance::Read, diff --git a/crates/proof_of_stake/src/rewards.rs b/crates/proof_of_stake/src/rewards.rs index c225840cc6..7a03f43d61 100644 --- a/crates/proof_of_stake/src/rewards.rs +++ b/crates/proof_of_stake/src/rewards.rs @@ -8,11 +8,10 @@ use namada_core::collections::{HashMap, HashSet}; use namada_core::dec::Dec; use namada_core::token; use namada_core::uint::{Uint, I256}; -use namada_storage::collections::lazy_map::NestedSubKey; -use namada_storage::{ResultExt, StorageRead, StorageWrite}; use namada_systems::{governance, parameters, trans_token}; use thiserror::Error; +use crate::lazy_map::NestedSubKey; use crate::storage::{ consensus_validator_set_handle, get_last_reward_claim_epoch, read_last_pos_inflation_amount, read_last_staked_ratio, read_pos_params, @@ -24,7 +23,8 @@ use crate::storage::{ use crate::types::{into_tm_voting_power, BondId, ValidatorState, VoteInfo}; use crate::{ bond_amounts_for_rewards, get_total_consensus_stake, staking_token_address, - storage, storage_key, InflationError, PosParams, + storage, storage_key, InflationError, PosParams, ResultExt, StorageRead, + StorageResult, StorageWrite, }; /// This is equal to 0.01. 
@@ -65,7 +65,7 @@ pub fn compute_inflation( epochs_per_year: u64, target_ratio: Dec, last_ratio: Dec, -) -> namada_storage::Result { +) -> StorageResult { let controller = PDController::new( total_native_amount.into(), max_reward_rate, @@ -175,7 +175,7 @@ pub(crate) fn log_block_rewards( height: BlockHeight, current_epoch: Epoch, new_epoch: bool, -) -> namada_storage::Result<()> +) -> StorageResult<()> where S: StorageWrite + StorageRead, Gov: governance::Read, @@ -217,7 +217,7 @@ pub(crate) fn log_block_rewards_aux( epoch: impl Into, proposer_address: &Address, votes: Vec, -) -> namada_storage::Result<()> +) -> StorageResult<()> where S: StorageRead + StorageWrite, Gov: governance::Read, @@ -359,7 +359,7 @@ pub fn apply_inflation( storage: &mut S, last_epoch: Epoch, num_blocks_in_last_epoch: u64, -) -> namada_storage::Result<()> +) -> StorageResult<()> where S: StorageRead + StorageWrite, Gov: governance::Read, @@ -441,7 +441,7 @@ pub fn update_rewards_products_and_mint_inflation( inflation: token::Amount, staking_token: &Address, total_native_tokens: token::Amount, -) -> namada_storage::Result<()> +) -> StorageResult<()> where S: StorageRead + StorageWrite, Token: trans_token::Write, @@ -559,7 +559,7 @@ pub fn compute_current_rewards_from_bonds( source: &Address, validator: &Address, current_epoch: Epoch, -) -> namada_storage::Result +) -> StorageResult where S: StorageRead, Gov: governance::Read, @@ -616,7 +616,7 @@ pub fn add_rewards_to_counter( source: &Address, validator: &Address, new_rewards: token::Amount, -) -> namada_storage::Result<()> +) -> StorageResult<()> where S: StorageRead + StorageWrite, { @@ -631,7 +631,7 @@ pub fn take_rewards_from_counter( storage: &mut S, source: &Address, validator: &Address, -) -> namada_storage::Result +) -> StorageResult where S: StorageRead + StorageWrite, { @@ -647,7 +647,7 @@ pub fn read_rewards_counter( storage: &S, source: &Address, validator: &Address, -) -> namada_storage::Result +) -> StorageResult where S: 
StorageRead, { diff --git a/crates/proof_of_stake/src/slashing.rs b/crates/proof_of_stake/src/slashing.rs index 4367561d10..b1a7613ed9 100644 --- a/crates/proof_of_stake/src/slashing.rs +++ b/crates/proof_of_stake/src/slashing.rs @@ -13,14 +13,10 @@ use namada_core::key::tm_raw_hash_to_string; use namada_core::tendermint::abci::types::{Misbehavior, MisbehaviorKind}; use namada_core::token; use namada_events::EmitEvents; -use namada_storage::collections::lazy_map::{ - Collectable, NestedMap, NestedSubKey, SubKey, -}; -use namada_storage::collections::LazyMap; -use namada_storage::{OptionExt, ResultExt, StorageRead, StorageWrite}; use namada_systems::governance; use crate::event::PosEvent; +use crate::lazy_map::{Collectable, NestedMap, NestedSubKey, SubKey}; use crate::storage::{ enqueued_slashes_handle, read_pos_params, read_validator_last_slash_epoch, read_validator_stake, total_bonded_handle, total_unbonded_handle, @@ -37,8 +33,10 @@ use crate::types::{ use crate::validator_set_update::update_validator_set; use crate::{ fold_and_slash_redelegated_bonds, get_total_consensus_stake, - jail_validator, storage, storage_key, types, EagerRedelegatedUnbonds, - FoldRedelegatedBondsResult, OwnedPosParams, PosParams, + iter_prefix_bytes, jail_validator, storage, storage_key, types, + EagerRedelegatedUnbonds, FoldRedelegatedBondsResult, LazyMap, OptionExt, + OwnedPosParams, PosParams, ResultExt, StorageError, StorageRead, + StorageResult, StorageWrite, }; /// Apply PoS slashes from the evidence @@ -48,7 +46,7 @@ pub(crate) fn record_slashes_from_evidence( pos_params: &PosParams, current_epoch: Epoch, validator_set_update_epoch: Epoch, -) -> namada_storage::Result<()> +) -> StorageResult<()> where S: StorageWrite + StorageRead, Gov: governance::Read, @@ -150,7 +148,7 @@ pub fn slash( slash_type: SlashType, validator: &Address, validator_set_update_epoch: Epoch, -) -> namada_storage::Result<()> +) -> StorageResult<()> where S: StorageRead + StorageWrite, Gov: governance::Read, 
@@ -210,7 +208,7 @@ pub fn process_slashes( storage: &mut S, events: &mut impl EmitEvents, current_epoch: Epoch, -) -> namada_storage::Result<()> +) -> StorageResult<()> where S: StorageRead + StorageWrite, Gov: governance::Read, @@ -409,7 +407,7 @@ pub fn slash_validator_redelegation( dest_total_redelegated_unbonded: &TotalRedelegatedUnbonded, slash_rate: Dec, dest_slashed_amounts: &mut BTreeMap, -) -> namada_storage::Result<()> +) -> StorageResult<()> where S: StorageRead, { @@ -467,7 +465,7 @@ pub fn slash_redelegation( total_redelegated_unbonded: &TotalRedelegatedUnbonded, slash_rate: Dec, slashed_amounts: &mut BTreeMap, -) -> namada_storage::Result<()> +) -> StorageResult<()> where S: StorageRead, { @@ -496,9 +494,9 @@ where .at(src_validator) .get(storage, &bond_start)? .unwrap_or_default(); - Ok::<_, namada_storage::Error>(redelegated_unbonded) + Ok::<_, StorageError>(redelegated_unbonded) }) - .collect::>()?; + .collect::>()?; let mut init_tot_unbonded = token::Amount::sum(redelegated_unbonded.into_iter()) .ok_or_err_msg("token amount overflow")?; @@ -588,7 +586,7 @@ pub fn slash_validator( slash_rate: Dec, current_epoch: Epoch, slashed_amounts_map: &BTreeMap, -) -> namada_storage::Result> +) -> StorageResult> where S: StorageRead, { @@ -651,9 +649,7 @@ where redelegated_bonds.get(bond_start), slash_rate, )?; - Ok::(checked!( - acc + slashed - )?) + Ok::(checked!(acc + slashed)?) 
}, )?; @@ -720,7 +716,7 @@ pub fn compute_bond_at_epoch( start: Epoch, amount: token::Amount, redelegated_bonds: Option<&EagerRedelegatedBondsMap>, -) -> namada_storage::Result +) -> StorageResult where S: StorageRead, { @@ -777,7 +773,7 @@ pub fn compute_slash_bond_at_epoch( bond_amount: token::Amount, redelegated_bonds: Option<&EagerRedelegatedBondsMap>, slash_rate: Dec, -) -> namada_storage::Result +) -> StorageResult where S: StorageRead, { @@ -811,7 +807,7 @@ pub fn find_slashes_in_range( start: Epoch, end: Option, validator: &Address, -) -> namada_storage::Result> +) -> StorageResult> where S: StorageRead, { @@ -880,12 +876,12 @@ pub fn compute_slashable_amount( /// Find all slashes and the associated validators in the PoS system pub fn find_all_slashes( storage: &S, -) -> namada_storage::Result>> +) -> StorageResult>> where S: StorageRead, { let mut slashes: HashMap> = HashMap::new(); - let slashes_iter = namada_storage::iter_prefix_bytes( + let slashes_iter = iter_prefix_bytes( storage, &storage_key::slashes_prefix(), )? 
@@ -919,7 +915,7 @@ where pub fn find_all_enqueued_slashes( storage: &S, epoch: Epoch, -) -> namada_storage::Result>>> +) -> StorageResult>>> where S: StorageRead, { @@ -954,7 +950,7 @@ where pub fn find_validator_slashes( storage: &S, validator: &Address, -) -> namada_storage::Result> +) -> StorageResult> where S: StorageRead, { @@ -968,7 +964,7 @@ pub fn get_slashed_amount( params: &PosParams, amount: token::Amount, slashes: &BTreeMap, -) -> namada_storage::Result { +) -> StorageResult { let mut updated_amount = amount; let mut computed_amounts = Vec::::new(); @@ -1020,7 +1016,7 @@ pub fn compute_amount_after_slashing_unbond( unbonds: &BTreeMap, redelegated_unbonds: &EagerRedelegatedUnbonds, slashes: Vec, -) -> namada_storage::Result +) -> StorageResult where S: StorageRead, { @@ -1078,7 +1074,7 @@ pub fn compute_amount_after_slashing_withdraw( (token::Amount, EagerRedelegatedBondsMap), >, slashes: Vec, -) -> namada_storage::Result +) -> StorageResult where S: StorageRead, { @@ -1156,7 +1152,7 @@ fn process_validator_slash( slash_rate: Dec, current_epoch: Epoch, slashed_amount_map: &mut EagerRedelegatedBondsMap, -) -> namada_storage::Result<()> +) -> StorageResult<()> where S: StorageRead + StorageWrite, { @@ -1195,7 +1191,7 @@ where ) = res?; Ok(dest_validator) }) - .collect::>>()?; + .collect::>>()?; for dest_validator in dest_validators { let to_modify = slashed_amount_map @@ -1232,7 +1228,7 @@ fn compute_cubic_slash_rate( storage: &S, params: &PosParams, infraction_epoch: Epoch, -) -> namada_storage::Result +) -> StorageResult where S: StorageRead, { @@ -1273,7 +1269,7 @@ where let stake = Dec::try_from(validator_stake).into_storage_result()?; - Ok::(checked!(acc + stake)?) + Ok::(checked!(acc + stake)?) 
})?; sum_vp_fraction = checked!(sum_vp_fraction + (infracting_stake / consensus_stake))?; diff --git a/crates/proof_of_stake/src/storage.rs b/crates/proof_of_stake/src/storage.rs index 6457456f66..e75299d401 100644 --- a/crates/proof_of_stake/src/storage.rs +++ b/crates/proof_of_stake/src/storage.rs @@ -11,11 +11,9 @@ use namada_core::collections::HashSet; use namada_core::dec::Dec; use namada_core::key::{common, tm_consensus_key_raw_hash}; use namada_core::token; -use namada_storage::collections::lazy_map::NestedSubKey; -use namada_storage::collections::{LazyCollection, LazySet}; -use namada_storage::{Result, StorageRead, StorageWrite}; use namada_systems::governance; +use crate::lazy_map::NestedSubKey; use crate::storage_key::consensus_keys_key; use crate::types::{ BelowCapacityValidatorSets, BondId, Bonds, CommissionRates, @@ -30,7 +28,10 @@ use crate::types::{ ValidatorSetPositions, ValidatorState, ValidatorStates, ValidatorTotalUnbonded, WeightedValidator, }; -use crate::{storage_key, MetadataError, OwnedPosParams, PosParams}; +use crate::{ + storage_key, LazyCollection, LazySet, MetadataError, OwnedPosParams, + PosParams, StorageRead, StorageResult, StorageWrite, +}; // ---- Storage handles ---- @@ -260,9 +261,7 @@ pub fn delegation_targets_handle(delegator: &Address) -> DelegationTargets { // ---- Storage read + write ---- /// Read owned PoS parameters -pub fn read_owned_pos_params( - storage: &S, -) -> namada_storage::Result +pub fn read_owned_pos_params(storage: &S) -> StorageResult where S: StorageRead, { @@ -272,7 +271,7 @@ where } /// Read PoS parameters -pub fn read_pos_params(storage: &S) -> namada_storage::Result +pub fn read_pos_params(storage: &S) -> StorageResult where S: StorageRead, Gov: governance::Read, @@ -286,7 +285,7 @@ where pub fn read_non_pos_owned_params( storage: &S, owned: OwnedPosParams, -) -> namada_storage::Result +) -> StorageResult where S: StorageRead, Gov: governance::Read, @@ -302,7 +301,7 @@ where pub fn write_pos_params( 
storage: &mut S, params: &OwnedPosParams, -) -> namada_storage::Result<()> +) -> StorageResult<()> where S: StorageRead + StorageWrite, { @@ -314,7 +313,7 @@ where pub fn find_validator_by_raw_hash( storage: &S, raw_hash: impl AsRef, -) -> namada_storage::Result> +) -> StorageResult> where S: StorageRead, { @@ -327,7 +326,7 @@ pub fn write_validator_address_raw_hash( storage: &mut S, validator: &Address, consensus_key: &common::PublicKey, -) -> namada_storage::Result<()> +) -> StorageResult<()> where S: StorageRead + StorageWrite, { @@ -342,7 +341,7 @@ where pub fn read_validator_max_commission_rate_change( storage: &S, validator: &Address, -) -> namada_storage::Result> +) -> StorageResult> where S: StorageRead, { @@ -355,7 +354,7 @@ pub fn write_validator_max_commission_rate_change( storage: &mut S, validator: &Address, change: Dec, -) -> namada_storage::Result<()> +) -> StorageResult<()> where S: StorageRead + StorageWrite, { @@ -367,7 +366,7 @@ where pub fn read_validator_last_slash_epoch( storage: &S, validator: &Address, -) -> namada_storage::Result> +) -> StorageResult> where S: StorageRead, { @@ -380,7 +379,7 @@ pub fn write_validator_last_slash_epoch( storage: &mut S, validator: &Address, epoch: Epoch, -) -> namada_storage::Result<()> +) -> StorageResult<()> where S: StorageRead + StorageWrite, { @@ -391,7 +390,7 @@ where /// Read last block proposer address. pub fn read_last_block_proposer_address( storage: &S, -) -> namada_storage::Result> +) -> StorageResult> where S: StorageRead, { @@ -403,7 +402,7 @@ where pub fn write_last_block_proposer_address( storage: &mut S, address: Address, -) -> namada_storage::Result<()> +) -> StorageResult<()> where S: StorageRead + StorageWrite, { @@ -412,9 +411,7 @@ where } /// Read last epoch's staked ratio. 
-pub fn read_last_staked_ratio( - storage: &S, -) -> namada_storage::Result> +pub fn read_last_staked_ratio(storage: &S) -> StorageResult> where S: StorageRead, { @@ -426,7 +423,7 @@ where pub fn write_last_staked_ratio( storage: &mut S, ratio: Dec, -) -> namada_storage::Result<()> +) -> StorageResult<()> where S: StorageRead + StorageWrite, { @@ -437,7 +434,7 @@ where /// Read last epoch's PoS inflation amount. pub fn read_last_pos_inflation_amount( storage: &S, -) -> namada_storage::Result> +) -> StorageResult> where S: StorageRead, { @@ -449,7 +446,7 @@ where pub fn write_last_pos_inflation_amount( storage: &mut S, inflation: token::Amount, -) -> namada_storage::Result<()> +) -> StorageResult<()> where S: StorageRead + StorageWrite, { @@ -462,7 +459,7 @@ pub fn read_validator_state( storage: &S, validator: &Address, epoch: &Epoch, -) -> namada_storage::Result> +) -> StorageResult> where S: StorageRead, Gov: governance::Read, @@ -476,7 +473,7 @@ pub fn read_validator_deltas_value( storage: &S, validator: &Address, epoch: &namada_core::chain::Epoch, -) -> namada_storage::Result> +) -> StorageResult> where S: StorageRead, { @@ -492,7 +489,7 @@ pub fn read_validator_stake( params: &PosParams, validator: &Address, epoch: namada_core::chain::Epoch, -) -> namada_storage::Result +) -> StorageResult where S: StorageRead, { @@ -515,7 +512,7 @@ pub fn update_validator_deltas( delta: token::Change, current_epoch: namada_core::chain::Epoch, offset_opt: Option, -) -> namada_storage::Result<()> +) -> StorageResult<()> where S: StorageRead + StorageWrite, Gov: governance::Read, @@ -540,7 +537,7 @@ pub fn read_total_stake( storage: &S, params: &PosParams, epoch: namada_core::chain::Epoch, -) -> namada_storage::Result +) -> StorageResult where S: StorageRead, { @@ -560,7 +557,7 @@ pub fn read_total_active_stake( storage: &S, params: &PosParams, epoch: namada_core::chain::Epoch, -) -> namada_storage::Result +) -> StorageResult where S: StorageRead, { @@ -579,7 +576,7 @@ where pub 
fn read_consensus_validator_set_addresses( storage: &S, epoch: namada_core::chain::Epoch, -) -> namada_storage::Result> +) -> StorageResult> where S: StorageRead, { @@ -594,7 +591,7 @@ where pub fn read_below_capacity_validator_set_addresses( storage: &S, epoch: namada_core::chain::Epoch, -) -> namada_storage::Result> +) -> StorageResult> where S: StorageRead, { @@ -609,7 +606,7 @@ where pub fn read_below_threshold_validator_set_addresses( storage: &S, epoch: namada_core::chain::Epoch, -) -> namada_storage::Result> +) -> StorageResult> where S: StorageRead, Gov: governance::Read, @@ -632,7 +629,7 @@ where pub fn read_consensus_validator_set_addresses_with_stake( storage: &S, epoch: namada_core::chain::Epoch, -) -> namada_storage::Result> +) -> StorageResult> where S: StorageRead, { @@ -662,7 +659,7 @@ where pub fn get_num_consensus_validators( storage: &S, epoch: namada_core::chain::Epoch, -) -> namada_storage::Result +) -> StorageResult where S: StorageRead, { @@ -676,7 +673,7 @@ where pub fn read_below_capacity_validator_set_addresses_with_stake( storage: &S, epoch: namada_core::chain::Epoch, -) -> namada_storage::Result> +) -> StorageResult> where S: StorageRead, { @@ -706,7 +703,7 @@ where pub fn read_all_validator_addresses( storage: &S, epoch: namada_core::chain::Epoch, -) -> namada_storage::Result> +) -> StorageResult> where S: StorageRead, { @@ -725,7 +722,7 @@ pub fn update_total_deltas( current_epoch: namada_core::chain::Epoch, offset_opt: Option, update_active_voting_power: bool, -) -> namada_storage::Result<()> +) -> StorageResult<()> where S: StorageRead + StorageWrite, Gov: governance::Read, @@ -770,7 +767,7 @@ where pub fn read_validator_email( storage: &S, validator: &Address, -) -> namada_storage::Result> +) -> StorageResult> where S: StorageRead, { @@ -783,7 +780,7 @@ pub fn write_validator_email( storage: &mut S, validator: &Address, email: &String, -) -> namada_storage::Result<()> +) -> StorageResult<()> where S: StorageRead + StorageWrite, { @@ 
-799,7 +796,7 @@ where pub fn read_validator_description( storage: &S, validator: &Address, -) -> namada_storage::Result> +) -> StorageResult> where S: StorageRead, { @@ -812,7 +809,7 @@ pub fn write_validator_description( storage: &mut S, validator: &Address, description: &String, -) -> namada_storage::Result<()> +) -> StorageResult<()> where S: StorageRead + StorageWrite, { @@ -828,7 +825,7 @@ where pub fn read_validator_website( storage: &S, validator: &Address, -) -> namada_storage::Result> +) -> StorageResult> where S: StorageRead, { @@ -841,7 +838,7 @@ pub fn write_validator_website( storage: &mut S, validator: &Address, website: &String, -) -> namada_storage::Result<()> +) -> StorageResult<()> where S: StorageRead + StorageWrite, { @@ -857,7 +854,7 @@ where pub fn read_validator_discord_handle( storage: &S, validator: &Address, -) -> namada_storage::Result> +) -> StorageResult> where S: StorageRead, { @@ -870,7 +867,7 @@ pub fn write_validator_discord_handle( storage: &mut S, validator: &Address, discord_handle: &String, -) -> namada_storage::Result<()> +) -> StorageResult<()> where S: StorageRead + StorageWrite, { @@ -886,7 +883,7 @@ where pub fn read_validator_avatar( storage: &S, validator: &Address, -) -> namada_storage::Result> +) -> StorageResult> where S: StorageRead, { @@ -899,7 +896,7 @@ pub fn write_validator_avatar( storage: &mut S, validator: &Address, avatar: &String, -) -> namada_storage::Result<()> +) -> StorageResult<()> where S: StorageRead + StorageWrite, { @@ -915,7 +912,7 @@ where pub fn read_validator_name( storage: &S, validator: &Address, -) -> namada_storage::Result> +) -> StorageResult> where S: StorageRead, { @@ -928,7 +925,7 @@ pub fn write_validator_name( storage: &mut S, validator: &Address, validator_name: &String, -) -> namada_storage::Result<()> +) -> StorageResult<()> where S: StorageRead + StorageWrite, { @@ -944,7 +941,7 @@ pub fn write_validator_metadata( storage: &mut S, validator: &Address, metadata: &ValidatorMetaData, 
-) -> namada_storage::Result<()> +) -> StorageResult<()> where S: StorageRead + StorageWrite, { @@ -974,7 +971,7 @@ pub fn get_last_reward_claim_epoch( storage: &S, delegator: &Address, validator: &Address, -) -> namada_storage::Result> +) -> StorageResult> where S: StorageRead, { @@ -990,7 +987,7 @@ pub fn write_last_reward_claim_epoch( delegator: &Address, validator: &Address, epoch: Epoch, -) -> namada_storage::Result<()> +) -> StorageResult<()> where S: StorageRead + StorageWrite, { @@ -1006,7 +1003,7 @@ where pub fn try_insert_consensus_key( storage: &mut S, consensus_key: &common::PublicKey, -) -> namada_storage::Result<()> +) -> StorageResult<()> where S: StorageRead + StorageWrite, { @@ -1017,7 +1014,7 @@ where /// Get the unique set of consensus keys in storage pub fn get_consensus_key_set( storage: &S, -) -> namada_storage::Result> +) -> StorageResult> where S: StorageRead, { @@ -1030,7 +1027,7 @@ where pub fn is_consensus_key_used( storage: &S, consensus_key: &common::PublicKey, -) -> namada_storage::Result +) -> StorageResult where S: StorageRead, { @@ -1044,7 +1041,7 @@ pub fn get_consensus_key( storage: &S, addr: &Address, epoch: Epoch, -) -> namada_storage::Result> +) -> StorageResult> where S: StorageRead, Gov: governance::Read, diff --git a/crates/proof_of_stake/src/storage_key.rs b/crates/proof_of_stake/src/storage_key.rs index f590447ad6..57db98a2df 100644 --- a/crates/proof_of_stake/src/storage_key.rs +++ b/crates/proof_of_stake/src/storage_key.rs @@ -2,11 +2,10 @@ use namada_core::address::Address; use namada_core::storage::DbKeySeg; -use namada_storage::collections::{lazy_map, lazy_vec}; use super::ADDRESS; use crate::types::BondId; -use crate::{epoched, Epoch, Key, KeySeg}; +use crate::{epoched, lazy_map, lazy_vec, Epoch, Key, KeySeg}; const PARAMS_STORAGE_KEY: &str = "params"; const VALIDATOR_ADDRESSES_KEY: &str = "validator_addresses"; diff --git a/crates/proof_of_stake/src/tests/state_machine.rs 
b/crates/proof_of_stake/src/tests/state_machine.rs index adb1f1f9a8..1feb57fc3f 100644 --- a/crates/proof_of_stake/src/tests/state_machine.rs +++ b/crates/proof_of_stake/src/tests/state_machine.rs @@ -17,10 +17,6 @@ use namada_core::key::common::PublicKey; use namada_core::token::Change; use namada_governance::parameters::GovernanceParameters; use namada_state::testing::TestState; -use namada_storage::collections::lazy_map::{ - Collectable, NestedSubKey, SubKey, -}; -use namada_storage::StorageRead; use namada_trans_token::{self as token, read_balance}; use proptest::prelude::*; use proptest::test_runner::Config; @@ -31,6 +27,7 @@ use proptest_state_machine::{ // `tracing` logs from tests use test_log::test; +use crate::lazy_map::{Collectable, NestedSubKey, SubKey}; use crate::parameters::testing::arb_rate; use crate::parameters::PosParams; use crate::storage::{ @@ -55,7 +52,7 @@ use crate::{ is_validator_frozen, validator_deltas_handle, validator_slashes_handle, validator_state_handle, BondsForRemovalRes, EagerRedelegatedUnbonds, FoldRedelegatedBondsResult, ModifiedRedelegation, RedelegationError, - ResultSlashing, + ResultSlashing, StorageRead, }; prop_state_machine! 
{ diff --git a/crates/proof_of_stake/src/tests/state_machine_v2.rs b/crates/proof_of_stake/src/tests/state_machine_v2.rs index b53d86919e..33fd072da9 100644 --- a/crates/proof_of_stake/src/tests/state_machine_v2.rs +++ b/crates/proof_of_stake/src/tests/state_machine_v2.rs @@ -18,8 +18,6 @@ use namada_core::key::common::PublicKey; use namada_core::token::Change; use namada_governance::parameters::GovernanceParameters; use namada_state::testing::TestState; -use namada_storage::collections::lazy_map::{NestedSubKey, SubKey}; -use namada_storage::StorageRead; use namada_trans_token::{self as token, read_balance}; use proptest::prelude::*; use proptest::test_runner::Config; @@ -33,6 +31,7 @@ use yansi::Paint; use super::helpers::advance_epoch; use super::utils::DbgPrintDiff; +use crate::lazy_map::{NestedSubKey, SubKey}; use crate::parameters::testing::arb_rate; use crate::parameters::PosParams; use crate::slashing::find_slashes_in_range; @@ -58,7 +57,7 @@ use crate::{ below_capacity_validator_set_handle, bond_handle, consensus_validator_set_handle, delegator_redelegated_bonds_handle, validator_deltas_handle, validator_slashes_handle, validator_state_handle, - RedelegationError, + RedelegationError, StorageRead, }; prop_state_machine! 
{ diff --git a/crates/proof_of_stake/src/tests/test_helper_fns.rs b/crates/proof_of_stake/src/tests/test_helper_fns.rs index 651c4de7d5..4090fffe40 100644 --- a/crates/proof_of_stake/src/tests/test_helper_fns.rs +++ b/crates/proof_of_stake/src/tests/test_helper_fns.rs @@ -10,9 +10,8 @@ use namada_core::dec::Dec; use namada_core::storage::Key; use namada_core::token; use namada_state::testing::TestState; -use namada_storage::collections::lazy_map::NestedMap; -use namada_storage::collections::LazyCollection; +use crate::lazy_map::NestedMap; use crate::slashing::{ apply_list_slashes, compute_amount_after_slashing_unbond, compute_amount_after_slashing_withdraw, compute_bond_at_epoch, @@ -32,8 +31,8 @@ use crate::types::{ use crate::{ compute_modified_redelegation, compute_new_redelegated_unbonds, find_bonds_to_remove, fold_and_slash_redelegated_bonds, - EagerRedelegatedUnbonds, FoldRedelegatedBondsResult, ModifiedRedelegation, - OwnedPosParams, + EagerRedelegatedUnbonds, FoldRedelegatedBondsResult, LazyCollection, + ModifiedRedelegation, OwnedPosParams, }; /// `iterateBondsUpToAmountTest` diff --git a/crates/proof_of_stake/src/tests/test_pos.rs b/crates/proof_of_stake/src/tests/test_pos.rs index d6a430c796..2e1816fab1 100644 --- a/crates/proof_of_stake/src/tests/test_pos.rs +++ b/crates/proof_of_stake/src/tests/test_pos.rs @@ -13,8 +13,6 @@ use namada_core::key::testing::{common_sk_from_simple_seed, gen_keypair}; use namada_core::key::RefTo; use namada_core::{address, key}; use namada_state::testing::TestState; -use namada_storage::collections::lazy_map::Collectable; -use namada_storage::StorageRead; use namada_trans_token::{ self as token, credit_tokens, get_effective_total_native_supply, read_balance, @@ -26,6 +24,7 @@ use proptest::test_runner::Config; use test_log::test; use crate::epoched::EpochOffset; +use crate::lazy_map::Collectable; use crate::parameters::testing::arb_pos_params; use crate::parameters::OwnedPosParams; use 
crate::queries::find_delegation_validators; @@ -61,7 +60,7 @@ use crate::{ consensus_validator_set_handle, is_delegator, is_validator, jail_for_liveness, read_validator_stake, staking_token_address, unbond_handle, validator_consensus_key_handle, - validator_set_positions_handle, validator_state_handle, + validator_set_positions_handle, validator_state_handle, StorageRead, }; proptest! { diff --git a/crates/proof_of_stake/src/tests/test_slash_and_redel.rs b/crates/proof_of_stake/src/tests/test_slash_and_redel.rs index 4306a016e4..ab404cad4d 100644 --- a/crates/proof_of_stake/src/tests/test_slash_and_redel.rs +++ b/crates/proof_of_stake/src/tests/test_slash_and_redel.rs @@ -15,8 +15,6 @@ use namada_core::key::testing::{keypair_1, keypair_2, keypair_3}; use namada_core::key::RefTo; use namada_core::token::NATIVE_MAX_DECIMAL_PLACES; use namada_state::testing::TestState; -use namada_storage::collections::lazy_map::Collectable; -use namada_storage::StorageRead; use namada_trans_token::{self as token, credit_tokens, read_balance}; use proptest::prelude::*; use proptest::test_runner::Config; @@ -24,6 +22,7 @@ use proptest::test_runner::Config; // `tracing` logs from tests use test_log::test; +use crate::lazy_map::Collectable; use crate::storage::{ bond_handle, delegator_redelegated_bonds_handle, delegator_redelegated_unbonds_handle, enqueued_slashes_handle, @@ -44,7 +43,9 @@ use crate::tests::{ withdraw_tokens, }; use crate::types::{BondId, GenesisValidator, Slash, SlashType}; -use crate::{staking_token_address, OwnedPosParams, RedelegationError}; +use crate::{ + staking_token_address, OwnedPosParams, RedelegationError, StorageRead, +}; proptest! 
{ // Generate arb valid input for `test_simple_redelegation_aux` diff --git a/crates/proof_of_stake/src/tests/test_validator.rs b/crates/proof_of_stake/src/tests/test_validator.rs index 8988d1d115..e9fc6d987f 100644 --- a/crates/proof_of_stake/src/tests/test_validator.rs +++ b/crates/proof_of_stake/src/tests/test_validator.rs @@ -12,7 +12,6 @@ use namada_core::key::testing::{ use namada_core::key::{self, common, RefTo}; use namada_core::token; use namada_state::testing::TestState; -use namada_storage::collections::lazy_map; use namada_trans_token::credit_tokens; use proptest::prelude::*; use proptest::test_runner::Config; @@ -47,7 +46,8 @@ use crate::validator_set_update::{ insert_validator_into_validator_set, update_validator_set, }; use crate::{ - is_validator, staking_token_address, BecomeValidator, OwnedPosParams, + is_validator, lazy_map, staking_token_address, BecomeValidator, + OwnedPosParams, }; proptest! { diff --git a/crates/proof_of_stake/src/types/mod.rs b/crates/proof_of_stake/src/types/mod.rs index 8274c40637..dd37583813 100644 --- a/crates/proof_of_stake/src/types/mod.rs +++ b/crates/proof_of_stake/src/types/mod.rs @@ -17,13 +17,12 @@ use namada_core::token::Amount; use namada_macros::BorshDeserializer; #[cfg(feature = "migrations")] use namada_migrations::*; -use namada_storage::collections::lazy_map::NestedMap; -use namada_storage::collections::{LazyMap, LazySet, LazyVec}; pub use rev_order::ReverseOrdTokenAmount; use serde::{Deserialize, Serialize}; +use crate::lazy_map::NestedMap; use crate::parameters::PosParams; -use crate::{Epoch, KeySeg}; +use crate::{Epoch, KeySeg, LazyMap, LazySet, LazyVec}; /// Stored positions of validators in validator sets pub type ValidatorSetPositions = crate::epoched::NestedEpoched< diff --git a/crates/proof_of_stake/src/validator_set_update.rs b/crates/proof_of_stake/src/validator_set_update.rs index 5271074d95..f1527c0f65 100644 --- a/crates/proof_of_stake/src/validator_set_update.rs +++ 
b/crates/proof_of_stake/src/validator_set_update.rs @@ -6,11 +6,10 @@ use namada_core::chain::Epoch; use namada_core::collections::{HashMap, HashSet}; use namada_core::key::PublicKeyTmRawHash; use namada_core::token; -use namada_storage::collections::lazy_map::{NestedSubKey, SubKey}; -use namada_storage::{StorageRead, StorageWrite}; use namada_systems::governance; use once_cell::unsync::Lazy; +use crate::lazy_map::{NestedSubKey, SubKey}; use crate::storage::{ below_capacity_validator_set_handle, consensus_validator_set_handle, get_num_consensus_validators, read_validator_stake, @@ -22,7 +21,7 @@ use crate::types::{ ConsensusValidatorSet, Position, ReverseOrdTokenAmount, ValidatorPositionAddresses, ValidatorSetUpdate, ValidatorState, }; -use crate::PosParams; +use crate::{PosParams, StorageRead, StorageResult, StorageWrite}; /// Update validator set at the pipeline epoch when a validator receives a new /// bond and when its bond is unbonded (self-bond or delegation). @@ -33,7 +32,7 @@ pub fn update_validator_set( token_change: token::Change, current_epoch: Epoch, offset: Option, -) -> namada_storage::Result<()> +) -> StorageResult<()> where S: StorageRead + StorageWrite, Gov: governance::Read, @@ -361,7 +360,7 @@ pub fn insert_validator_into_validator_set( stake: token::Amount, current_epoch: Epoch, offset: u64, -) -> namada_storage::Result<()> +) -> StorageResult<()> where S: StorageRead + StorageWrite, Gov: governance::Read, @@ -464,7 +463,7 @@ pub fn remove_consensus_validator( params: &PosParams, epoch: Epoch, validator: &Address, -) -> namada_storage::Result<()> +) -> StorageResult<()> where S: StorageRead + StorageWrite, { @@ -492,7 +491,7 @@ pub fn remove_below_capacity_validator( params: &PosParams, epoch: Epoch, validator: &Address, -) -> namada_storage::Result<()> +) -> StorageResult<()> where S: StorageRead + StorageWrite, { @@ -524,7 +523,7 @@ pub fn promote_next_below_capacity_validator_to_consensus( storage: &mut S, current_epoch: Epoch, offset: u64, 
-) -> namada_storage::Result<()> +) -> StorageResult<()> where S: StorageRead + StorageWrite, Gov: governance::Read, @@ -570,7 +569,7 @@ pub fn validator_set_update_comet( params: &PosParams, current_epoch: Epoch, f: impl FnMut(ValidatorSetUpdate) -> T, -) -> namada_storage::Result> +) -> StorageResult> where S: StorageRead, { @@ -772,7 +771,7 @@ pub fn copy_validator_sets_and_positions( params: &PosParams, current_epoch: Epoch, target_epoch: Epoch, -) -> namada_storage::Result<()> +) -> StorageResult<()> where S: StorageRead + StorageWrite, { @@ -891,7 +890,7 @@ fn insert_into_consensus_and_demote_to_below_cap( offset: u64, consensus_set: &ConsensusValidatorSet, below_capacity_set: &BelowCapacityValidatorSet, -) -> namada_storage::Result<()> +) -> StorageResult<()> where S: StorageRead + StorageWrite, Gov: governance::Read, @@ -942,7 +941,7 @@ where fn find_first_position( handle: &ValidatorPositionAddresses, storage: &S, -) -> namada_storage::Result> +) -> StorageResult> where S: StorageRead, { @@ -958,7 +957,7 @@ where fn find_last_position( handle: &ValidatorPositionAddresses, storage: &S, -) -> namada_storage::Result> +) -> StorageResult> where S: StorageRead, { @@ -974,7 +973,7 @@ where fn find_next_position( handle: &ValidatorPositionAddresses, storage: &S, -) -> namada_storage::Result +) -> StorageResult where S: StorageRead, { @@ -990,7 +989,7 @@ where fn get_min_consensus_validator_amount( handle: &ConsensusValidatorSet, storage: &S, -) -> namada_storage::Result +) -> StorageResult where S: StorageRead, { @@ -1011,7 +1010,7 @@ where fn get_max_below_capacity_validator_amount( handle: &BelowCapacityValidatorSet, storage: &S, -) -> namada_storage::Result> +) -> StorageResult> where S: StorageRead, { @@ -1035,7 +1034,7 @@ fn insert_validator_into_set( storage: &mut S, epoch: &Epoch, address: &Address, -) -> namada_storage::Result<()> +) -> StorageResult<()> where S: StorageRead + StorageWrite, { @@ -1063,7 +1062,7 @@ fn read_validator_set_position( 
validator: &Address, epoch: Epoch, _params: &PosParams, -) -> namada_storage::Result> +) -> StorageResult> where S: StorageRead, { diff --git a/wasm/Cargo.lock b/wasm/Cargo.lock index 4513bdab3f..549483674d 100644 --- a/wasm/Cargo.lock +++ b/wasm/Cargo.lock @@ -3762,7 +3762,6 @@ dependencies = [ "namada_events", "namada_macros", "namada_state", - "namada_storage", "namada_systems", "namada_tx", "namada_vp", diff --git a/wasm_for_tests/Cargo.lock b/wasm_for_tests/Cargo.lock index b0da2050a9..7c4c064992 100644 --- a/wasm_for_tests/Cargo.lock +++ b/wasm_for_tests/Cargo.lock @@ -2063,7 +2063,6 @@ dependencies = [ "namada_events", "namada_macros", "namada_state", - "namada_storage", "namada_systems", "namada_tx", "namada_vp", From e74c96348e3da90a56d02b0b9545a2dad95b6336 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Tue, 20 Aug 2024 16:03:49 +0100 Subject: [PATCH 22/73] shielded_token: remove direct `namada_storage` dep --- Cargo.lock | 1 - crates/shielded_token/Cargo.toml | 4 +--- crates/shielded_token/src/conversion.rs | 31 +++++++++++++------------ crates/shielded_token/src/lib.rs | 7 +++--- crates/shielded_token/src/storage.rs | 13 +++++------ crates/shielded_token/src/utils.rs | 14 ++++++----- crates/shielded_token/src/validation.rs | 25 +++++++++++--------- crates/state/Cargo.toml | 3 ++- crates/state/src/lib.rs | 3 ++- 9 files changed, 53 insertions(+), 48 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4b42df069a..646422ab38 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5278,7 +5278,6 @@ dependencies = [ "namada_gas", "namada_parameters", "namada_state", - "namada_storage", "namada_systems", "namada_trans_token", "namada_tx", diff --git a/crates/shielded_token/Cargo.toml b/crates/shielded_token/Cargo.toml index e3677a3a20..b60853ea2f 100644 --- a/crates/shielded_token/Cargo.toml +++ b/crates/shielded_token/Cargo.toml @@ -28,7 +28,6 @@ namada_controller = { path = "../controller" } namada_core = { path = "../core" } 
namada_gas = { path = "../gas" } namada_state = { path = "../state" } -namada_storage = { path = "../storage" } namada_systems = { path = "../systems" } namada_tx = { path = "../tx" } namada_vp = { path = "../vp" } @@ -47,10 +46,9 @@ thiserror.workspace = true tracing.workspace = true [dev-dependencies] -namada_core = { path = "../core", features = ["testing"] } namada_gas = { path = "../gas" } namada_parameters = { path = "../parameters", features = ["testing"] } -namada_storage = { path = "../storage", features = ["testing"] } +namada_state = { path = "../state", features = ["testing"] } namada_trans_token = { path = "../trans_token" } lazy_static.workspace = true diff --git a/crates/shielded_token/src/conversion.rs b/crates/shielded_token/src/conversion.rs index 292e3f6503..feafb4c845 100644 --- a/crates/shielded_token/src/conversion.rs +++ b/crates/shielded_token/src/conversion.rs @@ -10,7 +10,6 @@ use namada_core::dec::Dec; use namada_core::hash::Hash; use namada_core::token::{Amount, DenominatedAmount, Denomination}; use namada_core::uint::Uint; -use namada_storage::{StorageRead, StorageWrite}; use namada_systems::{parameters, trans_token}; #[cfg(any(feature = "multicore", test))] @@ -20,7 +19,7 @@ use crate::storage_key::{ masp_last_locked_amount_key, masp_locked_amount_target_key, masp_max_reward_rate_key, }; -use crate::WithConversionState; +use crate::{StorageRead, StorageResult, StorageWrite, WithConversionState}; /// Compute shielded token inflation amount #[allow(clippy::too_many_arguments)] @@ -66,7 +65,7 @@ pub fn compute_inflation( pub fn calculate_masp_rewards_precision( storage: &mut S, addr: &Address, -) -> namada_storage::Result<(u128, Denomination)> +) -> StorageResult<(u128, Denomination)> where S: StorageWrite + StorageRead, TransToken: trans_token::Read, @@ -92,7 +91,7 @@ pub fn calculate_masp_rewards( storage: &mut S, token: &Address, masp_epochs_per_year: u64, -) -> namada_storage::Result<((u128, u128), Denomination)> +) -> 
StorageResult<((u128, u128), Denomination)> where S: StorageWrite + StorageRead, TransToken: trans_token::Keys + trans_token::Read, @@ -233,7 +232,7 @@ where /// Update the MASP's allowed conversions pub fn update_allowed_conversions( _storage: &mut S, -) -> namada_storage::Result<()> +) -> StorageResult<()> where S: StorageWrite + StorageRead + WithConversionState, Params: parameters::Read, @@ -246,7 +245,7 @@ where /// Update the MASP's allowed conversions pub fn update_allowed_conversions( storage: &mut S, -) -> namada_storage::Result<()> +) -> StorageResult<()> where S: StorageWrite + StorageRead + WithConversionState, Params: parameters::Read, @@ -265,14 +264,14 @@ where use namada_core::arith::CheckedAdd; use namada_core::masp::{encode_asset_type, MaspEpoch}; use namada_core::token::{MaspDigitPos, NATIVE_MAX_DECIMAL_PLACES}; - use namada_storage::conversion_state::ConversionLeaf; - use namada_storage::{Error, OptionExt, ResultExt}; use rayon::iter::{ IndexedParallelIterator, IntoParallelIterator, ParallelIterator, }; use rayon::prelude::ParallelSlice; - use crate::mint_rewards; + use crate::{ + mint_rewards, ConversionLeaf, OptionExt, ResultExt, StorageError, + }; // The derived conversions will be placed in MASP address space let masp_addr = MASP; @@ -349,7 +348,7 @@ where storage.get_block_epoch()?, masp_epoch_multiplier, ) - .map_err(namada_storage::Error::new_const)?; + .map_err(StorageError::new_const)?; let prev_masp_epoch = match masp_epoch.prev() { Some(epoch) => epoch, None => return Ok(()), @@ -443,7 +442,9 @@ where normed_inflation, )) .ok_or_else(|| { - Error::new_const("Three digit reward overflow") + StorageError::new_const( + "Three digit reward overflow", + ) })?; total_reward = total_reward .checked_add( @@ -455,7 +456,7 @@ where .unwrap_or_default(), ) .ok_or_else(|| { - Error::new_const( + StorageError::new_const( "Three digit total reward overflow", ) })?; @@ -514,14 +515,14 @@ where addr_bal .u128_eucl_div_rem((reward, precision)) 
.ok_or_else(|| { - Error::new_const( + StorageError::new_const( "Total reward calculation overflow", ) })? .0, ) .ok_or_else(|| { - Error::new_const("Total reward overflow") + StorageError::new_const("Total reward overflow") })?; } } @@ -660,7 +661,7 @@ mod tests { use namada_core::collections::HashMap; use namada_core::dec::testing::arb_non_negative_dec; use namada_core::token::testing::arb_amount; - use namada_storage::testing::TestStorage; + use namada_state::testing::TestStorage; use namada_trans_token::storage_key::{balance_key, minted_balance_key}; use namada_trans_token::write_denom; use proptest::prelude::*; diff --git a/crates/shielded_token/src/lib.rs b/crates/shielded_token/src/lib.rs index 3a1450d9a6..5592d22504 100644 --- a/crates/shielded_token/src/lib.rs +++ b/crates/shielded_token/src/lib.rs @@ -28,9 +28,10 @@ use std::str::FromStr; pub use masp_primitives::transaction; use namada_core::borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; -use namada_core::dec::Dec; -pub use namada_storage::conversion_state::{ - ConversionLeaf, ConversionState, WithConversionState, +pub use namada_core::dec::Dec; +pub use namada_state::{ + ConversionLeaf, ConversionState, Key, OptionExt, ResultExt, StorageError, + StorageRead, StorageResult, StorageWrite, WithConversionState, }; use serde::{Deserialize, Serialize}; pub use storage::*; diff --git a/crates/shielded_token/src/storage.rs b/crates/shielded_token/src/storage.rs index 0dcc4cd051..c124a0f6a3 100644 --- a/crates/shielded_token/src/storage.rs +++ b/crates/shielded_token/src/storage.rs @@ -3,13 +3,12 @@ use namada_core::arith::checked; use namada_core::token; use namada_core::token::Amount; use namada_core::uint::Uint; -use namada_storage as storage; -use namada_storage::{StorageRead, StorageWrite}; use namada_systems::trans_token; -use storage::ResultExt; use crate::storage_key::*; -use crate::ShieldedParams; +use crate::{ + ResultExt, ShieldedParams, StorageRead, StorageResult, StorageWrite, +}; /// 
Initialize parameters for the token in storage during the genesis block. pub fn write_params( @@ -17,7 +16,7 @@ pub fn write_params( storage: &mut S, token: &Address, denom: &token::Denomination, -) -> storage::Result<()> +) -> StorageResult<()> where S: StorageRead + StorageWrite, TransToken: trans_token::Keys, @@ -56,7 +55,7 @@ where pub fn mint_rewards( storage: &mut S, amount: token::Amount, -) -> storage::Result<()> +) -> StorageResult<()> where S: StorageRead + StorageWrite, TransToken: trans_token::Write, @@ -71,7 +70,7 @@ where } /// Read the total rewards minted by MASP. -pub fn read_total_rewards(storage: &S) -> storage::Result +pub fn read_total_rewards(storage: &S) -> StorageResult where S: StorageRead, { diff --git a/crates/shielded_token/src/utils.rs b/crates/shielded_token/src/utils.rs index 9396755659..d67d09f5c5 100644 --- a/crates/shielded_token/src/utils.rs +++ b/crates/shielded_token/src/utils.rs @@ -5,17 +5,17 @@ use std::collections::BTreeSet; use masp_primitives::merkle_tree::CommitmentTree; use masp_primitives::sapling::Node; use masp_primitives::transaction::Transaction; -use namada_storage::{Error, Key, Result, StorageRead, StorageWrite}; use crate::storage_key::{ is_masp_transfer_key, masp_commitment_tree_key, masp_nullifier_key, }; +use crate::{Key, StorageError, StorageRead, StorageResult, StorageWrite}; // Writes the nullifiers of the provided masp transaction to storage fn reveal_nullifiers( ctx: &mut impl StorageWrite, transaction: &Transaction, -) -> Result<()> { +) -> StorageResult<()> { for description in transaction .sapling_bundle() .map_or(&vec![], |description| &description.shielded_spends) @@ -33,12 +33,12 @@ fn reveal_nullifiers( pub fn update_note_commitment_tree( ctx: &mut (impl StorageRead + StorageWrite), transaction: &Transaction, -) -> Result<()> { +) -> StorageResult<()> { if let Some(bundle) = transaction.sapling_bundle() { if !bundle.shielded_outputs.is_empty() { let tree_key = masp_commitment_tree_key(); let mut 
commitment_tree: CommitmentTree = - ctx.read(&tree_key)?.ok_or(Error::SimpleMessage( + ctx.read(&tree_key)?.ok_or(StorageError::SimpleMessage( "Missing note commitment tree in storage", ))?; @@ -47,7 +47,9 @@ pub fn update_note_commitment_tree( commitment_tree .append(Node::from_scalar(description.cmu)) .map_err(|_| { - Error::SimpleMessage("Note commitment tree is full") + StorageError::SimpleMessage( + "Note commitment tree is full", + ) })?; } @@ -62,7 +64,7 @@ pub fn update_note_commitment_tree( pub fn handle_masp_tx( ctx: &mut (impl StorageRead + StorageWrite), shielded: &Transaction, -) -> Result<()> { +) -> StorageResult<()> { // TODO(masp#73): temporarily disabled because of the node aggregation issue // in WASM. Using the host env tx_update_masp_note_commitment_tree or // directly the update_note_commitment_tree function as a workaround diff --git a/crates/shielded_token/src/validation.rs b/crates/shielded_token/src/validation.rs index 2fe6786b51..193846a3a4 100644 --- a/crates/shielded_token/src/validation.rs +++ b/crates/shielded_token/src/validation.rs @@ -18,10 +18,11 @@ use masp_primitives::transaction::{ }; use masp_proofs::bellman::groth16::VerifyingKey; use masp_proofs::sapling::BatchValidator; -use namada_storage::Error; use rand_core::OsRng; use smooth_operator::checked; +use crate::{StorageError, StorageResult}; + // TODO these could be exported from masp_proof crate /// Spend circuit name pub const SPEND_NAME: &str = "masp-spend.params"; @@ -118,16 +119,16 @@ fn load_pvks() -> &'static PVKs { pub fn verify_shielded_tx( transaction: &Transaction, consume_verify_gas: F, -) -> Result<(), Error> +) -> Result<(), StorageError> where - F: Fn(u64) -> std::result::Result<(), Error>, + F: Fn(u64) -> StorageResult<()>, { tracing::debug!("entered verify_shielded_tx()"); let sapling_bundle = if let Some(bundle) = transaction.sapling_bundle() { bundle } else { - return Err(Error::SimpleMessage("no sapling bundle")); + return 
Err(StorageError::SimpleMessage("no sapling bundle")); }; let tx_data = transaction.deref(); @@ -135,7 +136,7 @@ where let unauth_tx_data = match partial_deauthorize(tx_data) { Some(tx_data) => tx_data, None => { - return Err(Error::SimpleMessage( + return Err(StorageError::SimpleMessage( "Failed to partially de-authorize", )); } @@ -166,14 +167,16 @@ where if !ctx.check_bundle(sapling_bundle.to_owned(), sighash.as_ref().to_owned()) { tracing::debug!("failed check bundle"); - return Err(Error::SimpleMessage("Invalid sapling bundle")); + return Err(StorageError::SimpleMessage("Invalid sapling bundle")); } tracing::debug!("passed check bundle"); // Charge gas before final validation charge_masp_validate_gas(sapling_bundle, consume_verify_gas)?; if !ctx.validate(spend_vk, convert_vk, output_vk, OsRng) { - return Err(Error::SimpleMessage("Invalid proofs or signatures")); + return Err(StorageError::SimpleMessage( + "Invalid proofs or signatures", + )); } Ok(()) } @@ -216,9 +219,9 @@ pub fn partial_deauthorize( fn charge_masp_validate_gas( sapling_bundle: &SaplingBundle, consume_verify_gas: F, -) -> Result<(), Error> +) -> StorageResult<()> where - F: Fn(u64) -> std::result::Result<(), Error>, + F: Fn(u64) -> StorageResult<()>, { // Signatures gas consume_verify_gas(checked!( @@ -264,9 +267,9 @@ where fn charge_masp_check_bundle_gas( sapling_bundle: &SaplingBundle, consume_verify_gas: F, -) -> Result<(), Error> +) -> StorageResult<()> where - F: Fn(u64) -> std::result::Result<(), Error>, + F: Fn(u64) -> StorageResult<()>, { consume_verify_gas(checked!( (sapling_bundle.shielded_spends.len() as u64) diff --git a/crates/state/Cargo.toml b/crates/state/Cargo.toml index 564a0014a6..c56b6285bf 100644 --- a/crates/state/Cargo.toml +++ b/crates/state/Cargo.toml @@ -19,7 +19,8 @@ default = [] testing = [ "proptest", "namada_core/testing", - "namada_merkle_tree/testing" + "namada_merkle_tree/testing", + "namada_storage/testing", ] migrations = [ "namada_migrations", diff --git 
a/crates/state/src/lib.rs b/crates/state/src/lib.rs index f591dd13f3..0eef2a0686 100644 --- a/crates/state/src/lib.rs +++ b/crates/state/src/lib.rs @@ -51,7 +51,7 @@ pub use namada_merkle_tree::{ }; pub use namada_storage as storage; pub use namada_storage::conversion_state::{ - ConversionState, WithConversionState, + ConversionLeaf, ConversionState, WithConversionState, }; pub use namada_storage::types::{KVBytes, PatternIterator, PrefixIterator}; pub use namada_storage::{ @@ -615,6 +615,7 @@ pub mod testing { use namada_core::address::EstablishedAddressGen; use namada_core::chain::ChainId; use namada_core::time::DateTimeUtc; + pub use namada_storage::testing::{PrefixIter, *}; use namada_storage::tx_queue::ExpiredTxsQueue; use storage::types::CommitOnlyData; From 8e025ad9d1286f93cc9ac1792b84150959993d15 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Tue, 20 Aug 2024 16:16:49 +0100 Subject: [PATCH 23/73] trans_token: remove direct `namada_storage` dep --- Cargo.lock | 2 - crates/token/Cargo.toml | 1 - crates/token/src/lib.rs | 1 - crates/trans_token/Cargo.toml | 2 - crates/trans_token/src/lib.rs | 4 +- crates/trans_token/src/storage.rs | 71 ++++++++++++++----------------- crates/trans_token/src/vp.rs | 5 +-- wasm/Cargo.lock | 3 -- wasm_for_tests/Cargo.lock | 3 -- 9 files changed, 38 insertions(+), 54 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 646422ab38..ef930406a3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5425,7 +5425,6 @@ dependencies = [ "namada_events", "namada_macros", "namada_shielded_token", - "namada_storage", "namada_systems", "namada_trans_token", "proptest", @@ -5446,7 +5445,6 @@ dependencies = [ "namada_ibc", "namada_parameters", "namada_state", - "namada_storage", "namada_systems", "namada_tx", "namada_vm", diff --git a/crates/token/Cargo.toml b/crates/token/Cargo.toml index 62bf495b69..9e6ffe08d6 100644 --- a/crates/token/Cargo.toml +++ b/crates/token/Cargo.toml @@ -24,7 +24,6 @@ namada_core = { path = 
"../core" } namada_events = { path = "../events", default-features = false } namada_macros = { path = "../macros" } namada_shielded_token = { path = "../shielded_token" } -namada_storage = { path = "../storage" } namada_systems = { path = "../systems" } namada_trans_token = { path = "../trans_token" } diff --git a/crates/token/src/lib.rs b/crates/token/src/lib.rs index 6ba04fbe7e..b8dad40671 100644 --- a/crates/token/src/lib.rs +++ b/crates/token/src/lib.rs @@ -87,7 +87,6 @@ use std::collections::BTreeMap; use namada_core::address::Address; use namada_core::masp::TxId; use namada_events::EmitEvents; -use namada_storage::{StorageRead, StorageWrite}; /// Initialize parameters for the token in storage during the genesis block. pub fn write_params( diff --git a/crates/trans_token/Cargo.toml b/crates/trans_token/Cargo.toml index 6edf89c1a2..c1b9b76530 100644 --- a/crates/trans_token/Cargo.toml +++ b/crates/trans_token/Cargo.toml @@ -22,7 +22,6 @@ migrations = [ namada_core = { path = "../core" } namada_events = { path = "../events", default-features = false } namada_state = { path = "../state" } -namada_storage = { path = "../storage" } namada_systems = { path = "../systems" } namada_tx = { path = "../tx" } namada_vp = { path = "../vp" } @@ -39,7 +38,6 @@ namada_governance = { path = "../governance", features = ["testing"] } namada_ibc = { path = "../ibc", features = ["testing"] } namada_parameters = { path = "../parameters", features = ["testing"] } namada_state = { path = "../state", features = ["testing"] } -namada_storage = { path = "../storage", features = ["testing"] } namada_tx = { path = "../tx", features = ["testing"] } namada_vm = { path = "../vm", features = ["testing"] } diff --git a/crates/trans_token/src/lib.rs b/crates/trans_token/src/lib.rs index 024004972a..680f9f7376 100644 --- a/crates/trans_token/src/lib.rs +++ b/crates/trans_token/src/lib.rs @@ -30,7 +30,9 @@ use namada_core::token; use namada_core::uint::Uint; use 
namada_events::extend::UserAccount; use namada_events::{EmitEvents, EventLevel}; -use namada_storage::{StorageRead, StorageWrite}; +pub use namada_state::{ + Key, ResultExt, StorageError, StorageRead, StorageResult, StorageWrite, +}; pub use namada_systems::trans_token::*; pub use storage::*; diff --git a/crates/trans_token/src/storage.rs b/crates/trans_token/src/storage.rs index 940eb3a1c5..4391f7d43f 100644 --- a/crates/trans_token/src/storage.rs +++ b/crates/trans_token/src/storage.rs @@ -5,17 +5,14 @@ use namada_core::collections::HashSet; use namada_core::hints; pub use namada_core::storage::Key; use namada_core::token::{self, Amount, AmountError, DenominatedAmount}; -use namada_storage as storage; -use namada_storage::{StorageRead, StorageWrite}; -use storage::ResultExt; use crate::storage_key::*; +use crate::{ + ResultExt, StorageError, StorageRead, StorageResult, StorageWrite, +}; /// Initialize parameters for the token in storage during the genesis block. -pub fn write_params( - storage: &mut S, - address: &Address, -) -> storage::Result<()> +pub fn write_params(storage: &mut S, address: &Address) -> StorageResult<()> where S: StorageRead + StorageWrite, { @@ -28,7 +25,7 @@ pub fn read_balance( storage: &S, token: &Address, owner: &Address, -) -> storage::Result +) -> StorageResult where S: StorageRead, { @@ -43,10 +40,10 @@ pub fn update_balance( token: &Address, owner: &Address, f: F, -) -> storage::Result<()> +) -> StorageResult<()> where S: StorageRead + StorageWrite, - F: FnOnce(token::Amount) -> storage::Result, + F: FnOnce(token::Amount) -> StorageResult, { let key = balance_key(token, owner); let balance = storage.read::(&key)?.unwrap_or_default(); @@ -60,7 +57,7 @@ pub fn increment_balance( token: &Address, owner: &Address, amount: token::Amount, -) -> storage::Result<()> +) -> StorageResult<()> where S: StorageRead + StorageWrite, { @@ -78,7 +75,7 @@ pub fn decrement_balance( token: &Address, owner: &Address, amount: token::Amount, -) -> 
storage::Result<()> +) -> StorageResult<()> where S: StorageRead + StorageWrite, { @@ -94,7 +91,7 @@ where pub fn read_total_supply( storage: &S, token: &Address, -) -> storage::Result +) -> StorageResult where S: StorageRead, { @@ -108,10 +105,10 @@ pub fn update_total_supply( storage: &mut S, token: &Address, f: F, -) -> storage::Result<()> +) -> StorageResult<()> where S: StorageRead + StorageWrite, - F: FnOnce(token::Amount) -> storage::Result, + F: FnOnce(token::Amount) -> StorageResult, { let key = minted_balance_key(token); let total_supply = storage.read::(&key)?.unwrap_or_default(); @@ -124,7 +121,7 @@ pub fn increment_total_supply( storage: &mut S, token: &Address, amount: token::Amount, -) -> storage::Result<()> +) -> StorageResult<()> where S: StorageRead + StorageWrite, { @@ -141,7 +138,7 @@ pub fn decrement_total_supply( storage: &mut S, token: &Address, amount: token::Amount, -) -> storage::Result<()> +) -> StorageResult<()> where S: StorageRead + StorageWrite, { @@ -156,7 +153,7 @@ where /// Get the effective circulating total supply of native tokens. 
pub fn get_effective_total_native_supply( storage: &S, -) -> namada_storage::Result +) -> StorageResult where S: StorageRead, { @@ -178,7 +175,7 @@ where pub fn read_denom( storage: &S, token: &Address, -) -> storage::Result> +) -> StorageResult> where S: StorageRead, { @@ -216,7 +213,7 @@ pub fn write_denom( storage: &mut S, token: &Address, denom: token::Denomination, -) -> storage::Result<()> +) -> StorageResult<()> where S: StorageRead + StorageWrite, { @@ -233,7 +230,7 @@ pub fn transfer( src: &Address, dest: &Address, amount: token::Amount, -) -> storage::Result<()> +) -> StorageResult<()> where S: StorageRead + StorageWrite, { @@ -252,12 +249,12 @@ where storage.write(&src_key, new_src_balance)?; storage.write(&dest_key, new_dest_balance) } - None => Err(storage::Error::new_alloc(format!( + None => Err(StorageError::new_alloc(format!( "The transfer would overflow balance of {dest}" ))), } } - None => Err(storage::Error::new_alloc(format!( + None => Err(StorageError::new_alloc(format!( "{src} has insufficient balance" ))), } @@ -271,7 +268,7 @@ pub fn multi_transfer( storage: &mut S, sources: &BTreeMap<(Address, Address), Amount>, dests: &BTreeMap<(Address, Address), Amount>, -) -> storage::Result> +) -> StorageResult> where S: StorageRead + StorageWrite, { @@ -282,21 +279,19 @@ where accounts.extend(dests.keys().cloned()); let unexpected_err = || { - storage::Error::new_const( + StorageError::new_const( "Computing difference between amounts should never overflow", ) }; // Apply the balance change for each account in turn for ref account @ (ref owner, ref token) in accounts { let overflow_err = || { - storage::Error::new_alloc(format!( + StorageError::new_alloc(format!( "The transfer would overflow balance of {owner}" )) }; let underflow_err = || { - storage::Error::new_alloc(format!( - "{owner} has insufficient balance" - )) + StorageError::new_alloc(format!("{owner} has insufficient balance")) }; // Load account balances and deltas let owner_key = 
balance_key(token, owner); @@ -331,7 +326,7 @@ pub fn mint_tokens( token: &Address, dest: &Address, amount: token::Amount, -) -> storage::Result<()> +) -> StorageResult<()> where S: StorageRead + StorageWrite, { @@ -346,7 +341,7 @@ pub fn credit_tokens( token: &Address, dest: &Address, amount: token::Amount, -) -> storage::Result<()> +) -> StorageResult<()> where S: StorageRead + StorageWrite, { @@ -365,7 +360,7 @@ pub fn burn_tokens( token: &Address, source: &Address, amount: token::Amount, -) -> storage::Result<()> +) -> StorageResult<()> where S: StorageRead + StorageWrite, { @@ -390,9 +385,9 @@ pub fn denominated( amount: token::Amount, token: &Address, storage: &impl StorageRead, -) -> storage::Result { +) -> StorageResult { let denom = read_denom(storage, token)?.ok_or_else(|| { - storage::Error::SimpleMessage( + StorageError::SimpleMessage( "No denomination found in storage for the given token", ) })?; @@ -406,15 +401,15 @@ pub fn denom_to_amount( denom_amount: DenominatedAmount, token: &Address, storage: &impl StorageRead, -) -> storage::Result { +) -> StorageResult { #[cfg(not(fuzzing))] { let denom = read_denom(storage, token)?.ok_or_else(|| { - storage::Error::SimpleMessage( + StorageError::SimpleMessage( "No denomination found in storage for the given token", ) })?; - denom_amount.scale(denom).map_err(storage::Error::new) + denom_amount.scale(denom).map_err(StorageError::new) } #[cfg(fuzzing)] @@ -429,7 +424,7 @@ mod testing { use std::collections::BTreeMap; use namada_core::{address, token}; - use namada_storage::testing::TestStorage; + use namada_state::testing::TestStorage; use super::{ burn_tokens, credit_tokens, multi_transfer, read_balance, diff --git a/crates/trans_token/src/vp.rs b/crates/trans_token/src/vp.rs index af56a30823..59c32fc8a5 100644 --- a/crates/trans_token/src/vp.rs +++ b/crates/trans_token/src/vp.rs @@ -9,7 +9,6 @@ use namada_core::collections::HashMap; use namada_core::storage::{Key, KeySeg}; use namada_core::token::Amount; use 
namada_state::StateRead; -use namada_storage::StorageRead; use namada_systems::{governance, parameters}; use namada_tx::action::{ Action, Bond, ClaimRewards, GovAction, PosAction, Read, Withdraw, @@ -25,6 +24,7 @@ use crate::storage_key::{ is_any_minted_balance_key, is_any_minter_key, is_any_token_balance_key, is_any_token_parameter_key, minter_key, }; +use crate::StorageRead; /// The owner of some balance change. #[derive(Copy, Clone, Eq, PartialEq)] @@ -430,8 +430,7 @@ mod tests { use namada_ibc::trace::ibc_token; use namada_parameters::storage::get_native_token_transferable_key; use namada_state::testing::TestState; - use namada_state::StorageWrite; - use namada_storage::TxIndex; + use namada_state::{StorageWrite, TxIndex}; use namada_tx::action::Write; use namada_tx::data::TxType; use namada_tx::{Authorization, BatchedTx, Code, Data, Section, Tx}; diff --git a/wasm/Cargo.lock b/wasm/Cargo.lock index 549483674d..ab29aecb84 100644 --- a/wasm/Cargo.lock +++ b/wasm/Cargo.lock @@ -3868,7 +3868,6 @@ dependencies = [ "namada_core", "namada_gas", "namada_state", - "namada_storage", "namada_systems", "namada_tx", "namada_vp", @@ -3980,7 +3979,6 @@ dependencies = [ "namada_events", "namada_macros", "namada_shielded_token", - "namada_storage", "namada_systems", "namada_trans_token", "proptest", @@ -3995,7 +3993,6 @@ dependencies = [ "namada_core", "namada_events", "namada_state", - "namada_storage", "namada_systems", "namada_tx", "namada_vp", diff --git a/wasm_for_tests/Cargo.lock b/wasm_for_tests/Cargo.lock index 7c4c064992..46ebbb0645 100644 --- a/wasm_for_tests/Cargo.lock +++ b/wasm_for_tests/Cargo.lock @@ -2093,7 +2093,6 @@ dependencies = [ "namada_core", "namada_gas", "namada_state", - "namada_storage", "namada_systems", "namada_tx", "namada_vp", @@ -2173,7 +2172,6 @@ dependencies = [ "namada_events", "namada_macros", "namada_shielded_token", - "namada_storage", "namada_systems", "namada_trans_token", "serde", @@ -2187,7 +2185,6 @@ dependencies = [ "namada_core", 
"namada_events", "namada_state", - "namada_storage", "namada_systems", "namada_tx", "namada_vp", From 720efff9425ccab12b245192db08db51cc5e26fa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Tue, 20 Aug 2024 16:20:47 +0100 Subject: [PATCH 24/73] vm_env: remove unused deps and re-export public api --- Cargo.lock | 2 -- crates/vm_env/.gitignore | 10 ---------- crates/vm_env/Cargo.toml | 2 -- crates/vm_env/src/lib.rs | 4 ++-- wasm/Cargo.lock | 2 -- wasm_for_tests/Cargo.lock | 2 -- 6 files changed, 2 insertions(+), 20 deletions(-) delete mode 100644 crates/vm_env/.gitignore diff --git a/Cargo.lock b/Cargo.lock index ef930406a3..743262b962 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5558,8 +5558,6 @@ dependencies = [ name = "namada_vm_env" version = "0.43.0" dependencies = [ - "borsh", - "masp_primitives", "namada_core", ] diff --git a/crates/vm_env/.gitignore b/crates/vm_env/.gitignore deleted file mode 100644 index 65d4c18e2d..0000000000 --- a/crates/vm_env/.gitignore +++ /dev/null @@ -1,10 +0,0 @@ -# Generated by Cargo -# will have compiled files and executables -debug/ -target/ - -# These are backup files generated by rustfmt -**/*.rs.bk - -# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html -Cargo.lock \ No newline at end of file diff --git a/crates/vm_env/Cargo.toml b/crates/vm_env/Cargo.toml index 29f5882eb2..ee3ea5f763 100644 --- a/crates/vm_env/Cargo.toml +++ b/crates/vm_env/Cargo.toml @@ -17,5 +17,3 @@ default = [] [dependencies] namada_core = { path = "../core" } -borsh.workspace = true -masp_primitives.workspace = true diff --git a/crates/vm_env/src/lib.rs b/crates/vm_env/src/lib.rs index e1ec6ecff9..f20ae1b23d 100644 --- a/crates/vm_env/src/lib.rs +++ b/crates/vm_env/src/lib.rs @@ -17,8 +17,8 @@ clippy::print_stderr )] -use borsh::BorshDeserialize; -use namada_core::internal::{HostEnvResult, KeyVal}; +use namada_core::borsh::BorshDeserialize; +pub use 
namada_core::internal::{HostEnvResult, KeyVal}; /// Transaction environment imports pub mod tx { diff --git a/wasm/Cargo.lock b/wasm/Cargo.lock index ab29aecb84..34644b10c0 100644 --- a/wasm/Cargo.lock +++ b/wasm/Cargo.lock @@ -4093,8 +4093,6 @@ dependencies = [ name = "namada_vm_env" version = "0.43.0" dependencies = [ - "borsh", - "masp_primitives", "namada_core", ] diff --git a/wasm_for_tests/Cargo.lock b/wasm_for_tests/Cargo.lock index 46ebbb0645..59b566474c 100644 --- a/wasm_for_tests/Cargo.lock +++ b/wasm_for_tests/Cargo.lock @@ -2255,8 +2255,6 @@ dependencies = [ name = "namada_vm_env" version = "0.43.0" dependencies = [ - "borsh", - "masp_primitives", "namada_core", ] From 72f5d87295cc0e26f51d1474d85c8a9c0d3699b0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Wed, 21 Aug 2024 10:59:04 +0100 Subject: [PATCH 25/73] ibc: remove direct `namada_storage` dep --- Cargo.lock | 1 - crates/ibc/Cargo.toml | 8 +++-- crates/ibc/src/actions.rs | 21 ++++++------- crates/ibc/src/context/storage.rs | 12 ++++---- crates/ibc/src/lib.rs | 50 ++++++++++++++----------------- wasm/Cargo.lock | 1 - wasm_for_tests/Cargo.lock | 1 - 7 files changed, 44 insertions(+), 50 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 743262b962..4911ad3a8f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4988,7 +4988,6 @@ dependencies = [ "namada_parameters", "namada_proof_of_stake", "namada_state", - "namada_storage", "namada_systems", "namada_token", "namada_tx", diff --git a/crates/ibc/Cargo.toml b/crates/ibc/Cargo.toml index d424a1bb2e..39f0c77965 100644 --- a/crates/ibc/Cargo.toml +++ b/crates/ibc/Cargo.toml @@ -18,7 +18,12 @@ migrations = [ "namada_migrations", "linkme", ] -testing = ["namada_core/testing", "ibc-testkit", "proptest"] +testing = [ + "namada_core/testing", + "namada_state/testing", + "ibc-testkit", + "proptest", +] arbitrary = ["dep:arbitrary", "namada_core/arbitrary", "namada_token/arbitrary"] [dependencies] @@ -28,7 +33,6 @@ namada_gas = { 
path = "../gas" } namada_macros = {path = "../macros"} namada_migrations = {path = "../migrations", optional = true} namada_state = { path = "../state" } -namada_storage = { path = "../storage" } namada_systems = { path = "../systems" } namada_tx = { path = "../tx" } namada_vp = { path = "../vp" } diff --git a/crates/ibc/src/actions.rs b/crates/ibc/src/actions.rs index 51400163a7..f6619eb34a 100644 --- a/crates/ibc/src/actions.rs +++ b/crates/ibc/src/actions.rs @@ -18,8 +18,8 @@ use namada_core::tendermint::Time as TmTime; use namada_core::token::Amount; use namada_events::EmitEvents; use namada_state::{ - BlockHeight, Epoch, Epochs, ResultExt, State, StorageError, StorageRead, - StorageResult, StorageWrite, + BlockHeader, BlockHeight, Epoch, Epochs, Key, ResultExt, State, + StorageError, StorageRead, StorageResult, StorageWrite, TxIndex, }; use namada_systems::{parameters, trans_token}; @@ -43,20 +43,17 @@ where { type PrefixIter<'iter> = ::PrefixIter<'iter> where Self: 'iter; - fn read_bytes( - &self, - key: &namada_storage::Key, - ) -> StorageResult>> { + fn read_bytes(&self, key: &Key) -> StorageResult>> { self.state.read_bytes(key) } - fn has_key(&self, key: &namada_storage::Key) -> StorageResult { + fn has_key(&self, key: &Key) -> StorageResult { self.state.has_key(key) } fn iter_prefix<'iter>( &'iter self, - prefix: &namada_storage::Key, + prefix: &Key, ) -> StorageResult> { self.state.iter_prefix(prefix) } @@ -79,7 +76,7 @@ where fn get_block_header( &self, height: BlockHeight, - ) -> StorageResult> { + ) -> StorageResult> { StorageRead::get_block_header(self.state, height) } @@ -91,7 +88,7 @@ where self.state.get_pred_epochs() } - fn get_tx_index(&self) -> StorageResult { + fn get_tx_index(&self) -> StorageResult { self.state.get_tx_index() } @@ -106,13 +103,13 @@ where { fn write_bytes( &mut self, - key: &namada_storage::Key, + key: &Key, val: impl AsRef<[u8]>, ) -> StorageResult<()> { self.state.write_bytes(key, val) } - fn delete(&mut self, key: 
&namada_storage::Key) -> StorageResult<()> { + fn delete(&mut self, key: &Key) -> StorageResult<()> { self.state.delete(key) } } diff --git a/crates/ibc/src/context/storage.rs b/crates/ibc/src/context/storage.rs index deb05b0470..f485ef77cf 100644 --- a/crates/ibc/src/context/storage.rs +++ b/crates/ibc/src/context/storage.rs @@ -3,7 +3,7 @@ pub use ics23::ProofSpec; use namada_core::address::Address; use namada_core::token::Amount; -use namada_storage::{Error, StorageRead, StorageWrite}; +use namada_state::{StorageRead, StorageResult, StorageWrite}; use crate::event::IbcEvent; @@ -19,7 +19,7 @@ pub trait IbcStorageContext { fn storage_mut(&mut self) -> &mut Self::Storage; /// Emit an IBC event - fn emit_ibc_event(&mut self, event: IbcEvent) -> Result<(), Error>; + fn emit_ibc_event(&mut self, event: IbcEvent) -> StorageResult<()>; /// Transfer token fn transfer_token( @@ -28,7 +28,7 @@ pub trait IbcStorageContext { dest: &Address, token: &Address, amount: Amount, - ) -> Result<(), Error>; + ) -> StorageResult<()>; /// Mint token fn mint_token( @@ -36,7 +36,7 @@ pub trait IbcStorageContext { target: &Address, token: &Address, amount: Amount, - ) -> Result<(), Error>; + ) -> StorageResult<()>; /// Burn token fn burn_token( @@ -44,10 +44,10 @@ pub trait IbcStorageContext { target: &Address, token: &Address, amount: Amount, - ) -> Result<(), Error>; + ) -> StorageResult<()>; /// Insert the verifier - fn insert_verifier(&mut self, verifier: &Address) -> Result<(), Error>; + fn insert_verifier(&mut self, verifier: &Address) -> StorageResult<()>; /// Logging fn log_string(&self, message: String); diff --git a/crates/ibc/src/lib.rs b/crates/ibc/src/lib.rs index 91b9037e80..75780eb13f 100644 --- a/crates/ibc/src/lib.rs +++ b/crates/ibc/src/lib.rs @@ -91,7 +91,7 @@ use namada_core::token::Amount; use namada_events::EmitEvents; use namada_state::{ DBIter, Key, ResultExt, State, StorageError, StorageHasher, StorageRead, - StorageWrite, WlState, DB, + StorageResult, 
StorageWrite, WlState, DB, }; use namada_systems::ibc::ChangedBalances; use namada_systems::trans_token; @@ -139,7 +139,7 @@ pub enum Error { #[error("Invalid chain ID: {0}")] ChainId(IdentifierError), #[error("Verifier insertion error: {0}")] - Verifier(namada_storage::Error), + Verifier(StorageError), } struct IbcTransferInfo { @@ -154,13 +154,13 @@ struct IbcTransferInfo { } impl TryFrom for IbcTransferInfo { - type Error = namada_storage::Error; + type Error = StorageError; fn try_from( message: IbcMsgTransfer, ) -> std::result::Result { let packet_data = serde_json::to_vec(&message.packet_data) - .map_err(namada_storage::Error::new)?; + .map_err(StorageError::new)?; let ibc_traces = vec![message.packet_data.token.denom.to_string()]; let amount = message .packet_data @@ -183,13 +183,13 @@ impl TryFrom for IbcTransferInfo { } impl TryFrom for IbcTransferInfo { - type Error = namada_storage::Error; + type Error = StorageError; fn try_from( message: IbcMsgNftTransfer, ) -> std::result::Result { let packet_data = serde_json::to_vec(&message.packet_data) - .map_err(namada_storage::Error::new)?; + .map_err(StorageError::new)?; let ibc_traces = message .packet_data .token_ids @@ -223,14 +223,13 @@ where { fn try_extract_masp_tx_from_envelope( tx_data: &[u8], - ) -> namada_storage::Result> - { + ) -> StorageResult> { let msg = decode_message::(tx_data) .into_storage_result() .ok(); let tx = if let Some(IbcMessage::Envelope(ref envelope)) = msg { Some(extract_masp_tx_from_envelope(envelope).ok_or_else(|| { - namada_storage::Error::new_const( + StorageError::new_const( "Missing MASP transaction in IBC message", ) })?) 
@@ -245,7 +244,7 @@ where tx_data: &[u8], mut accum: ChangedBalances, keys_changed: &BTreeSet, - ) -> namada_storage::Result { + ) -> StorageResult { let msg = decode_message::(tx_data) .into_storage_result() .ok(); @@ -285,7 +284,7 @@ where let packet_data = serde_json::from_slice::( &msg.packet.data, ) - .map_err(namada_storage::Error::new)?; + .map_err(StorageError::new)?; let receiver = packet_data.receiver.to_string(); let addr = TAddrData::Ibc(receiver.clone()); accum.decoder.insert(ibc_taddr(receiver), addr); @@ -308,7 +307,7 @@ where serde_json::from_slice::( &msg.packet.data, ) - .map_err(namada_storage::Error::new)?; + .map_err(StorageError::new)?; let receiver = packet_data.receiver.to_string(); let addr = TAddrData::Ibc(receiver.clone()); accum.decoder.insert(ibc_taddr(receiver), addr); @@ -343,7 +342,7 @@ fn check_ibc_transfer( storage: &S, ibc_transfer: &IbcTransferInfo, keys_changed: &BTreeSet, -) -> namada_storage::Result<()> +) -> StorageResult<()> where S: StorageRead, { @@ -361,7 +360,7 @@ where storage::commitment_key(src_port_id, src_channel_id, sequence); if !keys_changed.contains(&commitment_key) { - return Err(namada_storage::Error::new_alloc(format!( + return Err(StorageError::new_alloc(format!( "Expected IBC transfer didn't happen: Port ID {src_port_id}, \ Channel ID {src_channel_id}, Sequence {sequence}" ))); @@ -371,7 +370,7 @@ where // IBC VP isn't triggered. let actual: PacketCommitment = storage .read_bytes(&commitment_key)? - .ok_or(namada_storage::Error::new_alloc(format!( + .ok_or(StorageError::new_alloc(format!( "Packet commitment doesn't exist: Port ID {src_port_id}, Channel \ ID {src_channel_id}, Sequence {sequence}" )))? 
@@ -382,7 +381,7 @@ where timeout_timestamp, ); if actual != expected { - return Err(namada_storage::Error::new_alloc(format!( + return Err(StorageError::new_alloc(format!( "Packet commitment mismatched: Port ID {src_port_id}, Channel ID \ {src_channel_id}, Sequence {sequence}" ))); @@ -395,14 +394,14 @@ where fn check_packet_receiving( msg: &IbcMsgRecvPacket, keys_changed: &BTreeSet, -) -> namada_storage::Result<()> { +) -> StorageResult<()> { let receipt_key = storage::receipt_key( &msg.packet.port_id_on_b, &msg.packet.chan_id_on_b, msg.packet.seq_on_a, ); if !keys_changed.contains(&receipt_key) { - return Err(namada_storage::Error::new_alloc(format!( + return Err(StorageError::new_alloc(format!( "The packet has not been received: Port ID {}, Channel ID {}, \ Sequence {}", msg.packet.port_id_on_b, @@ -419,7 +418,7 @@ fn apply_transfer_msg( mut accum: ChangedBalances, ibc_transfer: &IbcTransferInfo, keys_changed: &BTreeSet, -) -> namada_storage::Result +) -> StorageResult where S: StorageRead, { @@ -449,8 +448,7 @@ where .unwrap_or(ValueSum::zero()); accum.post.insert( ibc_taddr, - checked!(post_entry - &delta) - .map_err(namada_storage::Error::new)?, + checked!(post_entry - &delta).map_err(StorageError::new)?, ); } // Record an increase to the balance of a specific IBC receiver @@ -461,8 +459,7 @@ where .unwrap_or(ValueSum::zero()); accum.post.insert( receiver, - checked!(post_entry + &delta) - .map_err(namada_storage::Error::new)?, + checked!(post_entry + &delta).map_err(StorageError::new)?, ); } @@ -475,7 +472,7 @@ fn is_receiving_success( dst_port_id: &PortId, dst_channel_id: &ChannelId, sequence: Sequence, -) -> namada_storage::Result +) -> StorageResult where S: StorageRead, { @@ -503,7 +500,7 @@ fn apply_recv_msg( ibc_traces: Vec, amount: Amount, keys_changed: &BTreeSet, -) -> namada_storage::Result +) -> StorageResult where S: StorageRead, { @@ -549,8 +546,7 @@ where .unwrap_or(ValueSum::zero()); accum.pre.insert( ibc_taddr, - checked!(pre_entry + &delta) 
- .map_err(namada_storage::Error::new)?, + checked!(pre_entry + &delta).map_err(StorageError::new)?, ); } } diff --git a/wasm/Cargo.lock b/wasm/Cargo.lock index 34644b10c0..ee92990b35 100644 --- a/wasm/Cargo.lock +++ b/wasm/Cargo.lock @@ -3694,7 +3694,6 @@ dependencies = [ "namada_gas", "namada_macros", "namada_state", - "namada_storage", "namada_systems", "namada_tx", "namada_vp", diff --git a/wasm_for_tests/Cargo.lock b/wasm_for_tests/Cargo.lock index 59b566474c..754f1c2be4 100644 --- a/wasm_for_tests/Cargo.lock +++ b/wasm_for_tests/Cargo.lock @@ -1996,7 +1996,6 @@ dependencies = [ "namada_gas", "namada_macros", "namada_state", - "namada_storage", "namada_systems", "namada_tx", "namada_vp", From 821e5188dc852ca994807767a3877fed267b1228 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Wed, 21 Aug 2024 11:31:51 +0100 Subject: [PATCH 26/73] token: refactor imports --- crates/core/src/masp.rs | 5 +---- crates/light_sdk/src/transaction/transfer.rs | 4 ++-- crates/node/src/protocol.rs | 5 ++--- crates/sdk/src/signing.rs | 6 ++++-- crates/shielded_token/src/lib.rs | 1 + crates/token/src/lib.rs | 14 ++++++-------- crates/tx/src/action.rs | 6 +++--- crates/tx/src/types.rs | 12 ++++++------ 8 files changed, 25 insertions(+), 28 deletions(-) diff --git a/crates/core/src/masp.rs b/crates/core/src/masp.rs index 47b7daaf22..7bc4429b27 100644 --- a/crates/core/src/masp.rs +++ b/crates/core/src/masp.rs @@ -80,9 +80,6 @@ impl From for MaspTxId { } } -/// Wrapper for masp_primitive's TxId -pub type TxId = MaspTxId; - /// Wrapper type around `Epoch` for type safe operations involving the masp /// epoch #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] @@ -763,7 +760,7 @@ impl FromStr for MaspValue { /// The masp transactions' references of a given batch #[derive(Default, Clone, Serialize, Deserialize)] -pub struct MaspTxRefs(pub Vec); +pub struct MaspTxRefs(pub Vec); impl Display for MaspTxRefs { fn fmt(&self, f: &mut 
std::fmt::Formatter<'_>) -> std::fmt::Result { diff --git a/crates/light_sdk/src/transaction/transfer.rs b/crates/light_sdk/src/transaction/transfer.rs index 167db94ce3..bfceea1618 100644 --- a/crates/light_sdk/src/transaction/transfer.rs +++ b/crates/light_sdk/src/transaction/transfer.rs @@ -1,7 +1,7 @@ use namada_sdk::address::Address; use namada_sdk::hash::Hash; use namada_sdk::key::common; -use namada_sdk::masp::TxId; +use namada_sdk::masp::MaspTxId; use namada_sdk::token::transaction::Transaction; pub use namada_sdk::token::{DenominatedAmount, Transfer}; use namada_sdk::tx::data::GasLimit; @@ -26,7 +26,7 @@ impl TransferBuilder { /// Build a shielded transfer transaction from the given parameters pub fn shielded( - shielded_section_hash: TxId, + shielded_section_hash: MaspTxId, transaction: Transaction, args: GlobalArgs, ) -> Self { diff --git a/crates/node/src/protocol.rs b/crates/node/src/protocol.rs index b96b80545b..80fa62227b 100644 --- a/crates/node/src/protocol.rs +++ b/crates/node/src/protocol.rs @@ -14,7 +14,6 @@ use namada_sdk::events::EventLevel; use namada_sdk::gas::{self, Gas, GasMetering, TxGasMeter, VpGasMeter}; use namada_sdk::hash::Hash; use namada_sdk::ibc::{IbcTxDataHash, IbcTxDataRefs}; -use namada_sdk::masp::{MaspTxRefs, TxId}; use namada_sdk::parameters::get_gas_scale; use namada_sdk::state::{ DBIter, State, StorageHasher, StorageRead, TxWrites, WlState, DB, @@ -22,7 +21,7 @@ use namada_sdk::state::{ use namada_sdk::storage::TxIndex; use namada_sdk::token::event::{TokenEvent, TokenOperation}; use namada_sdk::token::utils::is_masp_transfer; -use namada_sdk::token::Amount; +use namada_sdk::token::{Amount, MaspTxId, MaspTxRefs}; use namada_sdk::tx::action::{self, Read}; use namada_sdk::tx::data::protocol::{ProtocolTx, ProtocolTxType}; use namada_sdk::tx::data::{ @@ -453,7 +452,7 @@ where /// Transaction result for masp transfer pub struct MaspTxResult { tx_result: BatchedTxResult, - masp_section_ref: Either, + masp_section_ref: Either, } /// 
Performs the required operation on a wrapper transaction: diff --git a/crates/sdk/src/signing.rs b/crates/sdk/src/signing.rs index 737fb34953..466e302548 100644 --- a/crates/sdk/src/signing.rs +++ b/crates/sdk/src/signing.rs @@ -15,7 +15,9 @@ use namada_core::address::{Address, ImplicitAddress, InternalAddress, MASP}; use namada_core::arith::checked; use namada_core::collections::{HashMap, HashSet}; use namada_core::key::*; -use namada_core::masp::{AssetData, ExtendedViewingKey, PaymentAddress, TxId}; +use namada_core::masp::{ + AssetData, ExtendedViewingKey, MaspTxId, PaymentAddress, +}; use namada_core::token::{Amount, DenominatedAmount}; use namada_governance::storage::proposal::{ InitProposalData, ProposalType, VoteProposalData, @@ -901,7 +903,7 @@ fn proposal_type_to_ledger_vector( // builder. fn find_masp_builder<'a>( tx: &'a Tx, - shielded_section_hash: Option, + shielded_section_hash: Option, asset_types: &mut HashMap, ) -> Result, std::io::Error> { for section in &tx.sections { diff --git a/crates/shielded_token/src/lib.rs b/crates/shielded_token/src/lib.rs index 5592d22504..6d6b36d113 100644 --- a/crates/shielded_token/src/lib.rs +++ b/crates/shielded_token/src/lib.rs @@ -29,6 +29,7 @@ use std::str::FromStr; pub use masp_primitives::transaction; use namada_core::borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; pub use namada_core::dec::Dec; +pub use namada_core::masp::{MaspEpoch, MaspTxId, MaspTxRefs, MaspValue}; pub use namada_state::{ ConversionLeaf, ConversionState, Key, OptionExt, ResultExt, StorageError, StorageRead, StorageResult, StorageWrite, WithConversionState, diff --git a/crates/token/src/lib.rs b/crates/token/src/lib.rs index b8dad40671..a7e6953150 100644 --- a/crates/token/src/lib.rs +++ b/crates/token/src/lib.rs @@ -18,7 +18,11 @@ clippy::print_stderr )] +use std::collections::BTreeMap; + +use namada_core::address::Address; use namada_core::borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; +use namada_events::EmitEvents; use 
namada_macros::BorshDeserializer; pub use namada_shielded_token::*; use namada_systems::parameters; @@ -82,12 +86,6 @@ pub mod storage_key { } } -use std::collections::BTreeMap; - -use namada_core::address::Address; -use namada_core::masp::TxId; -use namada_events::EmitEvents; - /// Initialize parameters for the token in storage during the genesis block. pub fn write_params( params: &Option, @@ -170,12 +168,12 @@ pub struct Transfer { /// Targets of this transfer pub targets: BTreeMap, /// Hash of tx section that contains the MASP transaction - pub shielded_section_hash: Option, + pub shielded_section_hash: Option, } impl Transfer { /// Create a MASP transaction - pub fn masp(hash: TxId) -> Self { + pub fn masp(hash: MaspTxId) -> Self { Self { shielded_section_hash: Some(hash), ..Self::default() diff --git a/crates/tx/src/action.rs b/crates/tx/src/action.rs index a25049623b..553a579a45 100644 --- a/crates/tx/src/action.rs +++ b/crates/tx/src/action.rs @@ -8,7 +8,7 @@ use std::fmt; use namada_core::address::Address; use namada_core::borsh::{BorshDeserialize, BorshSerialize}; -use namada_core::masp::TxId; +use namada_core::masp::MaspTxId; use namada_core::storage::KeySeg; use namada_core::{address, storage}; @@ -68,7 +68,7 @@ pub enum PgfAction { #[derive(Clone, Debug, BorshDeserialize, BorshSerialize, PartialEq)] pub enum MaspAction { /// The hash of the masp [`crate::types::Section`] - MaspSectionRef(TxId), + MaspSectionRef(MaspTxId), /// A required authorizer for the transaction MaspAuthorizer(Address), } @@ -125,7 +125,7 @@ fn storage_key() -> storage::Key { /// [`Actions`]. 
If more than one [`MaspAction`] is found we return an error pub fn get_masp_section_ref( actions: &Actions, -) -> Result, &'static str> { +) -> Result, &'static str> { let masp_sections: Vec<_> = actions .iter() .filter_map(|action| { diff --git a/crates/tx/src/types.rs b/crates/tx/src/types.rs index b3947cf9f4..883dc03658 100644 --- a/crates/tx/src/types.rs +++ b/crates/tx/src/types.rs @@ -19,7 +19,7 @@ use namada_core::borsh::{ use namada_core::chain::{BlockHeight, ChainId}; use namada_core::collections::{HashMap, HashSet}; use namada_core::key::*; -use namada_core::masp::{AssetData, TxId}; +use namada_core::masp::{AssetData, MaspTxId}; use namada_core::storage::TxIndex; use namada_core::time::DateTimeUtc; use namada_macros::BorshDeserializer; @@ -742,7 +742,7 @@ impl From for Vec { )] pub struct MaspBuilder { /// The MASP transaction that this section witnesses - pub target: TxId, + pub target: MaspTxId, /// The decoded set of asset types used by the transaction. Useful for /// offline wallets trying to display AssetTypes. 
pub asset_types: HashSet, @@ -830,7 +830,7 @@ impl arbitrary::Arbitrary<'_> for MaspBuilder { arbitrary::size_hint::and_all( &[ ::size_hint(depth), - ::size_hint(depth), + ::size_hint(depth), as arbitrary::Arbitrary>::size_hint(depth), ::size_hint(depth), ], @@ -1282,10 +1282,10 @@ impl Tx { } /// Get the transaction section with the given hash - pub fn get_masp_section(&self, hash: &TxId) -> Option<&Transaction> { + pub fn get_masp_section(&self, hash: &MaspTxId) -> Option<&Transaction> { for section in &self.sections { if let Section::MaspTx(masp) = section { - if TxId::from(masp.txid()) == *hash { + if MaspTxId::from(masp.txid()) == *hash { return Some(masp); } } @@ -1679,7 +1679,7 @@ impl Tx { pub fn add_masp_tx_section( &mut self, tx: Transaction, - ) -> (&mut Self, TxId) { + ) -> (&mut Self, MaspTxId) { let txid = tx.txid(); self.add_section(Section::MaspTx(tx)); (self, txid.into()) From f85ebdea0640a2fb8fb37a6de49900d52cf0ae36 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Wed, 21 Aug 2024 11:42:51 +0100 Subject: [PATCH 27/73] parameters: remove direct `namada_storage` dep --- Cargo.lock | 1 - crates/parameters/Cargo.toml | 5 +-- crates/parameters/src/lib.rs | 55 +++++++++++-------------- crates/parameters/src/storage.rs | 30 ++++++-------- crates/parameters/src/vp.rs | 3 +- crates/parameters/src/wasm_allowlist.rs | 11 +++-- wasm/Cargo.lock | 1 - wasm_for_tests/Cargo.lock | 1 - 8 files changed, 46 insertions(+), 61 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4911ad3a8f..776605fdd7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5131,7 +5131,6 @@ dependencies = [ "namada_core", "namada_macros", "namada_state", - "namada_storage", "namada_systems", "namada_tx", "namada_vp", diff --git a/crates/parameters/Cargo.toml b/crates/parameters/Cargo.toml index 5e44fde53d..de2ad221d6 100644 --- a/crates/parameters/Cargo.toml +++ b/crates/parameters/Cargo.toml @@ -16,14 +16,13 @@ version.workspace = true default = [] testing = 
[ "namada_core/testing", - "namada_storage/testing", + "namada_state/testing", ] [dependencies] namada_core = { path = "../core" } namada_macros = { path = "../macros" } namada_state = { path = "../state" } -namada_storage = { path = "../storage" } namada_systems = { path = "../systems" } namada_tx = { path = "../tx" } namada_vp = { path = "../vp" } @@ -32,4 +31,4 @@ smooth-operator.workspace = true thiserror.workspace = true [dev-dependencies] -namada_storage = { path = "../storage", features = ["testing"] } +namada_state = { path = "../state", features = ["testing"] } diff --git a/crates/parameters/src/lib.rs b/crates/parameters/src/lib.rs index e5b68ac6cc..d924f4a483 100644 --- a/crates/parameters/src/lib.rs +++ b/crates/parameters/src/lib.rs @@ -29,7 +29,9 @@ use namada_core::chain::BlockHeight; pub use namada_core::parameters::ProposalBytes; use namada_core::time::DurationSecs; use namada_core::{hints, token}; -use namada_storage::{ResultExt, StorageRead, StorageWrite}; +use namada_state::{ + Key, ResultExt, StorageError, StorageRead, StorageResult, StorageWrite, +}; pub use namada_systems::parameters::*; pub use storage::{get_gas_scale, get_max_block_gas}; use thiserror::Error; @@ -40,7 +42,7 @@ pub use wasm_allowlist::{is_tx_allowed, is_vp_allowed}; pub struct Store(PhantomData); impl Keys for Store { - fn implicit_vp_key() -> namada_core::storage::Key { + fn implicit_vp_key() -> Key { storage::get_implicit_vp_key() } } @@ -99,7 +101,7 @@ pub const ADDRESS: Address = Address::Internal(InternalAddress::Parameters); #[derive(Error, Debug)] pub enum ReadError { #[error("Storage error: {0}")] - StorageError(namada_storage::Error), + StorageError(StorageError), #[error("Storage type error: {0}")] StorageTypeError(namada_core::storage::Error), #[error("Protocol parameters are missing, they must be always set")] @@ -110,7 +112,7 @@ pub enum ReadError { #[derive(Error, Debug)] pub enum WriteError { #[error("Storage error: {0}")] - 
StorageError(namada_storage::Error), + StorageError(StorageError), #[error("Serialize error: {0}")] SerializeError(String), } @@ -119,7 +121,7 @@ pub enum WriteError { pub fn init_storage( parameters: &Parameters, storage: &mut S, -) -> namada_storage::Result<()> +) -> StorageResult<()> where S: StorageRead + StorageWrite, { @@ -210,7 +212,7 @@ where pub fn update_vp_allowlist_parameter( storage: &mut S, value: Vec, -) -> namada_storage::Result<()> +) -> StorageResult<()> where S: StorageRead + StorageWrite, { @@ -229,7 +231,7 @@ where pub fn update_tx_allowlist_parameter( storage: &mut S, value: Vec, -) -> namada_storage::Result<()> +) -> StorageResult<()> where S: StorageRead + StorageWrite, { @@ -248,7 +250,7 @@ where pub fn update_epoch_parameter( storage: &mut S, value: &EpochDuration, -) -> namada_storage::Result<()> +) -> StorageResult<()> where S: StorageRead + StorageWrite, { @@ -261,7 +263,7 @@ where pub fn update_epochs_per_year_parameter( storage: &mut S, value: &u64, -) -> namada_storage::Result<()> +) -> StorageResult<()> where S: StorageRead + StorageWrite, { @@ -273,7 +275,7 @@ where pub fn update_implicit_vp( storage: &mut S, implicit_vp: &[u8], -) -> namada_storage::Result<()> +) -> StorageResult<()> where S: StorageRead + StorageWrite, { @@ -284,9 +286,7 @@ where } /// Read the epochs per year parameter from store -pub fn read_epochs_per_year_parameter( - storage: &S, -) -> namada_storage::Result +pub fn read_epochs_per_year_parameter(storage: &S) -> StorageResult where S: StorageRead, { @@ -300,7 +300,7 @@ where /// Read the epoch duration parameter from store pub fn read_epoch_duration_parameter( storage: &S, -) -> namada_storage::Result +) -> StorageResult where S: StorageRead, { @@ -315,7 +315,7 @@ where /// Read the masp epoch multiplier parameter from store pub fn read_masp_epoch_multiplier_parameter( storage: &S, -) -> namada_storage::Result +) -> StorageResult where S: StorageRead, { @@ -331,7 +331,7 @@ where pub fn read_gas_cost( 
storage: &S, token: &Address, -) -> namada_storage::Result> +) -> StorageResult> where S: StorageRead, { @@ -343,7 +343,7 @@ where } /// Read the number of epochs per year parameter -pub fn read_epochs_per_year(storage: &S) -> namada_storage::Result +pub fn read_epochs_per_year(storage: &S) -> StorageResult where S: StorageRead, { @@ -355,9 +355,7 @@ where } /// Retrieve the `max_proposal_bytes` consensus parameter from storage. -pub fn read_max_proposal_bytes( - storage: &S, -) -> namada_storage::Result +pub fn read_max_proposal_bytes(storage: &S) -> StorageResult where S: StorageRead, { @@ -370,7 +368,7 @@ where /// Read all the parameters from storage. Returns the parameters and gas /// cost. -pub fn read(storage: &S) -> namada_storage::Result +pub fn read(storage: &S) -> StorageResult where S: StorageRead, { @@ -479,10 +477,7 @@ where } /// Validate the size of a tx. -pub fn validate_tx_bytes( - storage: &S, - tx_size: usize, -) -> namada_storage::Result +pub fn validate_tx_bytes(storage: &S, tx_size: usize) -> StorageResult where S: StorageRead, { @@ -499,7 +494,7 @@ pub fn native_erc20_key() -> storage::Key { /// Initialize parameters to the storage for testing #[cfg(any(test, feature = "testing"))] -pub fn init_test_storage(storage: &mut S) -> namada_storage::Result<()> +pub fn init_test_storage(storage: &mut S) -> StorageResult<()> where S: StorageRead + StorageWrite, { @@ -532,7 +527,7 @@ pub fn estimate_max_block_time_from_blocks( storage: &S, last_block_height: BlockHeight, num_blocks_to_read: u64, -) -> namada_storage::Result> +) -> StorageResult> where S: StorageRead, { @@ -583,7 +578,7 @@ where /// based on chain parameters. 
pub fn estimate_max_block_time_from_parameters( storage: &S, -) -> namada_storage::Result +) -> StorageResult where S: StorageRead, { @@ -613,7 +608,7 @@ pub fn estimate_max_block_time_from_blocks_and_params( storage: &S, last_block_height: BlockHeight, num_blocks_to_read: u64, -) -> namada_storage::Result +) -> StorageResult where S: StorageRead, { @@ -631,7 +626,7 @@ where mod tests { use namada_core::chain::BlockHeader; use namada_core::time::DateTimeUtc; - use namada_storage::testing::TestStorage; + use namada_state::testing::TestStorage; use super::*; diff --git a/crates/parameters/src/storage.rs b/crates/parameters/src/storage.rs index f0435f7e6d..4de92ba473 100644 --- a/crates/parameters/src/storage.rs +++ b/crates/parameters/src/storage.rs @@ -4,7 +4,7 @@ use namada_core::address::Address; use namada_core::storage::DbKeySeg; pub use namada_core::storage::Key; use namada_macros::StorageKeys; -use namada_storage::StorageRead; +use namada_state::{StorageError, StorageRead, StorageResult}; use super::ADDRESS; @@ -158,26 +158,22 @@ pub fn get_gas_cost_key() -> Key { /// Helper function to retrieve the `max_block_gas` protocol parameter from /// storage -pub fn get_max_block_gas( - storage: &impl StorageRead, -) -> std::result::Result { - storage.read(&get_max_block_gas_key())?.ok_or( - namada_storage::Error::SimpleMessage( +pub fn get_max_block_gas(storage: &impl StorageRead) -> StorageResult { + storage + .read(&get_max_block_gas_key())? + .ok_or(StorageError::SimpleMessage( "Missing max_block_gas parameter from storage", - ), - ) + )) } /// Helper function to retrieve the `gas_scale` protocol parameter from /// storage -pub fn get_gas_scale( - storage: &impl StorageRead, -) -> std::result::Result { - storage.read(&get_gas_scale_key())?.ok_or( - namada_storage::Error::SimpleMessage( +pub fn get_gas_scale(storage: &impl StorageRead) -> StorageResult { + storage + .read(&get_gas_scale_key())? 
+ .ok_or(StorageError::SimpleMessage( "Missing gas_scale parameter from storage", - ), - ) + )) } /// Storage key used for the flag to enable the native token transfer @@ -189,9 +185,9 @@ pub fn get_native_token_transferable_key() -> Key { /// parameter from storage pub fn is_native_token_transferable( storage: &impl StorageRead, -) -> std::result::Result { +) -> StorageResult { storage.read(&get_native_token_transferable_key())?.ok_or( - namada_storage::Error::SimpleMessage( + StorageError::SimpleMessage( "Missing is_native_token_transferable parameter from storage", ), ) diff --git a/crates/parameters/src/vp.rs b/crates/parameters/src/vp.rs index b738bb61aa..6978fe988e 100644 --- a/crates/parameters/src/vp.rs +++ b/crates/parameters/src/vp.rs @@ -5,8 +5,7 @@ use std::marker::PhantomData; use namada_core::address::Address; use namada_core::booleans::BoolResultUnitExt; -use namada_core::storage::Key; -use namada_state::StateRead; +use namada_state::{Key, StateRead}; use namada_systems::governance; use namada_tx::BatchedTxRef; use namada_vp::native_vp::{ diff --git a/crates/parameters/src/wasm_allowlist.rs b/crates/parameters/src/wasm_allowlist.rs index c22b4b6fc1..1dad4e02a6 100644 --- a/crates/parameters/src/wasm_allowlist.rs +++ b/crates/parameters/src/wasm_allowlist.rs @@ -1,6 +1,5 @@ use namada_core::hash::Hash; -use namada_core::storage; -use namada_storage::{Result, StorageRead}; +use namada_state::{Key, StorageRead, StorageResult}; use crate::storage::{ get_tx_allowlist_storage_key, get_vp_allowlist_storage_key, @@ -8,7 +7,7 @@ use crate::storage::{ /// Check if the given tx code `Hash` is in the allowlist. When the allowlist is /// empty it always returns true. -pub fn is_tx_allowed(storage: &S, tx_hash: &Hash) -> Result +pub fn is_tx_allowed(storage: &S, tx_hash: &Hash) -> StorageResult where S: StorageRead, { @@ -18,7 +17,7 @@ where /// Check if the given VP code `Hash` is in the allowlist. When the allowlist is /// empty it always returns true. 
-pub fn is_vp_allowed(storage: &S, vp_hash: &Hash) -> Result +pub fn is_vp_allowed(storage: &S, vp_hash: &Hash) -> StorageResult where S: StorageRead, { @@ -28,9 +27,9 @@ where fn is_allowed( storage: &S, - allowlist_key: storage::Key, + allowlist_key: Key, hash: &Hash, -) -> Result +) -> StorageResult where S: StorageRead, { diff --git a/wasm/Cargo.lock b/wasm/Cargo.lock index ee92990b35..61fca99c3d 100644 --- a/wasm/Cargo.lock +++ b/wasm/Cargo.lock @@ -3740,7 +3740,6 @@ dependencies = [ "namada_core", "namada_macros", "namada_state", - "namada_storage", "namada_systems", "namada_tx", "namada_vp", diff --git a/wasm_for_tests/Cargo.lock b/wasm_for_tests/Cargo.lock index 754f1c2be4..1fd63c3a3c 100644 --- a/wasm_for_tests/Cargo.lock +++ b/wasm_for_tests/Cargo.lock @@ -2041,7 +2041,6 @@ dependencies = [ "namada_core", "namada_macros", "namada_state", - "namada_storage", "namada_systems", "namada_tx", "namada_vp", From bcb072143f7e2a6e6cdce20d5ef98862c0b14917 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Wed, 21 Aug 2024 12:21:25 +0100 Subject: [PATCH 28/73] tx_prelude: refactors --- Cargo.lock | 3 +- crates/light_sdk/src/transaction/transfer.rs | 8 +-- crates/shielded_token/src/lib.rs | 2 +- crates/tx_prelude/.gitignore | 10 --- crates/tx_prelude/Cargo.toml | 3 +- crates/tx_prelude/src/account.rs | 2 +- crates/tx_prelude/src/ibc.rs | 15 ++-- crates/tx_prelude/src/key.rs | 2 +- crates/tx_prelude/src/lib.rs | 74 +++++++++----------- crates/tx_prelude/src/pgf.rs | 4 +- crates/tx_prelude/src/proof_of_stake.rs | 8 +-- crates/tx_prelude/src/token.rs | 4 +- crates/vm/src/host_env.rs | 4 +- wasm/Cargo.lock | 3 +- wasm/tx_change_bridge_pool/src/lib.rs | 2 +- wasm_for_tests/Cargo.lock | 3 +- 16 files changed, 60 insertions(+), 87 deletions(-) delete mode 100644 crates/tx_prelude/.gitignore diff --git a/Cargo.lock b/Cargo.lock index 776605fdd7..968bdb0240 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5499,7 +5499,6 @@ name = "namada_tx_prelude" 
version = "0.43.0" dependencies = [ "borsh", - "masp_primitives", "namada_account", "namada_core", "namada_events", @@ -5509,7 +5508,7 @@ dependencies = [ "namada_macros", "namada_parameters", "namada_proof_of_stake", - "namada_storage", + "namada_state", "namada_token", "namada_tx", "namada_tx_env", diff --git a/crates/light_sdk/src/transaction/transfer.rs b/crates/light_sdk/src/transaction/transfer.rs index bfceea1618..66d4f5aba7 100644 --- a/crates/light_sdk/src/transaction/transfer.rs +++ b/crates/light_sdk/src/transaction/transfer.rs @@ -1,9 +1,9 @@ use namada_sdk::address::Address; use namada_sdk::hash::Hash; use namada_sdk::key::common; -use namada_sdk::masp::MaspTxId; -use namada_sdk::token::transaction::Transaction; -pub use namada_sdk::token::{DenominatedAmount, Transfer}; +pub use namada_sdk::token::{ + DenominatedAmount, MaspTransaction, MaspTxId, Transfer, +}; use namada_sdk::tx::data::GasLimit; use namada_sdk::tx::{Authorization, Tx, TxError, TX_TRANSFER_WASM}; @@ -27,7 +27,7 @@ impl TransferBuilder { /// Build a shielded transfer transaction from the given parameters pub fn shielded( shielded_section_hash: MaspTxId, - transaction: Transaction, + transaction: MaspTransaction, args: GlobalArgs, ) -> Self { let data = Transfer::masp(shielded_section_hash); diff --git a/crates/shielded_token/src/lib.rs b/crates/shielded_token/src/lib.rs index 6d6b36d113..1328c359dd 100644 --- a/crates/shielded_token/src/lib.rs +++ b/crates/shielded_token/src/lib.rs @@ -26,7 +26,7 @@ pub mod vp; use std::str::FromStr; -pub use masp_primitives::transaction; +pub use masp_primitives::transaction::Transaction as MaspTransaction; use namada_core::borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; pub use namada_core::dec::Dec; pub use namada_core::masp::{MaspEpoch, MaspTxId, MaspTxRefs, MaspValue}; diff --git a/crates/tx_prelude/.gitignore b/crates/tx_prelude/.gitignore deleted file mode 100644 index 65d4c18e2d..0000000000 --- a/crates/tx_prelude/.gitignore +++ /dev/null 
@@ -1,10 +0,0 @@ -# Generated by Cargo -# will have compiled files and executables -debug/ -target/ - -# These are backup files generated by rustfmt -**/*.rs.bk - -# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html -Cargo.lock \ No newline at end of file diff --git a/crates/tx_prelude/Cargo.toml b/crates/tx_prelude/Cargo.toml index a15aac9bfd..98833c9923 100644 --- a/crates/tx_prelude/Cargo.toml +++ b/crates/tx_prelude/Cargo.toml @@ -26,14 +26,13 @@ namada_ibc = { path = "../ibc" } namada_macros = { path = "../macros" } namada_parameters = { path = "../parameters" } namada_proof_of_stake = { path = "../proof_of_stake" } -namada_storage = { path = "../storage" } +namada_state = { path = "../state" } namada_token = { path = "../token" } namada_tx = { path = "../tx", default-features = false } namada_tx_env = { path = "../tx_env" } namada_vm_env = { path = "../vm_env" } borsh.workspace = true -masp_primitives.workspace = true [dev-dependencies] namada_token = { path = "../token", features = ["testing"] } diff --git a/crates/tx_prelude/src/account.rs b/crates/tx_prelude/src/account.rs index bcd08fd653..3784354734 100644 --- a/crates/tx_prelude/src/account.rs +++ b/crates/tx_prelude/src/account.rs @@ -10,7 +10,7 @@ pub fn init_account( ctx: &mut Ctx, owner: &Address, data: InitAccount, -) -> EnvResult<()> { +) -> Result<()> { namada_account::init_account_storage( ctx, owner, diff --git a/crates/tx_prelude/src/ibc.rs b/crates/tx_prelude/src/ibc.rs index bede0056ad..4a20dcae17 100644 --- a/crates/tx_prelude/src/ibc.rs +++ b/crates/tx_prelude/src/ibc.rs @@ -20,7 +20,7 @@ pub use namada_ibc::{ use namada_tx_env::TxEnv; use crate::token::transfer; -use crate::{Ctx, Error}; +use crate::{Ctx, Result}; /// IBC actions to handle an IBC message. 
The `verifiers` inserted into the set /// must be inserted into the tx context with `Ctx::insert_verifier` after tx @@ -54,10 +54,7 @@ impl IbcStorageContext for Ctx { super::log_string(message); } - fn emit_ibc_event( - &mut self, - event: IbcEvent, - ) -> std::result::Result<(), Error> { + fn emit_ibc_event(&mut self, event: IbcEvent) -> Result<()> { ::emit_event(self, event) } @@ -67,7 +64,7 @@ impl IbcStorageContext for Ctx { dest: &Address, token: &Address, amount: Amount, - ) -> std::result::Result<(), Error> { + ) -> Result<()> { transfer(self, src, dest, token, amount) } @@ -76,7 +73,7 @@ impl IbcStorageContext for Ctx { target: &Address, token: &Address, amount: Amount, - ) -> Result<(), Error> { + ) -> Result<()> { mint_tokens::<_, crate::token::Store<_>>(self, target, token, amount) } @@ -85,11 +82,11 @@ impl IbcStorageContext for Ctx { target: &Address, token: &Address, amount: Amount, - ) -> Result<(), Error> { + ) -> Result<()> { burn_tokens::<_, crate::token::Store<_>>(self, target, token, amount) } - fn insert_verifier(&mut self, addr: &Address) -> Result<(), Error> { + fn insert_verifier(&mut self, addr: &Address) -> Result<()> { TxEnv::insert_verifier(self, addr) } } diff --git a/crates/tx_prelude/src/key.rs b/crates/tx_prelude/src/key.rs index 7f499b8254..5aa1b6cafe 100644 --- a/crates/tx_prelude/src/key.rs +++ b/crates/tx_prelude/src/key.rs @@ -6,6 +6,6 @@ use super::*; /// Reveal a PK of an implicit account - the PK is written into the storage /// of the address derived from the PK. 
-pub fn reveal_pk(ctx: &mut Ctx, pk: &common::PublicKey) -> EnvResult<()> { +pub fn reveal_pk(ctx: &mut Ctx, pk: &common::PublicKey) -> Result<()> { namada_account::reveal_pk(ctx, pk) } diff --git a/crates/tx_prelude/src/lib.rs b/crates/tx_prelude/src/lib.rs index 8b8d9d409a..abe4e7d8d1 100644 --- a/crates/tx_prelude/src/lib.rs +++ b/crates/tx_prelude/src/lib.rs @@ -23,7 +23,6 @@ pub mod token; use core::slice; use std::marker::PhantomData; -use masp_primitives::transaction::Transaction; use namada_account::AccountPublicKeysMap; pub use namada_core::address::Address; pub use namada_core::borsh::{ @@ -42,10 +41,11 @@ use namada_events::{EmitEvents, Event, EventToEmit, EventType}; pub use namada_governance::storage as gov_storage; pub use namada_macros::transaction; pub use namada_parameters::storage as parameters_storage; -pub use namada_storage::{ - collections, iter_prefix, iter_prefix_bytes, Error, OptionExt, ResultExt, - StorageRead, StorageWrite, +pub use namada_state::{ + collections, iter_prefix, iter_prefix_bytes, OptionExt, ResultExt, + StorageError as Error, StorageRead, StorageResult as Result, StorageWrite, }; +use namada_token::MaspTransaction; pub use namada_tx::{action, data as transaction, BatchedTx, Section, Tx}; pub use namada_tx_env::TxEnv; use namada_vm_env::tx::*; @@ -123,10 +123,7 @@ impl Ctx { } /// Get the transaction data for the specified inner tx - pub fn get_tx_data( - &mut self, - batched_tx: &BatchedTx, - ) -> EnvResult> { + pub fn get_tx_data(&mut self, batched_tx: &BatchedTx) -> Result> { let BatchedTx { tx, ref cmt } = batched_tx; tx.data(cmt).ok_or_err_msg("Missing data").map_err(|err| { @@ -136,12 +133,8 @@ impl Ctx { } } -/// Result of `TxEnv`, `namada_storage::StorageRead` or -/// `namada_storage::StorageWrite` method call -pub type EnvResult = Result; - /// Transaction result -pub type TxResult = EnvResult<()>; +pub type TxResult = Result<()>; /// Storage key-val pair iterator #[derive(Debug)] @@ -150,21 +143,21 @@ pub struct 
KeyValIterator(pub u64, pub PhantomData); impl StorageRead for Ctx { type PrefixIter<'iter> = KeyValIterator<(String, Vec)>; - fn read_bytes(&self, key: &storage::Key) -> Result>, Error> { + fn read_bytes(&self, key: &storage::Key) -> Result>> { let key = key.to_string(); let read_result = unsafe { namada_tx_read(key.as_ptr() as _, key.len() as _) }; Ok(read_from_buffer(read_result, namada_tx_result_buffer)) } - fn has_key(&self, key: &storage::Key) -> Result { + fn has_key(&self, key: &storage::Key) -> Result { let key = key.to_string(); let found = unsafe { namada_tx_has_key(key.as_ptr() as _, key.len() as _) }; Ok(HostEnvResult::is_success(found)) } - fn get_chain_id(&self) -> Result { + fn get_chain_id(&self) -> Result { let result = Vec::with_capacity(CHAIN_ID_LENGTH); unsafe { namada_tx_get_chain_id(result.as_ptr() as _); @@ -175,14 +168,14 @@ impl StorageRead for Ctx { .expect("Cannot convert the ID string")) } - fn get_block_height(&self) -> Result { + fn get_block_height(&self) -> Result { Ok(BlockHeight(unsafe { namada_tx_get_block_height() })) } fn get_block_header( &self, height: BlockHeight, - ) -> Result, Error> { + ) -> Result> { let read_result = unsafe { namada_tx_get_block_header(height.0) }; match read_from_buffer(read_result, namada_tx_result_buffer) { Some(value) => Ok(Some( @@ -193,11 +186,11 @@ impl StorageRead for Ctx { } } - fn get_block_epoch(&self) -> Result { + fn get_block_epoch(&self) -> Result { Ok(Epoch(unsafe { namada_tx_get_block_epoch() })) } - fn get_pred_epochs(&self) -> Result { + fn get_pred_epochs(&self) -> Result { let read_result = unsafe { namada_tx_get_pred_epochs() }; let bytes = read_from_buffer(read_result, namada_tx_result_buffer) .ok_or(Error::SimpleMessage( @@ -207,7 +200,7 @@ impl StorageRead for Ctx { } /// Get the native token address - fn get_native_token(&self) -> Result { + fn get_native_token(&self) -> Result
{ let result = Vec::with_capacity(address::ADDRESS_LEN); unsafe { namada_tx_get_native_token(result.as_ptr() as _); @@ -223,7 +216,7 @@ impl StorageRead for Ctx { fn iter_prefix<'iter>( &'iter self, prefix: &storage::Key, - ) -> Result, Error> { + ) -> Result> { let prefix = prefix.to_string(); let iter_id = unsafe { namada_tx_iter_prefix(prefix.as_ptr() as _, prefix.len() as _) @@ -234,7 +227,7 @@ impl StorageRead for Ctx { fn iter_next<'iter>( &'iter self, iter: &mut Self::PrefixIter<'iter>, - ) -> Result)>, Error> { + ) -> Result)>> { let read_result = unsafe { namada_tx_iter_next(iter.0) }; Ok(read_key_val_bytes_from_buffer( read_result, @@ -242,7 +235,7 @@ impl StorageRead for Ctx { )) } - fn get_tx_index(&self) -> Result { + fn get_tx_index(&self) -> Result { let tx_index = unsafe { namada_tx_get_tx_index() }; Ok(TxIndex(tx_index)) } @@ -253,7 +246,7 @@ impl StorageWrite for Ctx { &mut self, key: &storage::Key, val: impl AsRef<[u8]>, - ) -> namada_storage::Result<()> { + ) -> Result<()> { let key = key.to_string(); unsafe { namada_tx_write( @@ -266,7 +259,7 @@ impl StorageWrite for Ctx { Ok(()) } - fn delete(&mut self, key: &storage::Key) -> namada_storage::Result<()> { + fn delete(&mut self, key: &storage::Key) -> Result<()> { let key = key.to_string(); unsafe { namada_tx_delete(key.as_ptr() as _, key.len() as _) }; Ok(()) @@ -294,10 +287,7 @@ impl EmitEvents for Ctx { } impl TxEnv for Ctx { - fn read_bytes_temp( - &self, - key: &storage::Key, - ) -> Result>, Error> { + fn read_bytes_temp(&self, key: &storage::Key) -> Result>> { let key = key.to_string(); let read_result = unsafe { namada_tx_read_temp(key.as_ptr() as _, key.len() as _) }; @@ -308,7 +298,7 @@ impl TxEnv for Ctx { &mut self, key: &storage::Key, val: impl AsRef<[u8]>, - ) -> Result<(), Error> { + ) -> Result<()> { let key = key.to_string(); unsafe { namada_tx_write_temp( @@ -321,7 +311,7 @@ impl TxEnv for Ctx { Ok(()) } - fn insert_verifier(&mut self, addr: &Address) -> Result<(), Error> { + fn 
insert_verifier(&mut self, addr: &Address) -> Result<()> { let addr = addr.encode(); unsafe { namada_tx_insert_verifier(addr.as_ptr() as _, addr.len() as _) @@ -334,7 +324,7 @@ impl TxEnv for Ctx { code_hash: impl AsRef<[u8]>, code_tag: &Option, entropy_source: &[u8], - ) -> Result { + ) -> Result
{ let code_hash = code_hash.as_ref(); let code_tag = code_tag.serialize_to_vec(); let result = Vec::with_capacity(address::ESTABLISHED_ADDRESS_BYTES_LEN); @@ -364,7 +354,7 @@ impl TxEnv for Ctx { addr: &Address, code_hash: impl AsRef<[u8]>, code_tag: &Option, - ) -> Result<(), Error> { + ) -> Result<()> { let addr = addr.encode(); let code_hash = code_hash.as_ref(); let code_tag = code_tag.serialize_to_vec(); @@ -381,19 +371,19 @@ impl TxEnv for Ctx { Ok(()) } - fn emit_event(&mut self, event: E) -> Result<(), Error> { + fn emit_event(&mut self, event: E) -> Result<()> { let event: Event = event.into(); let event = borsh::to_vec(&event).unwrap(); unsafe { namada_tx_emit_event(event.as_ptr() as _, event.len() as _) }; Ok(()) } - fn charge_gas(&mut self, used_gas: u64) -> Result<(), Error> { + fn charge_gas(&mut self, used_gas: u64) -> Result<()> { unsafe { namada_tx_charge_gas(used_gas) }; Ok(()) } - fn get_events(&self, event_type: &EventType) -> Result, Error> { + fn get_events(&self, event_type: &EventType) -> Result> { let event_type = event_type.to_string(); let read_result = unsafe { namada_tx_get_events( @@ -419,7 +409,7 @@ impl namada_tx::action::Read for Ctx { fn read_temp( &self, key: &storage::Key, - ) -> Result, Self::Err> { + ) -> Result> { TxEnv::read_temp(self, key) } } @@ -429,7 +419,7 @@ impl namada_tx::action::Write for Ctx { &mut self, key: &storage::Key, val: T, - ) -> Result<(), Self::Err> { + ) -> Result<()> { TxEnv::write_temp(self, key, val) } } @@ -438,7 +428,7 @@ impl namada_tx::action::Write for Ctx { pub fn verify_signatures_of_pks( tx: &Tx, pks: Vec, -) -> EnvResult { +) -> Result { // Require signatures from all the given keys let threshold = u8::try_from(pks.len()).into_storage_result()?; let public_keys_index_map = AccountPublicKeysMap::from_iter(pks); @@ -462,8 +452,8 @@ pub fn verify_signatures_of_pks( /// Update the masp note commitment tree in storage with the new notes pub fn update_masp_note_commitment_tree( - transaction: 
&Transaction, -) -> EnvResult { + transaction: &MaspTransaction, +) -> Result { // Serialize transaction let transaction = transaction.serialize_to_vec(); diff --git a/crates/tx_prelude/src/pgf.rs b/crates/tx_prelude/src/pgf.rs index 517cac5d43..e62f0481cf 100644 --- a/crates/tx_prelude/src/pgf.rs +++ b/crates/tx_prelude/src/pgf.rs @@ -8,7 +8,7 @@ use super::*; pub fn update_steward_commission( ctx: &mut Ctx, data: UpdateStewardCommission, -) -> EnvResult<()> { +) -> Result<()> { namada_governance::pgf::storage::update_commission( ctx, data.steward, @@ -19,7 +19,7 @@ pub fn update_steward_commission( } /// Remove a steward -pub fn remove_steward(ctx: &mut Ctx, data: &Address) -> EnvResult<()> { +pub fn remove_steward(ctx: &mut Ctx, data: &Address) -> Result<()> { namada_governance::pgf::storage::remove_steward(ctx, data)?; Ok(()) diff --git a/crates/tx_prelude/src/proof_of_stake.rs b/crates/tx_prelude/src/proof_of_stake.rs index 33e239b381..575cddc22c 100644 --- a/crates/tx_prelude/src/proof_of_stake.rs +++ b/crates/tx_prelude/src/proof_of_stake.rs @@ -60,7 +60,7 @@ impl Ctx { source: Option<&Address>, validator: &Address, amount: token::Amount, - ) -> EnvResult { + ) -> Result { // The tx must be authorized by the source address let verifier = source.as_ref().unwrap_or(&validator); self.insert_verifier(verifier)?; @@ -89,7 +89,7 @@ impl Ctx { &mut self, source: Option<&Address>, validator: &Address, - ) -> EnvResult { + ) -> Result { // The tx must be authorized by the source address let verifier = source.as_ref().unwrap_or(&validator); self.insert_verifier(verifier)?; @@ -201,7 +201,7 @@ impl Ctx { &mut self, source: Option<&Address>, validator: &Address, - ) -> EnvResult { + ) -> Result { // The tx must be authorized by the source address let verifier = source.as_ref().unwrap_or(&validator); self.insert_verifier(verifier)?; @@ -239,7 +239,7 @@ impl Ctx { avatar, name, }: BecomeValidator, - ) -> EnvResult
{ + ) -> Result
{ let current_epoch = self.get_block_epoch()?; let eth_cold_key = key::common::PublicKey::Secp256k1(eth_cold_key); let eth_hot_key = key::common::PublicKey::Secp256k1(eth_hot_key); diff --git a/crates/tx_prelude/src/token.rs b/crates/tx_prelude/src/token.rs index 8c6e4f50ad..6718ebdfd3 100644 --- a/crates/tx_prelude/src/token.rs +++ b/crates/tx_prelude/src/token.rs @@ -14,7 +14,7 @@ pub use namada_token::{ }; use namada_tx_env::TxEnv; -use crate::{Ctx, TxResult}; +use crate::{Ctx, Result, TxResult}; /// A transparent token transfer that can be used in a transaction. pub fn transfer( @@ -57,7 +57,7 @@ pub fn multi_transfer( ctx: &mut Ctx, sources: &BTreeMap<(Address, Address), Amount>, dests: &BTreeMap<(Address, Address), Amount>, -) -> crate::EnvResult> { +) -> Result> { let debited_accounts = namada_token::multi_transfer(ctx, sources, dests)?; let mut evt_sources = BTreeMap::new(); diff --git a/crates/vm/src/host_env.rs b/crates/vm/src/host_env.rs index 134ebb26a7..1e4d86f52f 100644 --- a/crates/vm/src/host_env.rs +++ b/crates/vm/src/host_env.rs @@ -30,7 +30,7 @@ use namada_token::storage_key::{ is_any_minted_balance_key, is_any_minter_key, is_any_token_balance_key, is_any_token_parameter_key, }; -use namada_token::transaction::Transaction; +use namada_token::MaspTransaction; use namada_tx::data::TxSentinel; use namada_tx::{BatchedTx, BatchedTxRef, Tx, TxCommitments}; use namada_vp::vp_host_fns; @@ -2185,7 +2185,7 @@ where .map_err(|e| TxRuntimeError::MemoryError(Box::new(e)))?; tx_charge_gas::(env, gas)?; - let transaction = Transaction::try_from_slice(&serialized_transaction) + let transaction = MaspTransaction::try_from_slice(&serialized_transaction) .map_err(TxRuntimeError::EncodingError)?; match namada_token::utils::update_note_commitment_tree( diff --git a/wasm/Cargo.lock b/wasm/Cargo.lock index 61fca99c3d..ff9d872023 100644 --- a/wasm/Cargo.lock +++ b/wasm/Cargo.lock @@ -4041,7 +4041,6 @@ name = "namada_tx_prelude" version = "0.43.0" dependencies = [ 
"borsh", - "masp_primitives", "namada_account", "namada_core", "namada_events", @@ -4051,7 +4050,7 @@ dependencies = [ "namada_macros", "namada_parameters", "namada_proof_of_stake", - "namada_storage", + "namada_state", "namada_token", "namada_tx", "namada_tx_env", diff --git a/wasm/tx_change_bridge_pool/src/lib.rs b/wasm/tx_change_bridge_pool/src/lib.rs index 1933cf3346..7aabcce2a3 100644 --- a/wasm/tx_change_bridge_pool/src/lib.rs +++ b/wasm/tx_change_bridge_pool/src/lib.rs @@ -44,7 +44,7 @@ fn apply_tx(ctx: &mut Ctx, tx_data: BatchedTx) -> TxResult { Ok(()) } -fn native_erc20_address(ctx: &mut Ctx) -> EnvResult { +fn native_erc20_address(ctx: &mut Ctx) -> Result { debug_log!("Trying to get wnam key for Bridge pool transfer"); let addr = ctx .read(&native_erc20_key()) diff --git a/wasm_for_tests/Cargo.lock b/wasm_for_tests/Cargo.lock index 1fd63c3a3c..184c3f9f03 100644 --- a/wasm_for_tests/Cargo.lock +++ b/wasm_for_tests/Cargo.lock @@ -2232,7 +2232,6 @@ name = "namada_tx_prelude" version = "0.43.0" dependencies = [ "borsh", - "masp_primitives", "namada_account", "namada_core", "namada_events", @@ -2242,7 +2241,7 @@ dependencies = [ "namada_macros", "namada_parameters", "namada_proof_of_stake", - "namada_storage", + "namada_state", "namada_token", "namada_tx", "namada_tx_env", From afdcc24dc0a09c78f5a4bc861b5ec56fac56eca2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Thu, 22 Aug 2024 14:13:07 +0100 Subject: [PATCH 29/73] refactor errors wrapping with namada_storage::Error/Result --- .../ethereum_bridge/src/vp/bridge_pool_vp.rs | 91 ++-- .../ethereum_bridge/src/vp/eth_bridge_vp.rs | 30 +- crates/ethereum_bridge/src/vp/mod.rs | 6 +- crates/ethereum_bridge/src/vp/nut_vp.rs | 36 +- crates/governance/src/parameters.rs | 4 +- crates/governance/src/pgf/inflation.rs | 6 +- crates/governance/src/pgf/parameters.rs | 4 +- crates/governance/src/pgf/storage/mod.rs | 19 +- crates/governance/src/storage/mod.rs | 36 +- 
crates/governance/src/vp/mod.rs | 310 +++++------- crates/governance/src/vp/pgf.rs | 51 +- crates/ibc/src/actions.rs | 49 +- crates/ibc/src/context/common.rs | 6 +- crates/ibc/src/context/storage.rs | 12 +- crates/ibc/src/lib.rs | 10 +- crates/ibc/src/parameters.rs | 4 +- crates/ibc/src/storage.rs | 58 +-- crates/ibc/src/trace.rs | 11 +- crates/ibc/src/vp/context.rs | 12 +- crates/ibc/src/vp/mod.rs | 127 ++--- crates/merkle_tree/src/lib.rs | 23 +- crates/node/src/dry_run_tx.rs | 13 +- crates/node/src/lib.rs | 6 +- crates/node/src/protocol.rs | 468 +++++++++--------- crates/node/src/shell/finalize_block.rs | 19 +- crates/node/src/shell/governance.rs | 14 +- crates/node/src/shell/init_chain.rs | 4 +- crates/node/src/shell/mod.rs | 20 +- crates/node/src/shell/process_proposal.rs | 2 +- crates/node/src/shell/snapshots.rs | 6 +- crates/node/src/shell/utils.rs | 5 +- crates/parameters/src/lib.rs | 54 +- crates/parameters/src/storage.rs | 14 +- crates/parameters/src/vp.rs | 26 +- crates/parameters/src/wasm_allowlist.rs | 12 +- crates/proof_of_stake/src/epoched.rs | 74 ++- crates/proof_of_stake/src/error.rs | 24 +- crates/proof_of_stake/src/lib.rs | 16 +- crates/proof_of_stake/src/queries.rs | 32 +- crates/proof_of_stake/src/rewards.rs | 26 +- crates/proof_of_stake/src/slashing.rs | 56 +-- crates/proof_of_stake/src/storage.rs | 103 ++-- .../src/validator_set_update.rs | 32 +- crates/proof_of_stake/src/vp.rs | 94 ++-- crates/shielded_token/src/conversion.rs | 26 +- crates/shielded_token/src/lib.rs | 4 +- crates/shielded_token/src/storage.rs | 10 +- crates/shielded_token/src/utils.rs | 14 +- crates/shielded_token/src/validation.rs | 26 +- crates/shielded_token/src/vp.rs | 201 +++----- crates/state/src/host_env.rs | 12 +- crates/state/src/in_memory.rs | 14 +- crates/state/src/lib.rs | 43 +- crates/state/src/wl_state.rs | 78 ++- crates/state/src/write_log.rs | 8 +- crates/storage/src/error.rs | 81 ++- crates/tests/src/native_vp/mod.rs | 4 +- crates/tests/src/vm_host_env/ibc.rs 
| 16 +- crates/tests/src/vm_host_env/mod.rs | 7 +- crates/token/src/lib.rs | 11 +- crates/trans_token/src/lib.rs | 2 +- crates/trans_token/src/storage.rs | 69 ++- crates/trans_token/src/vp.rs | 98 +--- crates/tx_prelude/src/lib.rs | 2 +- crates/vm/src/host_env.rs | 280 +++++------ crates/vm/src/memory.rs | 2 +- crates/vm/src/wasm/memory.rs | 6 + crates/vm/src/wasm/run.rs | 32 +- crates/vp/src/native_vp.rs | 167 +++---- crates/vp/src/vp_host_fns.rs | 94 ++-- crates/vp_prelude/.gitignore | 10 - crates/vp_prelude/src/lib.rs | 104 ++-- 72 files changed, 1493 insertions(+), 1953 deletions(-) delete mode 100644 crates/vp_prelude/.gitignore diff --git a/crates/ethereum_bridge/src/vp/bridge_pool_vp.rs b/crates/ethereum_bridge/src/vp/bridge_pool_vp.rs index 9f56970cd5..a91b854aa9 100644 --- a/crates/ethereum_bridge/src/vp/bridge_pool_vp.rs +++ b/crates/ethereum_bridge/src/vp/bridge_pool_vp.rs @@ -13,7 +13,6 @@ use std::borrow::Cow; use std::collections::BTreeSet; -use std::fmt::Debug; use std::marker::PhantomData; use borsh::BorshDeserialize; @@ -30,7 +29,7 @@ use namada_core::uint::I320; use namada_state::{ResultExt, StateRead}; use namada_systems::trans_token::{self as token, Amount}; use namada_tx::BatchedTxRef; -use namada_vp::native_vp::{self, Ctx, NativeVp, StorageReader, VpEvaluator}; +use namada_vp::native_vp::{Ctx, Error, NativeVp, StorageReader, VpEvaluator}; use crate::storage::bridge_pool::{ get_pending_key, is_bridge_pool_key, BRIDGE_POOL_ADDRESS, @@ -40,11 +39,6 @@ use crate::storage::parameters::read_native_erc20_address; use crate::storage::whitelist; use crate::ADDRESS as BRIDGE_ADDRESS; -#[derive(thiserror::Error, Debug)] -#[error("Bridge Pool VP error: {0}")] -/// Generic error that may be returned by the validity predicate -pub struct Error(#[from] native_vp::Error); - /// An [`Amount`] that has been updated with some delta value. 
#[derive(Copy, Clone)] struct AmountDelta { @@ -58,8 +52,7 @@ impl AmountDelta { /// Resolve the updated amount by applying the delta value. #[inline] fn resolve(self) -> Result { - checked!(self.delta + I320::from(self.base)) - .map_err(|e| Error(e.into())) + checked!(self.delta + I320::from(self.base)).map_err(Into::into) } } @@ -120,7 +113,7 @@ where delta: checked!(I320::from(after) - I320::from(before)).map_err( |error| { tracing::warn!(?error, %account_key, "reading pre value"); - Error(error.into()) + error }, )?, })) @@ -184,11 +177,10 @@ where } // some other error occurred while calculating // balance deltas - _ => Err(native_vp::Error::AllocMessage(format!( + _ => Err(Error::AllocMessage(format!( "Could not calculate the balance delta for {}", payer_account - )) - .into()), + ))), } } @@ -259,10 +251,7 @@ where suffix: whitelist::KeyType::Whitelisted, } .into(); - (&self.ctx) - .read_pre_value(&key) - .map_err(Error)? - .unwrap_or(false) + (&self.ctx).read_pre_value(&key)?.unwrap_or(false) }; if !wnam_whitelisted { tracing::debug!( @@ -287,10 +276,7 @@ where suffix: whitelist::KeyType::Cap, } .into(); - (&self.ctx) - .read_pre_value(&key) - .map_err(Error)? - .unwrap_or_default() + (&self.ctx).read_pre_value(&key)?.unwrap_or_default() }; if escrowed_balance > I320::from(wnam_cap) { tracing::debug!( @@ -516,9 +502,9 @@ fn sum_gas_and_token_amounts( .amount .checked_add(transfer.transfer.amount) .ok_or_else(|| { - Error(native_vp::Error::SimpleMessage( + Error::new_const( "Addition overflowed adding gas fee + transfer amount.", - )) + ) }) } @@ -530,8 +516,6 @@ where CA: 'static + Clone, TokenKeys: token::Keys, { - type Error = Error; - fn validate_tx( &'view self, batched_tx: &BatchedTxRef<'_>, @@ -546,85 +530,73 @@ where if !is_bridge_active_at( &self.ctx.pre(), self.ctx.state.in_mem().get_current_epoch().0, - ) - .map_err(Error)? - { + )? { tracing::debug!( "Rejecting transaction, since the Ethereum bridge is disabled." 
); - return Err(native_vp::Error::SimpleMessage( + return Err(Error::new_const( "Rejecting transaction, since the Ethereum bridge is disabled.", - ) - .into()); + )); } let Some(tx_data) = batched_tx.tx.data(batched_tx.cmt) else { - return Err(native_vp::Error::SimpleMessage( - "No transaction data found", - ) - .into()); + return Err(Error::new_const("No transaction data found")); }; let transfer: PendingTransfer = BorshDeserialize::try_from_slice(&tx_data[..]) - .into_storage_result() - .map_err(Error)?; + .into_storage_result()?; let pending_key = get_pending_key(&transfer); // check that transfer is not already in the pool match (&self.ctx).read_pre_value::(&pending_key) { Ok(Some(_)) => { - let error = native_vp::Error::new_const( + let error = Error::new_const( "Rejecting transaction as the transfer is already in the \ Ethereum bridge pool.", - ) - .into(); + ); tracing::debug!("{error}"); return Err(error); } // NOTE: make sure we don't erase storage errors returned by the // ctx, as these may contain gas errors! 
- Err(e) => return Err(e.into()), + Err(e) => return Err(e), _ => {} } for key in keys_changed.iter().filter(|k| is_bridge_pool_key(k)) { if *key != pending_key { - let error = native_vp::Error::new_alloc(format!( + let error = Error::new_alloc(format!( "Rejecting transaction as it is attempting to change an \ incorrect key in the Ethereum bridge pool: {key}.\n \ Expected key: {pending_key}", - )) - .into(); + )); tracing::debug!("{error}"); return Err(error); } } let pending: PendingTransfer = (&self.ctx).read_post_value(&pending_key)?.ok_or_else(|| { - Error(native_vp::Error::SimpleMessage( + Error::new_const( "Rejecting transaction as the transfer wasn't added to \ the pool of pending transfers", - )) + ) })?; if pending != transfer { - let error = native_vp::Error::new_alloc(format!( + let error = Error::new_alloc(format!( "An incorrect transfer was added to the Ethereum bridge pool: \ {transfer:?}.\n Expected: {pending:?}", - )) - .into(); + )); tracing::debug!("{error}"); return Err(error); } // The deltas in the escrowed amounts we must check. - let wnam_address = - read_native_erc20_address(&self.ctx.pre()).map_err(Error)?; + let wnam_address = read_native_erc20_address(&self.ctx.pre())?; let escrow_checks = self.determine_escrow_checks(&wnam_address, &transfer)?; if !escrow_checks.validate::(keys_changed) { - let error = native_vp::Error::new_const( + let error = Error::new_const( // TODO(namada#3247): specify which storage changes are missing // or which ones are invalid "Invalid storage modifications in the Bridge pool", - ) - .into(); + ); tracing::debug!("{error}"); return Err(error); } @@ -634,10 +606,9 @@ where &transfer, escrow_checks.gas_check, )? { - return Err(native_vp::Error::new_const( + return Err(Error::new_const( "Gas was not correctly escrowed into the Bridge pool storage", - ) - .into()); + )); } // check the escrowed assets if transfer.transfer.asset == wnam_address { @@ -647,19 +618,17 @@ where escrow_checks.token_check, )? 
.ok_or_else(|| { - native_vp::Error::new_const( + Error::new_const( "The wrapped NAM tokens were not escrowed properly", ) - .into() }) } else { self.check_escrowed_toks(escrow_checks.token_check)? .ok_or_else(|| { - native_vp::Error::new_alloc(format!( + Error::new_alloc(format!( "The {} tokens were not escrowed properly", transfer.transfer.asset )) - .into() }) } .inspect(|_| { diff --git a/crates/ethereum_bridge/src/vp/eth_bridge_vp.rs b/crates/ethereum_bridge/src/vp/eth_bridge_vp.rs index 421420376b..28e8e2f9da 100644 --- a/crates/ethereum_bridge/src/vp/eth_bridge_vp.rs +++ b/crates/ethereum_bridge/src/vp/eth_bridge_vp.rs @@ -10,16 +10,13 @@ use namada_core::storage::Key; use namada_state::StateRead; use namada_systems::trans_token::{self as token, Amount}; use namada_tx::BatchedTxRef; -use namada_vp::native_vp::{self, Ctx, NativeVp, StorageReader, VpEvaluator}; +use namada_vp::native_vp::{ + self, Ctx, NativeVp, Result, StorageReader, VpEvaluator, +}; use crate::storage; use crate::storage::escrow_key; -/// Generic error that may be returned by the validity predicate -#[derive(thiserror::Error, Debug)] -#[error("Ethereum Bridge VP error: {0}")] -pub struct Error(#[from] native_vp::Error); - /// Validity predicate for the Ethereum bridge pub struct EthBridge<'ctx, S, CA, EVAL, TokenKeys> where @@ -49,7 +46,7 @@ where /// If the Ethereum bridge's escrow key was written to, we check /// that the NAM balance increased and that the Bridge pool VP has /// been triggered. - fn check_escrow(&self, verifiers: &BTreeSet
) -> Result<(), Error> { + fn check_escrow(&self, verifiers: &BTreeSet
) -> Result<()> { let escrow_key = TokenKeys::balance_key( &self.ctx.state.in_mem().native_token, &crate::ADDRESS, @@ -72,14 +69,12 @@ where "Bridge pool VP was not marked as a verifier of the \ transaction", ) - .into() }) } else { Err(native_vp::Error::new_const( "User tx attempted to decrease the amount of native tokens \ escrowed in the Ethereum Bridge's account", - ) - .into()) + )) } } } @@ -92,8 +87,6 @@ where EVAL: 'static + VpEvaluator<'ctx, S, CA, EVAL>, TokenKeys: token::Keys, { - type Error = Error; - /// Validate that a wasm transaction is permitted to change keys under this /// account. /// @@ -109,7 +102,7 @@ where _: &BatchedTxRef<'_>, keys_changed: &BTreeSet, verifiers: &BTreeSet
, - ) -> Result<(), Self::Error> { + ) -> Result<()> { tracing::debug!( keys_changed_len = keys_changed.len(), verifiers_len = verifiers.len(), @@ -137,7 +130,7 @@ where fn validate_changed_keys( nam_addr: &Address, keys_changed: &BTreeSet, -) -> Result<(), Error> { +) -> Result<()> { // acquire all keys that either changed our account, or that touched // nam balances let keys_changed: HashSet<_> = keys_changed @@ -153,8 +146,7 @@ fn validate_changed_keys( return Err(native_vp::Error::SimpleMessage( "No keys changed under our account so this validity predicate \ shouldn't have been triggered", - ) - .into()); + )); } tracing::debug!( relevant_keys.len = keys_changed.len(), @@ -164,8 +156,7 @@ fn validate_changed_keys( if !nam_escrow_addr_modified { let error = native_vp::Error::new_const( "The native token's escrow balance should have been modified", - ) - .into(); + ); tracing::debug!("{error}"); return Err(error); } @@ -175,8 +166,7 @@ fn validate_changed_keys( if !all_keys_are_nam_balance { let error = native_vp::Error::new_const( "Some modified keys were not a native token's balance key", - ) - .into(); + ); tracing::debug!("{error}"); return Err(error); } diff --git a/crates/ethereum_bridge/src/vp/mod.rs b/crates/ethereum_bridge/src/vp/mod.rs index 2771996bd8..e16646f73e 100644 --- a/crates/ethereum_bridge/src/vp/mod.rs +++ b/crates/ethereum_bridge/src/vp/mod.rs @@ -6,6 +6,6 @@ mod bridge_pool_vp; mod eth_bridge_vp; mod nut_vp; -pub use bridge_pool_vp::{BridgePool, Error as BridgePoolError}; -pub use eth_bridge_vp::{Error as EthBridgeError, EthBridge}; -pub use nut_vp::{Error as NutError, NonUsableTokens}; +pub use bridge_pool_vp::BridgePool; +pub use eth_bridge_vp::EthBridge; +pub use nut_vp::NonUsableTokens; diff --git a/crates/ethereum_bridge/src/vp/nut_vp.rs b/crates/ethereum_bridge/src/vp/nut_vp.rs index bfeb65ec2b..9316029a69 100644 --- a/crates/ethereum_bridge/src/vp/nut_vp.rs +++ b/crates/ethereum_bridge/src/vp/nut_vp.rs @@ -9,14 +9,9 @@ use 
namada_core::storage::Key; use namada_state::StateRead; use namada_systems::trans_token::{self as token, Amount}; use namada_tx::BatchedTxRef; -use namada_vp::native_vp::{self, Ctx, NativeVp, VpEvaluator}; +use namada_vp::native_vp::{self, Ctx, Error, NativeVp, Result, VpEvaluator}; use namada_vp::VpEnv; -/// Generic error that may be returned by the validity predicate -#[derive(thiserror::Error, Debug)] -#[error("Non-usable token VP error: {0}")] -pub struct Error(#[from] native_vp::Error); - /// Validity predicate for non-usable tokens. /// /// All this VP does is reject NUT transfers whose destination @@ -39,14 +34,12 @@ where CA: 'static + Clone, TokenKeys: token::Keys, { - type Error = Error; - fn validate_tx( &'view self, _: &BatchedTxRef<'_>, keys_changed: &BTreeSet, verifiers: &BTreeSet
, - ) -> Result<(), Self::Error> { + ) -> Result<()> { tracing::debug!( keys_changed_len = keys_changed.len(), verifiers_len = verifiers.len(), @@ -56,9 +49,8 @@ where verifiers .contains(&Address::Internal(InternalAddress::Multitoken)) .ok_or_else(|| { - let error = Error(native_vp::Error::new_const( - "Rejecting non-multitoken transfer tx", - )); + let error = + Error::new_const("Rejecting non-multitoken transfer tx"); tracing::debug!("{error}"); error })?; @@ -73,16 +65,10 @@ where }); for (changed_key, token_owner) in nut_owners { - let pre: Amount = self - .ctx - .read_pre(changed_key) - .map_err(Error)? - .unwrap_or_default(); - let post: Amount = self - .ctx - .read_post(changed_key) - .map_err(Error)? - .unwrap_or_default(); + let pre: Amount = + self.ctx.read_pre(changed_key)?.unwrap_or_default(); + let post: Amount = + self.ctx.read_post(changed_key)?.unwrap_or_default(); match token_owner { // the NUT balance of the bridge pool should increase @@ -98,8 +84,7 @@ where "Bridge pool balance should have increased. The \ previous balance was {pre:?}, the post balance \ is {post:?}.", - )) - .into()); + ))); } } // arbitrary addresses should have their balance decrease @@ -115,8 +100,7 @@ where "Balance should have decreased. The previous \ balance was {pre:?}, the post balance is \ {post:?}." 
- )) - .into()); + ))); } } } diff --git a/crates/governance/src/parameters.rs b/crates/governance/src/parameters.rs index 77a60cc8bc..0002715ba8 100644 --- a/crates/governance/src/parameters.rs +++ b/crates/governance/src/parameters.rs @@ -3,7 +3,7 @@ use namada_core::token; use namada_macros::BorshDeserializer; #[cfg(feature = "migrations")] use namada_migrations::*; -use namada_state::{StorageRead, StorageResult, StorageWrite}; +use namada_state::{Result, StorageRead, StorageWrite}; use super::storage::keys as goverance_storage; @@ -54,7 +54,7 @@ impl Default for GovernanceParameters { impl GovernanceParameters { /// Initialize governance parameters into storage - pub fn init_storage(&self, storage: &mut S) -> StorageResult<()> + pub fn init_storage(&self, storage: &mut S) -> Result<()> where S: StorageRead + StorageWrite, { diff --git a/crates/governance/src/pgf/inflation.rs b/crates/governance/src/pgf/inflation.rs index 6cc814bf17..35f5597096 100644 --- a/crates/governance/src/pgf/inflation.rs +++ b/crates/governance/src/pgf/inflation.rs @@ -1,7 +1,7 @@ //! PGF lib code. 
use namada_core::address::Address; -use namada_state::{StorageRead, StorageResult, StorageWrite}; +use namada_state::{Result, StorageRead, StorageWrite}; use namada_systems::{parameters, trans_token}; use crate::pgf::storage::{ @@ -13,12 +13,12 @@ use crate::storage::proposal::{PGFIbcTarget, PGFTarget}; pub fn apply_inflation( storage: &mut S, transfer_over_ibc: F, -) -> StorageResult<()> +) -> Result<()> where S: StorageWrite + StorageRead, Params: parameters::Read, TransToken: trans_token::Read + trans_token::Write, - F: Fn(&mut S, &Address, &Address, &PGFIbcTarget) -> StorageResult<()>, + F: Fn(&mut S, &Address, &Address, &PGFIbcTarget) -> Result<()>, { let pgf_parameters = get_parameters(storage)?; let staking_token = storage.get_native_token()?; diff --git a/crates/governance/src/pgf/parameters.rs b/crates/governance/src/pgf/parameters.rs index 8471c781fc..985f2d262c 100644 --- a/crates/governance/src/pgf/parameters.rs +++ b/crates/governance/src/pgf/parameters.rs @@ -6,7 +6,7 @@ use namada_core::dec::Dec; use namada_macros::BorshDeserializer; #[cfg(feature = "migrations")] use namada_migrations::*; -use namada_state::{StorageRead, StorageResult, StorageWrite}; +use namada_state::{Result, StorageRead, StorageWrite}; use serde::{Deserialize, Serialize}; use super::storage::keys as pgf_storage; @@ -51,7 +51,7 @@ impl Default for PgfParameters { impl PgfParameters { /// Initialize governance parameters into storage - pub fn init_storage(&self, storage: &mut S) -> StorageResult<()> + pub fn init_storage(&self, storage: &mut S) -> Result<()> where S: StorageRead + StorageWrite, { diff --git a/crates/governance/src/pgf/storage/mod.rs b/crates/governance/src/pgf/storage/mod.rs index 576c824aa8..e37a6532c7 100644 --- a/crates/governance/src/pgf/storage/mod.rs +++ b/crates/governance/src/pgf/storage/mod.rs @@ -8,7 +8,7 @@ pub mod steward; use namada_core::address::Address; use namada_core::collections::HashMap; use namada_core::dec::Dec; -use 
namada_state::{StorageRead, StorageResult, StorageWrite}; +use namada_state::{Result, StorageRead, StorageWrite}; use crate::pgf::parameters::PgfParameters; use crate::pgf::storage::keys as pgf_keys; @@ -16,7 +16,7 @@ use crate::pgf::storage::steward::StewardDetail; use crate::storage::proposal::StoragePgfFunding; /// Query the current pgf steward set -pub fn get_stewards(storage: &S) -> StorageResult> +pub fn get_stewards(storage: &S) -> Result> where S: StorageRead, { @@ -35,7 +35,7 @@ where pub fn get_steward( storage: &S, address: &Address, -) -> StorageResult> +) -> Result> where S: StorageRead, { @@ -43,7 +43,7 @@ where } /// Check if an address is a steward -pub fn is_steward(storage: &S, address: &Address) -> StorageResult +pub fn is_steward(storage: &S, address: &Address) -> Result where S: StorageRead, { @@ -51,10 +51,7 @@ where } /// Remove a steward -pub fn remove_steward( - storage: &mut S, - address: &Address, -) -> StorageResult<()> +pub fn remove_steward(storage: &mut S, address: &Address) -> Result<()> where S: StorageRead + StorageWrite, { @@ -66,7 +63,7 @@ where /// Query the current pgf continuous payments pub fn get_continuous_pgf_payments( storage: &S, -) -> StorageResult> +) -> Result> where S: StorageRead, { @@ -82,7 +79,7 @@ where } /// Query the pgf parameters -pub fn get_parameters(storage: &S) -> StorageResult +pub fn get_parameters(storage: &S) -> Result where S: StorageRead, { @@ -109,7 +106,7 @@ pub fn update_commission( storage: &mut S, address: Address, reward_distribution: HashMap, -) -> StorageResult<()> +) -> Result<()> where S: StorageRead + StorageWrite, { diff --git a/crates/governance/src/storage/mod.rs b/crates/governance/src/storage/mod.rs index c4b59181a2..2f7002f6b2 100644 --- a/crates/governance/src/storage/mod.rs +++ b/crates/governance/src/storage/mod.rs @@ -15,9 +15,7 @@ use namada_core::chain::Epoch; use namada_core::collections::HashSet; use namada_core::hash::Hash; use namada_core::token; -use namada_state::{ - 
iter_prefix, StorageError, StorageRead, StorageResult, StorageWrite, -}; +use namada_state::{iter_prefix, Error, Result, StorageRead, StorageWrite}; use namada_systems::trans_token; use crate::parameters::GovernanceParameters; @@ -35,7 +33,7 @@ pub fn init_proposal( data: &InitProposalData, content: Vec, code: Option>, -) -> StorageResult +) -> Result where S: StorageRead + StorageWrite, TransToken: trans_token::Write, @@ -61,7 +59,7 @@ where governance_keys::get_proposal_code_key(proposal_id); let proposal_code = - code.ok_or(StorageError::new_const("Missing proposal code"))?; + code.ok_or(Error::new_const("Missing proposal code"))?; storage.write(&proposal_code_key, proposal_code)?; } _ => storage.write(&proposal_type_key, data.r#type.clone())?, @@ -117,7 +115,7 @@ pub fn vote_proposal( storage: &mut S, data: VoteProposalData, delegation_targets: HashSet
, -) -> StorageResult<()> +) -> Result<()> where S: StorageRead + StorageWrite, { @@ -137,7 +135,7 @@ pub fn write_proposal_result( storage: &mut S, proposal_id: u64, proposal_result: ProposalResult, -) -> StorageResult<()> +) -> Result<()> where S: StorageRead + StorageWrite, { @@ -150,7 +148,7 @@ where pub fn get_proposal_by_id( storage: &S, id: u64, -) -> StorageResult> +) -> Result> where S: StorageRead, { @@ -196,10 +194,7 @@ where } /// Query all the votes for a proposal_id -pub fn get_proposal_votes( - storage: &S, - proposal_id: u64, -) -> StorageResult> +pub fn get_proposal_votes(storage: &S, proposal_id: u64) -> Result> where S: StorageRead, { @@ -234,10 +229,7 @@ where } /// Check if an accepted proposal is being executed -pub fn is_proposal_accepted( - storage: &S, - tx_data: &[u8], -) -> StorageResult +pub fn is_proposal_accepted(storage: &S, tx_data: &[u8]) -> Result where S: StorageRead, { @@ -255,7 +247,7 @@ where pub fn get_proposal_code( storage: &S, proposal_id: u64, -) -> StorageResult>> +) -> Result>> where S: StorageRead, { @@ -267,7 +259,7 @@ where pub fn get_proposal_author( storage: &S, proposal_id: u64, -) -> StorageResult> +) -> Result> where S: StorageRead, { @@ -276,7 +268,7 @@ where } /// Get governance parameters -pub fn get_parameters(storage: &S) -> StorageResult +pub fn get_parameters(storage: &S) -> Result where S: StorageRead, { @@ -318,7 +310,7 @@ where } /// Get governance "max_proposal_period" parameter -pub fn get_max_proposal_period(storage: &S) -> StorageResult +pub fn get_max_proposal_period(storage: &S) -> Result where S: StorageRead, { @@ -332,7 +324,7 @@ where pub fn get_proposal_result( storage: &S, proposal_id: u64, -) -> StorageResult> +) -> Result> where S: StorageRead, { @@ -345,7 +337,7 @@ where pub fn load_proposals( storage: &S, current_epoch: Epoch, -) -> StorageResult> +) -> Result> where S: StorageRead, { diff --git a/crates/governance/src/vp/mod.rs b/crates/governance/src/vp/mod.rs index 
dffe0a8626..acb7ce6ba8 100644 --- a/crates/governance/src/vp/mod.rs +++ b/crates/governance/src/vp/mod.rs @@ -7,7 +7,7 @@ use std::collections::BTreeSet; use std::marker::PhantomData; use borsh::BorshDeserialize; -use namada_core::arith::{self, checked}; +use namada_core::arith::checked; use namada_core::booleans::{BoolResultUnitExt, ResultBoolExt}; use namada_core::chain::Epoch; use namada_core::storage; @@ -15,8 +15,10 @@ use namada_state::{StateRead, StorageRead}; use namada_systems::{proof_of_stake, trans_token as token}; use namada_tx::action::{Action, GovAction, Read}; use namada_tx::BatchedTxRef; -use namada_vp::native_vp::{Ctx, CtxPreStorageRead, NativeVp, VpEvaluator}; -use namada_vp::{native_vp, VpEnv}; +use namada_vp::native_vp::{ + Ctx, CtxPreStorageRead, Error, NativeVp, Result, VpEvaluator, +}; +use namada_vp::VpEnv; use thiserror::Error; use self::utils::ReadType; @@ -26,9 +28,6 @@ use crate::storage::{is_proposal_accepted, keys as gov_storage}; use crate::utils::is_valid_validator_voting_period; use crate::ProposalVote; -/// for handling Governance NativeVP errors -pub type Result = std::result::Result; - /// The governance internal address pub const ADDRESS: Address = Address::Internal(InternalAddress::Governance); @@ -37,15 +36,17 @@ pub const MAX_PGF_ACTIONS: usize = 20; #[allow(missing_docs)] #[derive(Error, Debug)] -pub enum Error { - #[error("Governance VP error: {0}")] - NativeVpError(#[from] native_vp::Error), +pub enum VpError { #[error( "Action {0} not authorized by {1} which is not part of verifier set" )] Unauthorized(&'static str, Address), - #[error("Arithmetic {0}")] - Arith(#[from] arith::Error), +} + +impl From for Error { + fn from(value: VpError) -> Self { + Error::new(value) + } } /// Governance VP @@ -68,8 +69,6 @@ where PoS: proof_of_stake::Read>, TokenKeys: token::Keys, { - type Error = Error; - fn validate_tx( &'view self, tx_data: &BatchedTxRef<'_>, @@ -80,10 +79,7 @@ where self.is_valid_init_proposal_key_set(keys_changed)?; 
if !is_valid_keys_set { tracing::info!("Invalid changed governance key set"); - return Err(native_vp::Error::new_const( - "Invalid changed governance key set", - ) - .into()); + return Err(Error::new_const("Invalid changed governance key set")); }; // Is VP triggered by a governance proposal? @@ -108,10 +104,9 @@ where tracing::info!( "Rejecting tx without any action written to temp storage" ); - return Err(native_vp::Error::new_const( + return Err(Error::new_const( "Rejecting tx without any action written to temp storage", - ) - .into()); + )); } // Check action authorization @@ -123,10 +118,11 @@ where tracing::info!( "Unauthorized GovAction::InitProposal" ); - return Err(Error::Unauthorized( + return Err(VpError::Unauthorized( "InitProposal", author, - )); + ) + .into()); } } GovAction::VoteProposal { id: _, voter } => { @@ -134,10 +130,11 @@ where tracing::info!( "Unauthorized GovAction::VoteProposal" ); - return Err(Error::Unauthorized( + return Err(VpError::Unauthorized( "VoteProposal", voter, - )); + ) + .into()); } } }, @@ -187,17 +184,13 @@ where } (KeyType::PARAMETER, _) => self.is_valid_parameter(tx_data), (KeyType::BALANCE, _) => self.is_valid_balance(&native_token), - (KeyType::UNKNOWN_GOVERNANCE, _) => { - Err(native_vp::Error::new_alloc(format!( - "Unkown governance key change: {key}" - )) - .into()) - } + (KeyType::UNKNOWN_GOVERNANCE, _) => Err(Error::new_alloc( + format!("Unkown governance key change: {key}"), + )), (KeyType::UNKNOWN, _) => Ok(()), - _ => Err(native_vp::Error::new_alloc(format!( + _ => Err(Error::new_alloc(format!( "Unkown governance key change: {key}" - )) - .into()), + ))), }; result.inspect_err(|err| { @@ -284,24 +277,22 @@ where let pre_voting_end_epoch: Epoch = self.force_read(&voting_end_epoch_key, ReadType::Pre)?; - let voter = gov_storage::get_voter_address(key).ok_or( - native_vp::Error::new_alloc(format!( - "Failed to parse a voter from the vote key {key}", - )), - )?; + let voter = + 
gov_storage::get_voter_address(key).ok_or(Error::new_alloc( + format!("Failed to parse a voter from the vote key {key}",), + ))?; let validator = gov_storage::get_vote_delegation_address(key).ok_or( - native_vp::Error::new_alloc(format!( + Error::new_alloc(format!( "Failed to parse a validator from the vote key {key}", )), )?; // Invalid proposal id if pre_counter <= proposal_id { - let error = native_vp::Error::new_alloc(format!( + let error = Error::new_alloc(format!( "Invalid proposal ID. Expected {pre_counter} or lower, got \ {proposal_id}" - )) - .into(); + )); tracing::info!("{error}"); return Err(error); } @@ -316,10 +307,9 @@ where .force_read::(&vote_key, ReadType::Post) .is_err() { - return Err(native_vp::Error::new_alloc(format!( + return Err(Error::new_alloc(format!( "Vote key is not valid: {key}" - )) - .into()); + ))); } // No checks for the target validators, since ultimately whether the @@ -335,11 +325,10 @@ where pre_voting_end_epoch, false, ) { - let error = native_vp::Error::new_alloc(format!( + let error = Error::new_alloc(format!( "Voted outside voting window. Current epoch: {current_epoch}, \ start: {pre_voting_start_epoch}, end: {pre_voting_end_epoch}." - )) - .into(); + )); tracing::info!("{error}"); return Err(error); } @@ -354,13 +343,12 @@ where pre_voting_end_epoch, ) .ok_or_else(|| { - native_vp::Error::new_alloc(format!( + Error::new_alloc(format!( "Validator {voter} voted outside of the voting period. \ Current epoch: {current_epoch}, pre voting start epoch: \ {pre_voting_start_epoch}, pre voting end epoch: \ {pre_voting_end_epoch}." )) - .into() }); } @@ -372,11 +360,10 @@ where )?; if !is_delegator { - return Err(native_vp::Error::new_alloc(format!( + return Err(Error::new_alloc(format!( "Address {voter} is neither a validator nor a delegator at \ the beginning of epoch {pre_voting_start_epoch}." 
- )) - .into()); + ))); } Ok(()) @@ -391,11 +378,10 @@ where let has_pre_content: bool = self.ctx.has_key_pre(&content_key)?; if has_pre_content { - return Err(native_vp::Error::new_alloc(format!( + return Err(Error::new_alloc(format!( "Proposal with id {proposal_id} already had content written \ to storage." - )) - .into()); + ))); } let max_content_length: usize = @@ -406,11 +392,10 @@ where let is_valid = post_content_bytes.len() <= max_content_length; if !is_valid { - let error = native_vp::Error::new_alloc(format!( + let error = Error::new_alloc(format!( "Max content length {max_content_length}, got {}.", post_content_bytes.len() - )) - .into(); + )); tracing::info!("{error}"); return Err(error); } @@ -445,10 +430,9 @@ where // we allow only a single steward to be added if total_stewards_added > 1 { - Err(native_vp::Error::new_const( + Err(Error::new_const( "Only one steward is allowed to be added per proposal", - ) - .into()) + )) } else if total_stewards_added == 0 { let is_valid_total_pgf_actions = stewards.len() < MAX_PGF_ACTIONS; @@ -456,12 +440,11 @@ where return if is_valid_total_pgf_actions { Ok(()) } else { - return Err(native_vp::Error::new_alloc(format!( + return Err(Error::new_alloc(format!( "Maximum number of steward actions \ ({MAX_PGF_ACTIONS}) exceeded ({})", stewards.len() - )) - .into()); + ))); }; } else if let Some(address) = stewards_added.first() { let author_key = gov_storage::get_author_key(proposal_id); @@ -470,41 +453,35 @@ where let is_valid_author = address.eq(&author); if !is_valid_author { - return Err(native_vp::Error::new_alloc(format!( + return Err(Error::new_alloc(format!( "Author {author} does not match added steward \ address {address}", - )) - .into()); + ))); } let stewards_addresses_are_unique = stewards.len() == all_pgf_action_addresses; if !stewards_addresses_are_unique { - return Err(native_vp::Error::new_const( + return Err(Error::new_const( "Non-unique modified steward addresses", - ) - .into()); + )); } let 
is_valid_total_pgf_actions = all_pgf_action_addresses < MAX_PGF_ACTIONS; if !is_valid_total_pgf_actions { - return Err(native_vp::Error::new_alloc(format!( + return Err(Error::new_alloc(format!( "Maximum number of steward actions \ ({MAX_PGF_ACTIONS}) exceeded \ ({all_pgf_action_addresses})", - )) - .into()); + ))); } return Ok(()); } else { - return Err(native_vp::Error::new_const( - "Invalid PGF proposal", - ) - .into()); + return Err(Error::new_const("Invalid PGF proposal")); } } ProposalType::PGFPayment(fundings) => { @@ -540,12 +517,11 @@ where let is_total_fundings_valid = fundings.len() < MAX_PGF_ACTIONS; if !is_total_fundings_valid { - return Err(native_vp::Error::new_alloc(format!( + return Err(Error::new_alloc(format!( "Maximum number of funding actions \ ({MAX_PGF_ACTIONS}) exceeded ({})", fundings.len() - )) - .into()); + ))); } // check that they are unique by checking that the set of add @@ -558,10 +534,9 @@ where )? == fundings.len(); if !are_continuous_fundings_unique { - return Err(native_vp::Error::new_const( + return Err(Error::new_const( "Non-unique modified fundings", - ) - .into()); + )); } // can't remove and add the same target in the same proposal @@ -571,11 +546,10 @@ where == 0; are_targets_unique.ok_or_else(|| { - native_vp::Error::new_const( + Error::new_const( "One or more payment targets were added and removed \ in the same proposal", ) - .into() }) } // Default proposal condition are checked already for all other @@ -592,11 +566,10 @@ where self.force_read(&proposal_type_key, ReadType::Post)?; if !proposal_type.is_default_with_wasm() { - return Err(native_vp::Error::new_alloc(format!( + return Err(Error::new_alloc(format!( "Proposal with id {proposal_id} modified a proposal code key, \ but its type is not default.", - )) - .into()); + ))); } let code_key = gov_storage::get_proposal_code_key(proposal_id); @@ -605,11 +578,10 @@ where let has_pre_code: bool = self.ctx.has_key_pre(&code_key)?; if has_pre_code { - return 
Err(native_vp::Error::new_alloc(format!( + return Err(Error::new_alloc(format!( "Proposal with id {proposal_id} already had wasm code written \ to storage in its slot.", - )) - .into()); + ))); } let max_proposal_length: usize = @@ -620,13 +592,12 @@ where let wasm_code_below_max_len = post_code.len() <= max_proposal_length; if !wasm_code_below_max_len { - return Err(native_vp::Error::new_alloc(format!( + return Err(Error::new_alloc(format!( "Proposal with id {proposal_id} wrote wasm code with length \ {} to storage, but the max allowed length is \ {max_proposal_length}.", post_code.len(), - )) - .into()); + ))); } Ok(()) @@ -646,11 +617,10 @@ where let has_pre_activation_epoch = self.ctx.has_key_pre(&activation_epoch_key)?; if has_pre_activation_epoch { - return Err(native_vp::Error::new_alloc(format!( + return Err(Error::new_alloc(format!( "Proposal with id {proposal_id} already had a grace epoch \ written to storage in its slot.", - )) - .into()); + ))); } let start_epoch: Epoch = @@ -671,26 +641,20 @@ where let has_post_committing_epoch = self.ctx.has_key_post(&committing_epoch_key)?; if !has_post_committing_epoch { - let error = native_vp::Error::new_const( - "Committing proposal key is missing present", - ) - .into(); + let error = + Error::new_const("Committing proposal key is missing present"); tracing::info!("{error}"); return Err(error); } let is_valid_activation_epoch = end_epoch < activation_epoch - && checked!(activation_epoch - end_epoch) - .map_err(|e| Error::NativeVpError(e.into()))? 
- .0 - >= min_grace_epochs; + && checked!(activation_epoch - end_epoch)?.0 >= min_grace_epochs; if !is_valid_activation_epoch { - let error = native_vp::Error::new_alloc(format!( + let error = Error::new_alloc(format!( "Expected min duration between the end and grace epoch \ {min_grace_epochs}, but got activation = {activation_epoch}, \ end = {end_epoch}", - )) - .into(); + )); tracing::info!("{error}"); return Err(error); } @@ -698,12 +662,11 @@ where && checked!(activation_epoch.0 - start_epoch.0)? <= max_proposal_period; if !is_valid_max_proposal_period { - let error = native_vp::Error::new_alloc(format!( + let error = Error::new_alloc(format!( "Expected max duration between the start and grace epoch \ {max_proposal_period}, but got activation = \ {activation_epoch}, start = {start_epoch}", - )) - .into(); + )); tracing::info!("{error}"); return Err(error); } @@ -725,24 +688,22 @@ where let has_pre_start_epoch = self.ctx.has_key_pre(&start_epoch_key)?; if has_pre_start_epoch { - let error = native_vp::Error::new_alloc(format!( + let error = Error::new_alloc(format!( "Failed to validate start epoch. Proposal with id \ {proposal_id} already had a pre_start epoch written to \ storage in its slot.", - )) - .into(); + )); tracing::info!("{error}"); return Err(error); } let has_pre_end_epoch = self.ctx.has_key_pre(&end_epoch_key)?; if has_pre_end_epoch { - let error = native_vp::Error::new_alloc(format!( + let error = Error::new_alloc(format!( "Failed to validate start epoch. 
Proposal with id \ {proposal_id} already had a pre_end epoch written to storage \ in its slot.", - )) - .into(); + )); tracing::info!("{error}"); return Err(error); } @@ -755,45 +716,39 @@ where self.force_read(&min_period_parameter_key, ReadType::Pre)?; if end_epoch <= start_epoch { - return Err(native_vp::Error::new_alloc(format!( + return Err(Error::new_alloc(format!( "Ending epoch {end_epoch} cannot be lower than or equal to \ the starting epoch {start_epoch} of the proposal with id \ {proposal_id}.", - )) - .into()); + ))); } if start_epoch <= current_epoch { - return Err(native_vp::Error::new_alloc(format!( + return Err(Error::new_alloc(format!( "Starting epoch {start_epoch} cannot be lower than or equal \ to the current epoch {current_epoch} of the proposal with id \ {proposal_id}.", - )) - .into()); + ))); } let latency: u64 = self.force_read(&max_latency_paramater_key, ReadType::Pre)?; if checked!(start_epoch.0 - current_epoch.0)? > latency { - return Err(native_vp::Error::new_alloc(format!( + return Err(Error::new_alloc(format!( "Starting epoch {start_epoch} of the proposal with id \ {proposal_id} is too far in the future (more than {latency} \ epochs away from the current epoch {current_epoch}).", - )) - .into()); + ))); } - let proposal_meets_min_period = checked!(end_epoch - start_epoch) - .map_err(|e| Error::NativeVpError(e.into()))? - .0 - >= min_period; + let proposal_meets_min_period = + checked!(end_epoch - start_epoch)?.0 >= min_period; if !proposal_meets_min_period { - return Err(native_vp::Error::new_alloc(format!( + return Err(Error::new_alloc(format!( "Proposal with id {proposal_id} does not meet the required \ minimum period of {min_period} epochs. 
Starting epoch is \ {start_epoch}, and ending epoch is {end_epoch}.", - )) - .into()); + ))); } Ok(()) @@ -813,22 +768,20 @@ where let has_pre_start_epoch = self.ctx.has_key_pre(&start_epoch_key)?; if has_pre_start_epoch { - let error = native_vp::Error::new_alloc(format!( + let error = Error::new_alloc(format!( "Failed to validate end epoch. Proposal with id {proposal_id} \ already had a pre_start epoch written to storage in its slot.", - )) - .into(); + )); tracing::info!("{error}"); return Err(error); } let has_pre_end_epoch = self.ctx.has_key_pre(&end_epoch_key)?; if has_pre_end_epoch { - let error = native_vp::Error::new_alloc(format!( + let error = Error::new_alloc(format!( "Failed to validate end epoch. Proposal with id {proposal_id} \ already had a pre_end epoch written to storage in its slot.", - )) - .into(); + )); tracing::info!("{error}"); return Err(error); } @@ -843,28 +796,25 @@ where self.force_read(&max_period_parameter_key, ReadType::Pre)?; if end_epoch <= start_epoch || start_epoch <= current_epoch { - let error = native_vp::Error::new_alloc(format!( + let error = Error::new_alloc(format!( "Proposal with id {proposal_id}'s end epoch ({end_epoch}) \ must be after the start epoch ({start_epoch}), and the start \ epoch must be after the current epoch ({current_epoch})." - )) - .into(); + )); tracing::info!("{error}"); return Err(error); } - let diff = checked!(end_epoch - start_epoch) - .map_err(|e| Error::NativeVpError(e.into()))?; + let diff = checked!(end_epoch - start_epoch)?; let valid_voting_period = diff.0 >= min_period && diff.0 <= max_period; valid_voting_period.ok_or_else(|| { - native_vp::Error::new_alloc(format!( + Error::new_alloc(format!( "Proposal with id {proposal_id} must have a voting period \ with a minimum of {min_period} epochs, and a maximum of \ {max_period} epochs. 
The starting epoch is {start_epoch}, \ and the ending epoch is {end_epoch}.", )) - .into() }) } @@ -894,21 +844,20 @@ where let is_post_funds_greater_than_minimum = post_funds >= min_funds_parameter; is_post_funds_greater_than_minimum.ok_or_else(|| { - Error::NativeVpError(native_vp::Error::new_alloc(format!( + Error::new_alloc(format!( "Funds must be greater than the minimum funds of {}", min_funds_parameter.native_denominated() - ))) + )) })?; let post_balance_is_same = post_balance == post_funds; post_balance_is_same.ok_or_else(|| { - native_vp::Error::new_alloc(format!( + Error::new_alloc(format!( "Funds and the balance of the governance account have \ diverged: funds {} != balance {}", post_funds.native_denominated(), post_balance.native_denominated() )) - .into() }) }, // there was some non-zero balance in the governance account @@ -916,23 +865,20 @@ where let is_post_funds_greater_than_minimum = post_funds >= min_funds_parameter; is_post_funds_greater_than_minimum.ok_or_else(|| { - Error::NativeVpError(native_vp::Error::new_alloc(format!( + Error::new_alloc(format!( "Funds {} must be greater than the minimum funds of {}", post_funds.native_denominated(), min_funds_parameter.native_denominated() - ))) + )) })?; let is_valid_funds = post_balance >= pre_balance - && checked!(post_balance - pre_balance) - .map_err(|e| Error::NativeVpError(e.into()))? - == post_funds; + && checked!(post_balance - pre_balance)? == post_funds; is_valid_funds.ok_or_else(|| { - native_vp::Error::new_alloc(format!( + Error::new_alloc(format!( "Invalid funds {} have been written to storage", post_funds.native_denominated() )) - .into() }) }, ) @@ -954,19 +900,16 @@ where let balance_is_valid = if let Some(pre_balance) = pre_balance { post_balance > pre_balance - && checked!(post_balance - pre_balance) - .map_err(|e| Error::NativeVpError(e.into()))? - >= min_funds_parameter + && checked!(post_balance - pre_balance)? 
>= min_funds_parameter } else { post_balance >= min_funds_parameter }; balance_is_valid.ok_or_else(|| { - native_vp::Error::new_alloc(format!( + Error::new_alloc(format!( "Invalid balance {} has been written to storage", post_balance.native_denominated() )) - .into() }) } @@ -980,30 +923,27 @@ where let has_pre_author = self.ctx.has_key_pre(&author_key)?; if has_pre_author { - return Err(native_vp::Error::new_alloc(format!( + return Err(Error::new_alloc(format!( "Proposal with id {proposal_id} already had an author written \ to storage" - )) - .into()); + ))); } let author = self.force_read(&author_key, ReadType::Post)?; - namada_account::exists(&self.ctx.pre(), &author) - .map_err(Error::NativeVpError) - .true_or_else(|| { - native_vp::Error::new_alloc(format!( + namada_account::exists(&self.ctx.pre(), &author).true_or_else( + || { + Error::new_alloc(format!( "No author account {author} could be found for the \ proposal with id {proposal_id}" )) - .into() - })?; + }, + )?; verifiers.contains(&author).ok_or_else(|| { - native_vp::Error::new_alloc(format!( + Error::new_alloc(format!( "The VP of the proposal with id {proposal_id}'s author \ {author} should have been triggered" )) - .into() }) } @@ -1018,11 +958,10 @@ where let valid_counter = expected_counter == post_counter; valid_counter.ok_or_else(|| { - native_vp::Error::new_alloc(format!( + Error::new_alloc(format!( "Invalid proposal counter. Expected {expected_counter}, but \ got {post_counter} instead." )) - .into() }) } @@ -1039,11 +978,10 @@ where let pre_counter_is_lower = pre_counter < post_counter; pre_counter_is_lower.ok_or_else(|| { - native_vp::Error::new_alloc(format!( + Error::new_alloc(format!( "The value of the previous counter {pre_counter} must be \ lower than the value of the new counter {post_counter}." 
)) - .into() }) } @@ -1055,22 +993,19 @@ where let BatchedTxRef { tx, cmt } = batched_tx; tx.data(cmt).map_or_else( || { - Err(native_vp::Error::new_const( + Err(Error::new_const( "Governance parameter changes require tx data to be \ present", - ) - .into()) + )) }, |data| { - is_proposal_accepted(&self.ctx.pre(), data.as_ref()) - .map_err(Error::NativeVpError)? + is_proposal_accepted(&self.ctx.pre(), data.as_ref())? .ok_or_else(|| { - native_vp::Error::new_const( + Error::new_const( "Governance parameter changes can only be \ performed by a governance proposal that has been \ accepted", ) - .into() }) }, ) @@ -1109,10 +1044,9 @@ where if let Some(data) = res { Ok(data) } else { - Err(native_vp::Error::new_alloc(format!( + Err(Error::new_alloc(format!( "Proposal field should not be empty: {key}" - )) - .into()) + ))) } } diff --git a/crates/governance/src/vp/pgf.rs b/crates/governance/src/vp/pgf.rs index 0b473ef96a..33f1083209 100644 --- a/crates/governance/src/vp/pgf.rs +++ b/crates/governance/src/vp/pgf.rs @@ -7,30 +7,31 @@ use namada_core::storage::Key; use namada_state::StateRead; use namada_tx::action::{Action, PgfAction, Read}; use namada_tx::BatchedTxRef; -use namada_vp::native_vp::{self, Ctx, NativeVp, VpEvaluator}; +use namada_vp::native_vp::{self, Ctx, Error, NativeVp, Result, VpEvaluator}; use thiserror::Error; use crate::address::{Address, InternalAddress}; use crate::pgf::storage::keys as pgf_storage; use crate::{is_proposal_accepted, pgf}; -/// for handling Pgf NativeVP errors -pub type Result = std::result::Result; - /// The PGF internal address pub const ADDRESS: Address = Address::Internal(InternalAddress::Pgf); #[allow(missing_docs)] #[derive(Error, Debug)] -pub enum Error { - #[error("PGF VP error: Native VP error: {0}")] - NativeVpError(#[from] native_vp::Error), +pub enum VpError { #[error( "Action {0} not authorized by {1} which is not part of verifier set" )] Unauthorized(&'static str, Address), } +impl From for Error { + fn from(value: 
VpError) -> Self { + Error::new(value) + } +} + /// Pgf VP pub struct PgfVp<'ctx, S, CA, EVAL> where @@ -46,8 +47,6 @@ where CA: 'static + Clone, EVAL: 'static + VpEvaluator<'ctx, S, CA, EVAL>, { - type Error = Error; - fn validate_tx( &'view self, batched_tx: &BatchedTxRef<'_>, @@ -80,8 +79,7 @@ where ); return Err(native_vp::Error::new_const( "Rejecting tx without any action written to temp storage", - ) - .into()); + )); } // Check action authorization @@ -94,10 +92,11 @@ where "Unauthorized \ PgfAction::UpdateStewardCommission" ); - return Err(Error::Unauthorized( + return Err(VpError::Unauthorized( "UpdateStewardCommission", address, - )); + ) + .into()); } } PgfAction::ResignSteward(address) => { @@ -105,10 +104,11 @@ where tracing::info!( "Unauthorized PgfAction::ResignSteward" ); - return Err(Error::Unauthorized( + return Err(VpError::Unauthorized( "ResignSteward", address, - )); + ) + .into()); } } }, @@ -142,8 +142,7 @@ where return Err(native_vp::Error::new_const( "Stewards can only be added via governance \ proposals", - ) - .into()); + )); } pgf::storage::get_steward( @@ -160,7 +159,6 @@ where {steward_address} should have been \ triggered to check their signature" )) - .into() }, ) }, @@ -175,15 +173,13 @@ where {steward_address} should have been \ triggered to check their signature" ), - ) - .into()); + )); } steward.is_valid_reward_distribution().ok_or_else( || { native_vp::Error::new_const( "Steward commissions are invalid", ) - .into() }, ) }, @@ -191,15 +187,13 @@ where } KeyType::Fundings => Err(native_vp::Error::new_alloc(format!( "Cannot update PGF fundings key: {key}" - )) - .into()), + ))), KeyType::PgfInflationRate | KeyType::StewardInflationRate => { self.is_valid_parameter_change(batched_tx) } KeyType::UnknownPgf => Err(native_vp::Error::new_alloc( format!("Unknown PGF state update on key: {key}"), - ) - .into()), + )), KeyType::Unknown => Ok(()), } }) @@ -226,18 +220,15 @@ where || { Err(native_vp::Error::new_const( "PGF parameter 
changes require tx data to be present", - ) - .into()) + )) }, |data| { - is_proposal_accepted(&self.ctx.pre(), data.as_ref()) - .map_err(Error::NativeVpError)? + is_proposal_accepted(&self.ctx.pre(), data.as_ref())? .ok_or_else(|| { native_vp::Error::new_const( "PGF parameter changes can only be performed by a \ governance proposal that has been accepted", ) - .into() }) }, ) diff --git a/crates/ibc/src/actions.rs b/crates/ibc/src/actions.rs index f6619eb34a..8c793017a0 100644 --- a/crates/ibc/src/actions.rs +++ b/crates/ibc/src/actions.rs @@ -18,8 +18,8 @@ use namada_core::tendermint::Time as TmTime; use namada_core::token::Amount; use namada_events::EmitEvents; use namada_state::{ - BlockHeader, BlockHeight, Epoch, Epochs, Key, ResultExt, State, - StorageError, StorageRead, StorageResult, StorageWrite, TxIndex, + BlockHeader, BlockHeight, Epoch, Epochs, Key, Result, ResultExt, State, + StorageRead, StorageWrite, TxIndex, }; use namada_systems::{parameters, trans_token}; @@ -43,56 +43,56 @@ where { type PrefixIter<'iter> = ::PrefixIter<'iter> where Self: 'iter; - fn read_bytes(&self, key: &Key) -> StorageResult>> { + fn read_bytes(&self, key: &Key) -> Result>> { self.state.read_bytes(key) } - fn has_key(&self, key: &Key) -> StorageResult { + fn has_key(&self, key: &Key) -> Result { self.state.has_key(key) } fn iter_prefix<'iter>( &'iter self, prefix: &Key, - ) -> StorageResult> { + ) -> Result> { self.state.iter_prefix(prefix) } fn iter_next<'iter>( &'iter self, iter: &mut Self::PrefixIter<'iter>, - ) -> StorageResult)>> { + ) -> Result)>> { self.state.iter_next(iter) } - fn get_chain_id(&self) -> StorageResult { + fn get_chain_id(&self) -> Result { self.state.get_chain_id() } - fn get_block_height(&self) -> StorageResult { + fn get_block_height(&self) -> Result { self.state.get_block_height() } fn get_block_header( &self, height: BlockHeight, - ) -> StorageResult> { + ) -> Result> { StorageRead::get_block_header(self.state, height) } - fn get_block_epoch(&self) 
-> StorageResult { + fn get_block_epoch(&self) -> Result { self.state.get_block_epoch() } - fn get_pred_epochs(&self) -> StorageResult { + fn get_pred_epochs(&self) -> Result { self.state.get_pred_epochs() } - fn get_tx_index(&self) -> StorageResult { + fn get_tx_index(&self) -> Result { self.state.get_tx_index() } - fn get_native_token(&self) -> StorageResult
{ + fn get_native_token(&self) -> Result
{ self.state.get_native_token() } } @@ -101,15 +101,11 @@ impl StorageWrite for IbcProtocolContext<'_, S, Token> where S: State, { - fn write_bytes( - &mut self, - key: &Key, - val: impl AsRef<[u8]>, - ) -> StorageResult<()> { + fn write_bytes(&mut self, key: &Key, val: impl AsRef<[u8]>) -> Result<()> { self.state.write_bytes(key, val) } - fn delete(&mut self, key: &Key) -> StorageResult<()> { + fn delete(&mut self, key: &Key) -> Result<()> { self.state.delete(key) } } @@ -132,7 +128,7 @@ where self } - fn emit_ibc_event(&mut self, event: IbcEvent) -> Result<(), StorageError> { + fn emit_ibc_event(&mut self, event: IbcEvent) -> Result<()> { // There's no gas cost for protocol, we can ignore result self.state.write_log_mut().emit_event(event); Ok(()) @@ -145,7 +141,7 @@ where dest: &Address, token: &Address, amount: Amount, - ) -> Result<(), StorageError> { + ) -> Result<()> { Token::transfer(self.state, token, src, dest, amount) } @@ -155,7 +151,7 @@ where target: &Address, token: &Address, amount: Amount, - ) -> Result<(), StorageError> { + ) -> Result<()> { ibc_storage::mint_tokens_and_emit_event::<_, Token>( self.state, target, token, amount, ) @@ -167,14 +163,11 @@ where target: &Address, token: &Address, amount: Amount, - ) -> Result<(), StorageError> { + ) -> Result<()> { ibc_storage::burn_tokens::<_, Token>(self.state, target, token, amount) } - fn insert_verifier( - &mut self, - _verifier: &Address, - ) -> Result<(), StorageError> { + fn insert_verifier(&mut self, _verifier: &Address) -> Result<()> { Ok(()) } @@ -199,7 +192,7 @@ pub fn transfer_over_ibc<'a, S, Params, Token, Transfer>( token: &Address, source: &Address, target: &PGFIbcTarget, -) -> StorageResult<()> +) -> Result<()> where S: 'a + State + EmitEvents, Params: parameters::Read< diff --git a/crates/ibc/src/context/common.rs b/crates/ibc/src/context/common.rs index 8d04fb6f9c..aeba84a332 100644 --- a/crates/ibc/src/context/common.rs +++ b/crates/ibc/src/context/common.rs @@ -24,7 +24,7 @@ use 
namada_core::chain::BlockHeight; use namada_core::storage::Key; use namada_core::tendermint::Time as TmTime; use namada_core::token::Amount; -use namada_state::{StorageError, StorageRead, StorageWrite}; +use namada_state::{Error, StorageRead, StorageWrite}; use namada_systems::trans_token; use prost::Message; @@ -760,11 +760,11 @@ pub trait IbcCommonContext: IbcStorageContext { pub fn read_sequence( storage: &S, key: &Key, -) -> std::result::Result { +) -> std::result::Result { match storage.read_bytes(key)? { Some(value) => { let value: [u8; 8] = value.try_into().map_err(|_| { - StorageError::new_alloc(format!( + Error::new_alloc(format!( "The sequence value wasn't u64: Key {key}", )) })?; diff --git a/crates/ibc/src/context/storage.rs b/crates/ibc/src/context/storage.rs index f485ef77cf..2339f17a96 100644 --- a/crates/ibc/src/context/storage.rs +++ b/crates/ibc/src/context/storage.rs @@ -3,7 +3,7 @@ pub use ics23::ProofSpec; use namada_core::address::Address; use namada_core::token::Amount; -use namada_state::{StorageRead, StorageResult, StorageWrite}; +use namada_state::{Result, StorageRead, StorageWrite}; use crate::event::IbcEvent; @@ -19,7 +19,7 @@ pub trait IbcStorageContext { fn storage_mut(&mut self) -> &mut Self::Storage; /// Emit an IBC event - fn emit_ibc_event(&mut self, event: IbcEvent) -> StorageResult<()>; + fn emit_ibc_event(&mut self, event: IbcEvent) -> Result<()>; /// Transfer token fn transfer_token( @@ -28,7 +28,7 @@ pub trait IbcStorageContext { dest: &Address, token: &Address, amount: Amount, - ) -> StorageResult<()>; + ) -> Result<()>; /// Mint token fn mint_token( @@ -36,7 +36,7 @@ pub trait IbcStorageContext { target: &Address, token: &Address, amount: Amount, - ) -> StorageResult<()>; + ) -> Result<()>; /// Burn token fn burn_token( @@ -44,10 +44,10 @@ pub trait IbcStorageContext { target: &Address, token: &Address, amount: Amount, - ) -> StorageResult<()>; + ) -> Result<()>; /// Insert the verifier - fn insert_verifier(&mut self, 
verifier: &Address) -> StorageResult<()>; + fn insert_verifier(&mut self, verifier: &Address) -> Result<()>; /// Logging fn log_string(&self, message: String); diff --git a/crates/ibc/src/lib.rs b/crates/ibc/src/lib.rs index 75780eb13f..b2b99a7cf6 100644 --- a/crates/ibc/src/lib.rs +++ b/crates/ibc/src/lib.rs @@ -90,8 +90,8 @@ use namada_core::masp_primitives::transaction::components::ValueSum; use namada_core::token::Amount; use namada_events::EmitEvents; use namada_state::{ - DBIter, Key, ResultExt, State, StorageError, StorageHasher, StorageRead, - StorageResult, StorageWrite, WlState, DB, + DBIter, Error as StorageError, Key, Result as StorageResult, ResultExt, + State, StorageHasher, StorageRead, StorageWrite, WlState, DB, }; use namada_systems::ibc::ChangedBalances; use namada_systems::trans_token; @@ -142,6 +142,12 @@ pub enum Error { Verifier(StorageError), } +impl From for StorageError { + fn from(value: Error) -> Self { + StorageError::new(value) + } +} + struct IbcTransferInfo { src_port_id: PortId, src_channel_id: ChannelId, diff --git a/crates/ibc/src/parameters.rs b/crates/ibc/src/parameters.rs index f09f964aba..811516eddd 100644 --- a/crates/ibc/src/parameters.rs +++ b/crates/ibc/src/parameters.rs @@ -2,7 +2,7 @@ use namada_core::borsh::{BorshDeserialize, BorshSerialize}; use namada_core::token::Amount; -use namada_state::{StorageResult, StorageWrite}; +use namada_state::{Result, StorageWrite}; #[derive(Clone, Debug, BorshSerialize, BorshDeserialize)] /// Governance parameter structure @@ -24,7 +24,7 @@ impl Default for IbcParameters { impl IbcParameters { /// Initialize IBC parameters into storage - pub fn init_storage(&self, storage: &mut S) -> StorageResult<()> + pub fn init_storage(&self, storage: &mut S) -> Result<()> where S: StorageWrite, { diff --git a/crates/ibc/src/storage.rs b/crates/ibc/src/storage.rs index 6909ae99ec..38b0d7e905 100644 --- a/crates/ibc/src/storage.rs +++ b/crates/ibc/src/storage.rs @@ -17,9 +17,9 @@ use 
namada_core::address::{Address, InternalAddress}; use namada_core::storage::{DbKeySeg, Key, KeySeg}; use namada_core::token::Amount; use namada_events::EmitEvents; -use namada_state::{StorageRead, StorageResult, StorageWrite}; +pub use namada_state::{Error, Result}; +use namada_state::{StorageRead, StorageWrite}; use namada_systems::trans_token; -use thiserror::Error; use crate::event::TOKEN_EVENT_DESCRIPTOR; use crate::parameters::IbcParameters; @@ -39,20 +39,6 @@ const THROUGHPUT_LIMIT: &str = "throughput_limit"; const DEPOSIT: &str = "deposit"; const WITHDRAW: &str = "withdraw"; -#[allow(missing_docs)] -#[derive(Error, Debug)] -pub enum Error { - #[error("Storage key error: {0}")] - StorageKey(namada_core::storage::Error), - #[error("Invalid Key: {0}")] - InvalidKey(String), - #[error("Invalid IBC trace: {0}")] - InvalidIbcTrace(String), -} - -/// IBC storage functions result -pub type Result = std::result::Result; - /// Mint IBC tokens. This function doesn't emit event (see /// `mint_tokens_and_emit_event` below) pub fn mint_tokens( @@ -60,7 +46,7 @@ pub fn mint_tokens( target: &Address, token: &Address, amount: Amount, -) -> StorageResult<()> +) -> Result<()> where S: StorageRead + StorageWrite, Token: trans_token::Keys + trans_token::Read + trans_token::Write, @@ -81,7 +67,7 @@ pub fn mint_tokens_and_emit_event( target: &Address, token: &Address, amount: Amount, -) -> StorageResult<()> +) -> Result<()> where S: StorageRead + StorageWrite + EmitEvents, Token: trans_token::Keys @@ -108,7 +94,7 @@ pub fn burn_tokens( target: &Address, token: &Address, amount: Amount, -) -> StorageResult<()> +) -> Result<()> where S: StorageRead + StorageWrite + EmitEvents, Token: @@ -129,7 +115,7 @@ where /// Returns a key of the IBC-related data pub fn ibc_key(path: impl AsRef) -> Result { - let path = Key::parse(path).map_err(Error::StorageKey)?; + let path = Key::parse(path)?; let addr = Address::Internal(InternalAddress::Ibc); let key = Key::from(addr.to_db_key()); 
Ok(key.join(&path)) @@ -343,9 +329,9 @@ pub fn client_id(key: &Key) -> Result { && prefix == "clients" => { ClientId::from_str(&client_id.raw()) - .map_err(|e| Error::InvalidKey(e.to_string())) + .map_err(|e| Error::new_alloc(e.to_string())) } - _ => Err(Error::InvalidKey(format!( + _ => Err(Error::new_alloc(format!( "The key doesn't have a client ID: {}", key ))), @@ -367,9 +353,9 @@ pub fn consensus_height(key: &Key) -> Result { && module == "consensusStates" => { Height::from_str(height) - .map_err(|e| Error::InvalidKey(e.to_string())) + .map_err(|e| Error::new_alloc(e.to_string())) } - _ => Err(Error::InvalidKey(format!( + _ => Err(Error::new_alloc(format!( "The key doesn't have a consensus height: {}", key ))), @@ -388,9 +374,9 @@ pub fn connection_id(key: &Key) -> Result { && prefix == "connections" => { ConnectionId::from_str(&conn_id.raw()) - .map_err(|e| Error::InvalidKey(e.to_string())) + .map_err(|e| Error::new_alloc(e.to_string())) } - _ => Err(Error::InvalidKey(format!( + _ => Err(Error::new_alloc(format!( "The key doesn't have a connection ID: {}", key ))), @@ -417,12 +403,12 @@ pub fn port_channel_id(key: &Key) -> Result<(PortId, ChannelId)> { && module1 == "channels" => { let port_id = PortId::from_str(&port.raw()) - .map_err(|e| Error::InvalidKey(e.to_string()))?; + .map_err(|e| Error::new_alloc(e.to_string()))?; let channel_id = ChannelId::from_str(&channel.raw()) - .map_err(|e| Error::InvalidKey(e.to_string()))?; + .map_err(|e| Error::new_alloc(e.to_string()))?; Ok((port_id, channel_id)) } - _ => Err(Error::InvalidKey(format!( + _ => Err(Error::new_alloc(format!( "The key doesn't have port ID and channel ID: Key {}", key ))), @@ -454,14 +440,14 @@ pub fn port_channel_sequence_id( && module2 == "sequences" => { let port_id = PortId::from_str(&port_id.raw()) - .map_err(|e| Error::InvalidKey(e.to_string()))?; + .map_err(|e| Error::new_alloc(e.to_string()))?; let channel_id = ChannelId::from_str(&channel_id.raw()) - .map_err(|e| 
Error::InvalidKey(e.to_string()))?; + .map_err(|e| Error::new_alloc(e.to_string()))?; let seq = Sequence::from_str(&seq_index.raw()) - .map_err(|e| Error::InvalidKey(e.to_string()))?; + .map_err(|e| Error::new_alloc(e.to_string()))?; Ok((port_id, channel_id, seq)) } - _ => Err(Error::InvalidKey(format!( + _ => Err(Error::new_alloc(format!( "The key doesn't have port ID, channel ID and sequence number: \ Key {}", key, @@ -481,9 +467,9 @@ pub fn port_id(key: &Key) -> Result { && prefix == "ports" => { PortId::from_str(&port_id.raw()) - .map_err(|e| Error::InvalidKey(e.to_string())) + .map_err(|e| Error::new_alloc(e.to_string())) } - _ => Err(Error::InvalidKey(format!( + _ => Err(Error::new_alloc(format!( "The key doesn't have a port ID: Key {}", key ))), @@ -591,7 +577,7 @@ pub fn mint_limit_key(token: &Address) -> Key { pub fn get_limits( storage: &S, token: &Address, -) -> StorageResult<(Amount, Amount)> { +) -> Result<(Amount, Amount)> { let mint_limit_key = mint_limit_key(token); let mint_limit: Option = storage.read(&mint_limit_key)?; let throughput_limit_key = throughput_limit_key(token); diff --git a/crates/ibc/src/trace.rs b/crates/ibc/src/trace.rs index bceff94fcb..60b770a3ac 100644 --- a/crates/ibc/src/trace.rs +++ b/crates/ibc/src/trace.rs @@ -11,7 +11,7 @@ use namada_core::address::{Address, InternalAddress, HASH_LEN, SHA_HASH_LEN}; use namada_core::ibc::IbcTokenHash; use sha2::{Digest, Sha256}; -use crate::storage::Error; +use crate::storage::{Error, Result}; /// Hash the denom #[inline] @@ -57,23 +57,20 @@ pub fn ibc_trace_for_nft( } /// Convert the given IBC trace to [`Address`] -pub fn convert_to_address( - ibc_trace: impl AsRef, -) -> Result { +pub fn convert_to_address(ibc_trace: impl AsRef) -> Result
{ if ibc_trace.as_ref().contains('/') { // validation if is_ibc_denom(&ibc_trace).is_none() && is_nft_trace(&ibc_trace).is_none() { - return Err(Error::InvalidIbcTrace(format!( + return Err(Error::new_alloc(format!( "This is not IBC denom and NFT trace: {}", ibc_trace.as_ref() ))); } Ok(ibc_token(ibc_trace.as_ref())) } else { - Address::decode(ibc_trace.as_ref()) - .map_err(|e| Error::InvalidIbcTrace(e.to_string())) + Ok(Address::decode(ibc_trace.as_ref())?) } } diff --git a/crates/ibc/src/vp/context.rs b/crates/ibc/src/vp/context.rs index e196b3ee9b..40f319b0a2 100644 --- a/crates/ibc/src/vp/context.rs +++ b/crates/ibc/src/vp/context.rs @@ -11,10 +11,8 @@ use namada_core::storage::{Key, TxIndex}; use namada_events::Event; use namada_gas::MEMORY_ACCESS_GAS_PER_BYTE; use namada_state::write_log::StorageModification; -pub use namada_state::StorageResult as Result; -use namada_state::{ - PrefixIter, StateRead, StorageError, StorageRead, StorageWrite, -}; +pub use namada_state::Result; +use namada_state::{Error, PrefixIter, StateRead, StorageRead, StorageWrite}; use namada_systems::trans_token::{self as token, Amount}; use namada_vp::native_vp::{CtxPreStorageRead, VpEvaluator}; use namada_vp::VpEnv; @@ -111,9 +109,9 @@ Self: 'iter; .charge_gas(checked!(len * MEMORY_ACCESS_GAS_PER_BYTE)?)?; Ok(None) } - Some(StorageModification::InitAccount { .. }) => Err( - StorageError::new_const("InitAccount shouldn't be inserted"), - ), + Some(StorageModification::InitAccount { .. 
}) => { + Err(Error::new_const("InitAccount shouldn't be inserted")) + } None => { let len = key.len() as u64; self.ctx diff --git a/crates/ibc/src/vp/mod.rs b/crates/ibc/src/vp/mod.rs index 2bead88e02..07817f0035 100644 --- a/crates/ibc/src/vp/mod.rs +++ b/crates/ibc/src/vp/mod.rs @@ -14,18 +14,16 @@ use context::{ PseudoExecutionContext, PseudoExecutionStorage, VpValidationContext, }; use namada_core::address::Address; -use namada_core::arith::{self, checked}; +use namada_core::arith::checked; use namada_core::collections::HashSet; use namada_core::storage::Key; use namada_gas::{IBC_ACTION_EXECUTE_GAS, IBC_ACTION_VALIDATE_GAS}; use namada_state::write_log::StorageModification; -use namada_state::StateRead; +use namada_state::{Error, Result, StateRead}; use namada_systems::trans_token::{self as token, Amount}; use namada_systems::{governance, parameters, proof_of_stake}; use namada_tx::BatchedTxRef; -use namada_vp::native_vp::{ - self, Ctx, CtxPreStorageRead, NativeVp, VpEvaluator, -}; +use namada_vp::native_vp::{Ctx, CtxPreStorageRead, NativeVp, VpEvaluator}; use namada_vp::VpEnv; use thiserror::Error; @@ -44,29 +42,29 @@ use crate::{ #[allow(missing_docs)] #[derive(Error, Debug)] -pub enum Error { - #[error("IBC VP error: Native VP error: {0}")] - NativeVpError(#[from] native_vp::Error), +pub enum VpError { #[error("IBC VP error: Decoding error: {0}")] Decoding(#[from] std::io::Error), #[error("IBC VP error: governance proposal change is invalid")] InvalidGovernanceChange, #[error("IBC VP error: IBC message is required as transaction data")] NoTxData, - #[error("IBC VP error: IBC action error: {0}")] - IbcAction(#[from] ActionError), #[error("IBC VP error: State change error: {0}")] StateChange(String), #[error("IBC VP error: IBC event error: {0}")] IbcEvent(String), #[error("IBC rate limit: {0}")] RateLimit(String), - #[error("Arithmetic {0}")] - Arith(#[from] arith::Error), } /// IBC functions result -pub type VpResult = std::result::Result; +pub type VpResult 
= std::result::Result; + +impl From for Error { + fn from(value: VpError) -> Self { + Error::new(value) + } +} /// IBC VP pub struct Ibc< @@ -141,14 +139,12 @@ where PoS: proof_of_stake::Read>, Transfer: BorshDeserialize, { - type Error = Error; - fn validate_tx( &'view self, batched_tx: &BatchedTxRef<'_>, keys_changed: &BTreeSet, _verifiers: &BTreeSet
, - ) -> VpResult<()> { + ) -> Result<()> { // Is VP triggered by a governance proposal? if Gov::is_proposal_accepted( &self.ctx.pre(), @@ -163,8 +159,10 @@ where return Ok(()); } - let tx_data = - batched_tx.tx.data(batched_tx.cmt).ok_or(Error::NoTxData)?; + let tx_data = batched_tx + .tx + .data(batched_tx.cmt) + .ok_or(VpError::NoTxData)?; // Pseudo execution and compare them self.validate_state(&tx_data, keys_changed)?; @@ -236,7 +234,7 @@ where &'view self, tx_data: &[u8], keys_changed: &BTreeSet, - ) -> VpResult<()> { + ) -> Result<()> { let exec_ctx = PseudoExecutionContext::<'_, '_, S, CA, EVAL, Token>::new( self.ctx.pre(), @@ -255,26 +253,22 @@ where let module = NftTransferModule::<_, Token>::new(ctx.clone()); actions.add_transfer_module(module); // Charge gas for the expensive execution - self.ctx - .charge_gas(IBC_ACTION_EXECUTE_GAS) - .map_err(Error::NativeVpError)?; + self.ctx.charge_gas(IBC_ACTION_EXECUTE_GAS)?; actions.execute::(tx_data)?; let changed_ibc_keys: HashSet<&Key> = keys_changed.iter().filter(|k| is_ibc_key(k)).collect(); if changed_ibc_keys.len() != ctx.borrow().get_changed_keys().len() { - return Err(Error::StateChange(format!( + return Err(VpError::StateChange(format!( "The changed keys mismatched: Actual {:?}, Expected {:?}", changed_ibc_keys, ctx.borrow().get_changed_keys(), - ))); + )) + .into()); } for key in changed_ibc_keys { - let actual = self - .ctx - .read_bytes_post(key) - .map_err(Error::NativeVpError)?; + let actual = self.ctx.read_bytes_post(key)?; match_value(key, actual, ctx.borrow().get_changed_value(key))?; } @@ -288,16 +282,17 @@ where let ctx_borrow = ctx.borrow(); let expected: BTreeSet<_> = ctx_borrow.storage.event.iter().collect(); if actual != expected { - return Err(Error::IbcEvent(format!( + return Err(VpError::IbcEvent(format!( "The IBC event is invalid: Actual {actual:?}, Expected \ {expected:?}", - ))); + )) + .into()); } Ok(()) } - fn validate_with_msg(&'view self, tx_data: &[u8]) -> VpResult<()> { + fn 
validate_with_msg(&'view self, tx_data: &[u8]) -> Result<()> { let validation_ctx = VpValidationContext::new(self.ctx.pre()); let ctx = Rc::new(RefCell::new(validation_ctx)); // Use an empty verifiers set placeholder for validation, this is only @@ -313,25 +308,19 @@ where let module = NftTransferModule::<_, Token>::new(ctx); actions.add_transfer_module(module); // Charge gas for the expensive validation - self.ctx - .charge_gas(IBC_ACTION_VALIDATE_GAS) - .map_err(Error::NativeVpError)?; - actions - .validate::(tx_data) - .map_err(Error::IbcAction) + self.ctx.charge_gas(IBC_ACTION_VALIDATE_GAS)?; + Ok(actions.validate::(tx_data)?) } /// Retrieve the validation params - pub fn validation_params(&'view self) -> VpResult { + pub fn validation_params(&'view self) -> Result { use std::str::FromStr; - let chain_id = self.ctx.get_chain_id().map_err(Error::NativeVpError)?; + let chain_id = self.ctx.get_chain_id()?; let proof_specs = namada_state::ics23_specs::ibc_proof_specs::<::H>(); - let pipeline_len = - PoS::pipeline_len(&self.ctx.pre()).map_err(Error::NativeVpError)?; + let pipeline_len = PoS::pipeline_len(&self.ctx.pre())?; let epoch_duration = - ParamsPre::epoch_duration_parameter(&self.ctx.pre()) - .map_err(Error::NativeVpError)?; + ParamsPre::epoch_duration_parameter(&self.ctx.pre())?; let unbonding_period_secs = checked!(pipeline_len * epoch_duration.min_duration.0)?; Ok(ValidationParams { @@ -348,7 +337,7 @@ where }) } - fn validate_trace(&self, keys_changed: &BTreeSet) -> VpResult<()> { + fn validate_trace(&self, keys_changed: &BTreeSet) -> Result<()> { for key in keys_changed { if let Some((_, hash)) = is_ibc_trace_key(key) { match self.ctx.read_post::(key).map_err(|e| { @@ -379,7 +368,7 @@ where Ok(()) } - fn check_limits(&self, keys_changed: &BTreeSet) -> VpResult { + fn check_limits(&self, keys_changed: &BTreeSet) -> Result { let tokens: BTreeSet<&Address> = keys_changed .iter() .filter_map(|k| { @@ -388,49 +377,41 @@ where .collect(); for token in tokens { 
let (mint_limit, throughput_limit) = - get_limits(&self.ctx.pre(), token) - .map_err(Error::NativeVpError)?; + get_limits(&self.ctx.pre(), token)?; // Check the supply let mint_amount_key = mint_amount_key(token); - let minted: Amount = self - .ctx - .read_post(&mint_amount_key) - .map_err(Error::NativeVpError)? - .unwrap_or_default(); + let minted: Amount = + self.ctx.read_post(&mint_amount_key)?.unwrap_or_default(); if mint_limit < minted { - return Err(Error::RateLimit(format!( + return Err(VpError::RateLimit(format!( "Transfer exceeding the mint limit is not allowed: Mint \ limit {mint_limit}, minted amount {minted}" - ))); + )) + .into()); } // Check the rate limit let throughput = self.calc_throughput(token)?; if throughput_limit < throughput { - return Err(Error::RateLimit(format!( + return Err(VpError::RateLimit(format!( "Transfer exceeding the per-epoch throughput limit is not \ allowed: Per-epoch throughput limit {throughput_limit}, \ actual throughput {throughput}" - ))); + )) + .into()); } } Ok(true) } - fn calc_throughput(&self, token: &Address) -> VpResult { + fn calc_throughput(&self, token: &Address) -> Result { let deposit_key = deposit_key(token); - let deposit: Amount = self - .ctx - .read_post(&deposit_key) - .map_err(Error::NativeVpError)? - .unwrap_or_default(); + let deposit: Amount = + self.ctx.read_post(&deposit_key)?.unwrap_or_default(); let withdraw_key = withdraw_key(token); - let withdraw: Amount = self - .ctx - .read_post(&withdraw_key) - .map_err(Error::NativeVpError)? 
- .unwrap_or_default(); + let withdraw: Amount = + self.ctx.read_post(&withdraw_key)?.unwrap_or_default(); let throughput = if deposit < withdraw { withdraw .checked_sub(deposit) @@ -454,18 +435,18 @@ fn match_value( if v == *value { Ok(()) } else { - Err(Error::StateChange(format!( + Err(VpError::StateChange(format!( "The value mismatched: Key {} actual {:?}, expected {:?}", key, v, value ))) } } - (Some(_), _) => Err(Error::StateChange(format!( + (Some(_), _) => Err(VpError::StateChange(format!( "The value was invalid: Key {}", key ))), (None, Some(StorageModification::Delete)) => Ok(()), - (None, _) => Err(Error::StateChange(format!( + (None, _) => Err(VpError::StateChange(format!( "The key was deleted unexpectedly: Key {}", key ))), @@ -1141,7 +1122,8 @@ mod tests { let result = ibc .validate_tx(&batched_tx, &keys_changed, &verifiers) .unwrap_err(); - assert_matches!(result, Error::StateChange(_)); + let error = result.downcast_ref::().unwrap(); + assert_matches!(error, VpError::StateChange(_)); } #[test] @@ -1468,7 +1450,8 @@ mod tests { let result = ibc .validate_tx(&batched_tx, &keys_changed, &verifiers) .unwrap_err(); - assert_matches!(result, Error::IbcEvent(_)); + let error = result.downcast_ref::().unwrap(); + assert_matches!(error, VpError::IbcEvent(_)); } #[test] diff --git a/crates/merkle_tree/src/lib.rs b/crates/merkle_tree/src/lib.rs index f9f2f37e49..6ddb1305fd 100644 --- a/crates/merkle_tree/src/lib.rs +++ b/crates/merkle_tree/src/lib.rs @@ -43,8 +43,7 @@ pub use namada_core::hash::{Hash, StorageHasher}; pub use namada_core::keccak::KeccakHash; pub use namada_core::storage::Key; use namada_core::storage::{ - self, DbKeySeg, Error as StorageError, KeySeg, StringKey, TreeBytes, - TreeKeyError, IBC_KEY_LIMIT, + self, DbKeySeg, KeySeg, StringKey, TreeBytes, TreeKeyError, IBC_KEY_LIMIT, }; use namada_core::{decode, DecodeError}; use namada_macros::BorshDeserializer; @@ -116,7 +115,7 @@ impl From for MembershipProof { #[derive(Error, Debug)] pub enum 
Error { #[error("Invalid key: {0}")] - InvalidKey(StorageError), + InvalidKey(#[from] namada_core::storage::Error), #[error("Invalid key for merkle tree: {0}")] InvalidMerkleKey(String), #[error("Storage tree key error: {0}")] @@ -141,6 +140,12 @@ pub enum Error { RootValidationError, } +impl From for Error { + fn from(error: MtError) -> Self { + Error::MerkleTree(error.to_string()) + } +} + /// Result for functions that may fail type Result = std::result::Result; @@ -950,18 +955,6 @@ impl<'a> MerkleTreeStoresWrite<'a> { } } -impl From for Error { - fn from(error: StorageError) -> Self { - Error::InvalidKey(error) - } -} - -impl From for Error { - fn from(error: MtError) -> Self { - Error::MerkleTree(error.to_string()) - } -} - /// A storage key existence or non-existence proof #[derive(Debug)] pub struct Proof { diff --git a/crates/node/src/dry_run_tx.rs b/crates/node/src/dry_run_tx.rs index 797718b5cc..794b509d12 100644 --- a/crates/node/src/dry_run_tx.rs +++ b/crates/node/src/dry_run_tx.rs @@ -7,7 +7,7 @@ use namada_sdk::gas::{GasMetering, TxGasMeter}; use namada_sdk::parameters; use namada_sdk::queries::{EncodedResponseQuery, RequestQuery}; use namada_sdk::state::{ - DBIter, ResultExt, StorageHasher, StorageResult, TxIndex, DB, + DBIter, Result, ResultExt, StorageHasher, TxIndex, DB, }; use namada_sdk::tx::data::{ DryRunResult, ExtendedTxResult, GasLimit, TxResult, TxType, @@ -25,7 +25,7 @@ pub fn dry_run_tx( mut vp_wasm_cache: VpCache, mut tx_wasm_cache: TxCache, request: &RequestQuery, -) -> StorageResult +) -> Result where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, @@ -235,7 +235,7 @@ mod test { data: Option>, height: Option, prove: bool, - ) -> Result { + ) -> std::result::Result { let data = data.unwrap_or_default(); let height = height.unwrap_or_default(); // Handle a path by invoking the `RPC.handle` directly with the @@ -276,7 +276,10 @@ mod test { }) } - async fn perform(&self, _request: R) -> Result + async 
fn perform( + &self, + _request: R, + ) -> std::result::Result where R: namada_sdk::tendermint_rpc::SimpleRequest, { @@ -285,7 +288,7 @@ mod test { } #[tokio::test] - async fn test_shell_queries_router_with_client() -> StorageResult<()> { + async fn test_shell_queries_router_with_client() -> Result<()> { // Initialize the `TestClient` let mut client = TestClient::new(RPC); // store the wasm code diff --git a/crates/node/src/lib.rs b/crates/node/src/lib.rs index 38cdf78c6f..0da59b88e8 100644 --- a/crates/node/src/lib.rs +++ b/crates/node/src/lib.rs @@ -621,7 +621,7 @@ fn start_abci_broadcaster_shell( setup_data: RunAuxSetup, config: config::Ledger, ) -> ( - task::JoinHandle>, + task::JoinHandle>, task::JoinHandle<()>, thread::JoinHandle<()>, ) { @@ -735,7 +735,7 @@ async fn run_abci( service_handle: tokio::sync::broadcast::Sender<()>, proxy_app_address: SocketAddr, abort_recv: tokio::sync::oneshot::Receiver<()>, -) -> shell::Result<()> { +) -> shell::ShellResult<()> { // Split it into components. 
let (consensus, mempool, snapshot, info) = split::service(abci_service, 5); @@ -774,7 +774,7 @@ async fn run_abci( fn start_tendermint( spawner: &mut AbortableSpawner, config: &config::Ledger, -) -> task::JoinHandle> { +) -> task::JoinHandle> { let tendermint_dir = config.cometbft_dir(); let chain_id = config.chain_id.clone(); let proxy_app_address = config.cometbft.proxy_app.to_string(); diff --git a/crates/node/src/protocol.rs b/crates/node/src/protocol.rs index 80fa62227b..4012d77506 100644 --- a/crates/node/src/protocol.rs +++ b/crates/node/src/protocol.rs @@ -33,10 +33,7 @@ use namada_sdk::validation::{ EthBridgeNutVp, EthBridgePoolVp, EthBridgeVp, GovernanceVp, IbcVp, MaspVp, MultitokenVp, NativeVpCtx, ParametersVp, PgfVp, PosVp, }; -use namada_sdk::{ - eth_bridge, governance, ibc, parameters, proof_of_stake, state, storage, - token, -}; +use namada_sdk::{governance, parameters, state, storage, token}; use namada_vm::wasm::{TxCache, VpCache}; use namada_vm::{self, wasm, WasmCacheAccess}; use namada_vote_ext::EthereumTxData; @@ -55,7 +52,7 @@ pub enum Error { #[error("State error: {0}")] StateError(state::Error), #[error("Storage error: {0}")] - StorageError(state::StorageError), + Error(state::Error), #[error("Wrapper tx runner error: {0}")] WrapperRunnerError(String), #[error("Transaction runner error: {0}")] @@ -78,28 +75,8 @@ pub enum Error { VpRunnerError(wasm::run::Error), #[error("The address {0} doesn't exist")] MissingAddress(Address), - #[error("IBC native VP: {0}")] - IbcNativeVpError(ibc::vp::Error), - #[error("PoS native VP: {0}")] - PosNativeVpError(proof_of_stake::vp::Error), - #[error("PoS native VP panicked")] - PosNativeVpRuntime, - #[error("Parameters native VP: {0}")] - ParametersNativeVpError(parameters::vp::Error), - #[error("Multitoken native VP: {0}")] - MultitokenNativeVpError(token::vp::MultitokenError), - #[error("Governance native VP error: {0}")] - GovernanceNativeVpError(governance::vp::Error), - #[error("Pgf native VP error: 
{0}")] - PgfNativeVpError(governance::vp::pgf::Error), - #[error("Ethereum bridge native VP error: {0:?}")] - EthBridgeNativeVpError(eth_bridge::vp::EthBridgeError), - #[error("Ethereum bridge pool native VP error: {0:?}")] - BridgePoolNativeVpError(eth_bridge::vp::BridgePoolError), - #[error("Non usable tokens native VP error: {0:?}")] - NutNativeVpError(eth_bridge::vp::NutError), - #[error("MASP native VP error: {0}")] - MaspNativeVpError(token::vp::MaspError), + #[error("Native VP error: {0}")] + NativeVpError(state::Error), #[error("Access to an internal address {0:?} is forbidden")] AccessForbidden(InternalAddress), } @@ -559,7 +536,7 @@ where &wrapper.fee.token, shell_params.state, ) - .map_err(Error::StorageError)?; + .map_err(Error::Error)?; #[cfg(not(fuzzing))] let balance = token::read_balance( @@ -567,7 +544,7 @@ where &wrapper.fee.token, &wrapper.fee_payer(), ) - .map_err(Error::StorageError)?; + .map_err(Error::Error)?; // Use half of the max value to make the balance check pass // sometimes with arbitrary fees @@ -652,7 +629,7 @@ where &wrapper.fee.token, block_proposer, ) - .map_err(Error::StorageError)? + .map_err(Error::Error)? .into(), ); @@ -727,7 +704,7 @@ where .expect("Error reading the storage") .expect("Missing masp fee payment gas limit in storage") .min(tx_gas_meter.borrow().tx_gas_limit.into()); - let gas_scale = get_gas_scale(&**state).map_err(Error::StorageError)?; + let gas_scale = get_gas_scale(&**state).map_err(Error::Error)?; let mut gas_meter = TxGasMeter::new( Gas::from_whole_units(max_gas_limit.into(), gas_scale).ok_or_else( @@ -848,9 +825,7 @@ fn get_optional_masp_ref>( let actions = state.read_actions().map_err(Error::StateError)?; action::get_masp_section_ref(&actions) .map_err(|msg| { - Error::StateError(state::Error::Temporary { - error: msg.to_string(), - }) + Error::StateError(state::Error::new_alloc(msg.to_string())) })? 
.map(Either::Left) }; @@ -875,7 +850,7 @@ where .map_err(|err| { state.write_log_mut().drop_tx(); - Error::StorageError(err) + Error::Error(err) }) } @@ -902,14 +877,14 @@ where &wrapper.fee.token, shell_params.state, ) - .map_err(Error::StorageError)?; + .map_err(Error::Error)?; let balance = token::read_balance( shell_params.state, &wrapper.fee.token, &wrapper.fee_payer(), ) - .map_err(Error::StorageError)?; + .map_err(Error::Error)?; checked!(balance - fees).map_or_else( |_| { @@ -927,7 +902,7 @@ where &wrapper.fee.token, &wrapper.fee_payer(), ) - .map_err(Error::StorageError)?; + .map_err(Error::Error)?; checked!(balance - fees).map_or_else( |_| { @@ -1184,232 +1159,233 @@ where S: 'static + State + Sync, CA: 'static + WasmCacheAccess + Sync, { - let vps_result = - verifiers - .par_iter() - .try_fold( - || (VpsResult::default(), Gas::from(0)), - |(mut result, mut vps_gas), addr| { - let gas_meter = RefCell::new( - VpGasMeter::new_from_tx_meter(tx_gas_meter), - ); - let tx_accepted = - match &addr { - Address::Implicit(_) | Address::Established(_) => { - let (vp_hash, gas) = state - .validity_predicate::>(addr) - .map_err(Error::StateError)?; - gas_meter.borrow_mut().consume(gas).map_err( - |err| Error::GasError(err.to_string()), - )?; - let Some(vp_code_hash) = vp_hash else { - return Err(Error::MissingAddress( - addr.clone(), - )); - }; - - wasm::run::vp( - vp_code_hash, - batched_tx, - tx_index, - addr, - state, - &gas_meter, - &keys_changed, - &verifiers, - vp_wasm_cache.clone(), - ) - .map_err( - |err| { - match err { - wasm::run::Error::GasError(msg) => Error::GasError(msg), - wasm::run::Error::InvalidSectionSignature(msg) => { - Error::InvalidSectionSignature(msg) - } - _ => Error::VpRunnerError(err), - } - }, - ) + let vps_result = verifiers + .par_iter() + .try_fold( + || (VpsResult::default(), Gas::from(0)), + |(mut result, mut vps_gas), addr| { + let gas_meter = + RefCell::new(VpGasMeter::new_from_tx_meter(tx_gas_meter)); + let tx_accepted = match 
&addr { + Address::Implicit(_) | Address::Established(_) => { + let (vp_hash, gas) = state + .validity_predicate::>(addr) + .map_err(Error::StateError)?; + gas_meter + .borrow_mut() + .consume(gas) + .map_err(|err| Error::GasError(err.to_string()))?; + let Some(vp_code_hash) = vp_hash else { + return Err(Error::MissingAddress(addr.clone())); + }; + + wasm::run::vp( + vp_code_hash, + batched_tx, + tx_index, + addr, + state, + &gas_meter, + &keys_changed, + &verifiers, + vp_wasm_cache.clone(), + ) + .map_err(|err| match err { + wasm::run::Error::GasError(msg) => { + Error::GasError(msg) } - Address::Internal(internal_addr) => { - let ctx = NativeVpCtx::new( - addr, - state, - batched_tx.tx, - batched_tx.cmt, - tx_index, - &gas_meter, - &keys_changed, - &verifiers, - vp_wasm_cache.clone(), - ); - - match internal_addr { - InternalAddress::PoS => { - let pos = PosVp::new(ctx); - pos.validate_tx( - batched_tx, - &keys_changed, - &verifiers, - ) - .map_err(Error::PosNativeVpError) - } - InternalAddress::Ibc => { - let ibc = IbcVp::new(ctx); - ibc.validate_tx( - batched_tx, - &keys_changed, - &verifiers, - ) - .map_err(Error::IbcNativeVpError) - } - InternalAddress::Parameters => { - let parameters = ParametersVp::new(ctx); - parameters - .validate_tx( - batched_tx, - &keys_changed, - &verifiers, - ) - .map_err(Error::ParametersNativeVpError) - } - InternalAddress::PosSlashPool => Err( - Error::AccessForbidden((*internal_addr).clone()), - ), - InternalAddress::Governance => { - let governance = GovernanceVp::new(ctx); - governance - .validate_tx( - batched_tx, - &keys_changed, - &verifiers, - ) - .map_err(Error::GovernanceNativeVpError) - } - InternalAddress::Pgf => { - let pgf_vp = PgfVp::new(ctx); - pgf_vp - .validate_tx( - batched_tx, - &keys_changed, - &verifiers, - ) - .map_err(Error::PgfNativeVpError) - } - InternalAddress::Multitoken => { - let multitoken = MultitokenVp::new(ctx); - multitoken - .validate_tx( - batched_tx, - &keys_changed, - &verifiers, - ) - 
.map_err(Error::MultitokenNativeVpError) - } - InternalAddress::Masp => { - let masp = MaspVp::new(ctx); - masp.validate_tx( - batched_tx, - &keys_changed, - &verifiers, - ) - .map_err(Error::MaspNativeVpError) - } - InternalAddress::EthBridge => { - let bridge = EthBridgeVp::new(ctx); - bridge - .validate_tx( + wasm::run::Error::InvalidSectionSignature(msg) => { + Error::InvalidSectionSignature(msg) + } + _ => Error::VpRunnerError(err), + }) + } + Address::Internal(internal_addr) => { + let ctx = NativeVpCtx::new( + addr, + state, + batched_tx.tx, + batched_tx.cmt, + tx_index, + &gas_meter, + &keys_changed, + &verifiers, + vp_wasm_cache.clone(), + ); + + match internal_addr { + InternalAddress::PoS => { + let pos = PosVp::new(ctx); + pos.validate_tx( batched_tx, &keys_changed, &verifiers, ) - .map_err(Error::EthBridgeNativeVpError) - } - InternalAddress::EthBridgePool => { - let bridge_pool = EthBridgePoolVp::new(ctx); - bridge_pool - .validate_tx( + .map_err(Error::NativeVpError) + } + InternalAddress::Ibc => { + let ibc = IbcVp::new(ctx); + ibc.validate_tx( batched_tx, &keys_changed, &verifiers, ) - .map_err(Error::BridgePoolNativeVpError) - } - InternalAddress::Nut(_) => { - let non_usable_tokens = EthBridgeNutVp::new(ctx); - non_usable_tokens - .validate_tx( + .map_err(Error::NativeVpError) + } + InternalAddress::Parameters => { + let parameters = ParametersVp::new(ctx); + parameters + .validate_tx( + batched_tx, + &keys_changed, + &verifiers, + ) + .map_err(Error::NativeVpError) + } + InternalAddress::PosSlashPool => { + Err(Error::AccessForbidden( + (*internal_addr).clone(), + )) + } + InternalAddress::Governance => { + let governance = GovernanceVp::new(ctx); + governance + .validate_tx( + batched_tx, + &keys_changed, + &verifiers, + ) + .map_err(Error::NativeVpError) + } + InternalAddress::Pgf => { + let pgf_vp = PgfVp::new(ctx); + pgf_vp + .validate_tx( + batched_tx, + &keys_changed, + &verifiers, + ) + .map_err(Error::NativeVpError) + } + 
InternalAddress::Multitoken => { + let multitoken = MultitokenVp::new(ctx); + multitoken + .validate_tx( + batched_tx, + &keys_changed, + &verifiers, + ) + .map_err(Error::NativeVpError) + } + InternalAddress::Masp => { + let masp = MaspVp::new(ctx); + masp.validate_tx( batched_tx, &keys_changed, &verifiers, ) - .map_err(Error::NutNativeVpError) - } - internal_addr @ (InternalAddress::IbcToken(_) - | InternalAddress::Erc20(_)) => { - // The address should be a part of a multitoken - // key - verifiers - .contains(&Address::Internal( - InternalAddress::Multitoken, - )) - .ok_or_else(|| { - Error::AccessForbidden( - internal_addr.clone(), + .map_err(Error::NativeVpError) + } + InternalAddress::EthBridge => { + let bridge = EthBridgeVp::new(ctx); + bridge + .validate_tx( + batched_tx, + &keys_changed, + &verifiers, ) - }) + .map_err(Error::NativeVpError) + } + InternalAddress::EthBridgePool => { + let bridge_pool = EthBridgePoolVp::new(ctx); + bridge_pool + .validate_tx( + batched_tx, + &keys_changed, + &verifiers, + ) + .map_err(Error::NativeVpError) + } + InternalAddress::Nut(_) => { + let non_usable_tokens = + EthBridgeNutVp::new(ctx); + non_usable_tokens + .validate_tx( + batched_tx, + &keys_changed, + &verifiers, + ) + .map_err(Error::NativeVpError) + } + internal_addr @ (InternalAddress::IbcToken(_) + | InternalAddress::Erc20(_)) => { + // The address should be a part of a multitoken + // key + verifiers + .contains(&Address::Internal( + InternalAddress::Multitoken, + )) + .ok_or_else(|| { + Error::AccessForbidden( + internal_addr.clone(), + ) + }) + } + InternalAddress::TempStorage => Err( + // Temp storage changes must never be committed + Error::AccessForbidden( + (*internal_addr).clone(), + ), + ), + InternalAddress::ReplayProtection => Err( + // Replay protection entries should never be + // written to + // via transactions + Error::AccessForbidden( + (*internal_addr).clone(), + ), + ), } - InternalAddress::TempStorage => Err( - // Temp storage changes must 
never be committed - Error::AccessForbidden((*internal_addr).clone()), - ), - InternalAddress::ReplayProtection => Err( - // Replay protection entries should never be - // written to - // via transactions - Error::AccessForbidden((*internal_addr).clone()), - ), } - } - }; + }; - tx_accepted.map_or_else( - |err| { - result - .status_flags - .insert(err.invalid_section_signature_flag()); - result.rejected_vps.insert(addr.clone()); - result.errors.push((addr.clone(), err.to_string())); - }, - |()| { - result.accepted_vps.insert(addr.clone()); - }, - ); + tx_accepted.map_or_else( + |err| { + result + .status_flags + .insert(err.invalid_section_signature_flag()); + result.rejected_vps.insert(addr.clone()); + result.errors.push((addr.clone(), err.to_string())); + }, + |()| { + result.accepted_vps.insert(addr.clone()); + }, + ); - // Execution of VPs can (and must) be short-circuited - // only in case of a gas overflow to prevent the - // transaction from consuming resources that have not - // been acquired in the corresponding wrapper tx. For - // all the other errors we keep evaluating the vps. This - // allows to display a consistent VpsResult across all - // nodes and find any invalid signatures - vps_gas = vps_gas - .checked_add(gas_meter.borrow().get_vp_consumed_gas()) - .ok_or(Error::GasError( - gas::Error::GasOverflow.to_string(), - ))?; - gas_meter - .borrow() - .check_vps_limit(vps_gas) - .map_err(|err| Error::GasError(err.to_string()))?; - - Ok((result, vps_gas)) - }, - ) - .try_reduce( - || (VpsResult::default(), Gas::from(0)), - |a, b| merge_vp_results(a, b, tx_gas_meter), - )?; + // Execution of VPs can (and must) be short-circuited + // only in case of a gas overflow to prevent the + // transaction from consuming resources that have not + // been acquired in the corresponding wrapper tx. For + // all the other errors we keep evaluating the vps. 
This + // allows to display a consistent VpsResult across all + // nodes and find any invalid signatures + vps_gas = vps_gas + .checked_add(gas_meter.borrow().get_vp_consumed_gas()) + .ok_or(Error::GasError( + gas::Error::GasOverflow.to_string(), + ))?; + gas_meter + .borrow() + .check_vps_limit(vps_gas) + .map_err(|err| Error::GasError(err.to_string()))?; + + Ok((result, vps_gas)) + }, + ) + .try_reduce( + || (VpsResult::default(), Gas::from(0)), + |a, b| merge_vp_results(a, b, tx_gas_meter), + )?; Ok(vps_result) } diff --git a/crates/node/src/shell/finalize_block.rs b/crates/node/src/shell/finalize_block.rs index 50eda190c8..9bf7e65809 100644 --- a/crates/node/src/shell/finalize_block.rs +++ b/crates/node/src/shell/finalize_block.rs @@ -18,7 +18,7 @@ use namada_sdk::proof_of_stake::storage::{ }; use namada_sdk::state::write_log::StorageModification; use namada_sdk::state::{ - ResultExt, StorageResult, StorageWrite, EPOCH_SWITCH_BLOCKS_DELAY, + Result, ResultExt, StorageWrite, EPOCH_SWITCH_BLOCKS_DELAY, }; use namada_sdk::storage::{BlockHeader, BlockResults, Epoch}; use namada_sdk::tx::data::protocol::ProtocolTxType; @@ -49,7 +49,7 @@ where pub fn finalize_block( &mut self, req: shim::request::FinalizeBlock, - ) -> Result { + ) -> ShellResult { let mut response = shim::response::FinalizeBlock::default(); // Begin the new block and check if a new epoch has begun @@ -1141,7 +1141,7 @@ fn token_finalize_block( storage: &mut S, events: &mut Vec, is_new_masp_epoch: bool, -) -> StorageResult<()> +) -> Result<()> where S: StorageWrite + StorageRead + token::WithConversionState, { @@ -1160,7 +1160,7 @@ fn pos_finalize_block( validator_set_update_epoch: Epoch, votes: Vec, byzantine_validators: Vec, -) -> StorageResult<()> +) -> Result<()> where S: StorageWrite + StorageRead, { @@ -1175,7 +1175,7 @@ where } /// Dependency-injection indirection for PGF inflation -fn pgf_apply_inflation(storage: &mut S) -> StorageResult<()> +fn pgf_apply_inflation(storage: &mut S) -> 
Result<()> where S: 'static + State + EmitEvents, { @@ -3953,7 +3953,7 @@ mod test_finalize_block { } #[test] - fn test_ledger_slashing() -> namada_sdk::state::StorageResult<()> { + fn test_ledger_slashing() -> namada_sdk::state::Result<()> { let num_validators = 7_u64; let (mut shell, _recv, _, _) = setup_with_cfg(SetupCfg { last_height: 0, @@ -4327,7 +4327,7 @@ mod test_finalize_block { /// NOTE: must call `get_default_true_votes` before every call to /// `next_block_for_inflation` #[test] - fn test_multiple_misbehaviors() -> namada_sdk::state::StorageResult<()> { + fn test_multiple_misbehaviors() -> namada_sdk::state::Result<()> { for num_validators in &[4_u64, 6_u64, 9_u64] { tracing::debug!("\nNUM VALIDATORS = {}", num_validators); test_multiple_misbehaviors_by_num_vals(*num_validators)?; @@ -4347,7 +4347,7 @@ mod test_finalize_block { /// 7) Discover misbehavior in epoch 4 fn test_multiple_misbehaviors_by_num_vals( num_validators: u64, - ) -> namada_sdk::state::StorageResult<()> { + ) -> namada_sdk::state::Result<()> { // Setup the network with pipeline_len = 2, unbonding_len = 4 // let num_validators = 8_u64; let (mut shell, _recv, _, _) = setup_with_cfg(SetupCfg { @@ -5163,8 +5163,7 @@ mod test_finalize_block { } #[test] - fn test_jail_validator_for_inactivity() - -> namada_sdk::state::StorageResult<()> { + fn test_jail_validator_for_inactivity() -> namada_sdk::state::Result<()> { let num_validators = 5_u64; let (mut shell, _recv, _, _) = setup_with_cfg(SetupCfg { last_height: 0, diff --git a/crates/node/src/shell/governance.rs b/crates/node/src/shell/governance.rs index 7b6a66e0b4..04842694c5 100644 --- a/crates/node/src/shell/governance.rs +++ b/crates/node/src/shell/governance.rs @@ -37,7 +37,7 @@ pub fn finalize_block( events: &mut impl EmitEvents, current_epoch: Epoch, is_new_epoch: bool, -) -> Result<()> +) -> ShellResult<()> where D: 'static + DB + for<'iter> DBIter<'iter> + Sync, H: 'static + StorageHasher + Sync, @@ -58,7 +58,7 @@ pub fn 
load_and_execute_governance_proposals( shell: &mut Shell, events: &mut impl EmitEvents, current_epoch: Epoch, -) -> Result +) -> ShellResult where D: DB + for<'iter> DBIter<'iter> + Sync + 'static, H: StorageHasher + Sync + 'static, @@ -75,7 +75,7 @@ fn execute_governance_proposals( shell: &mut Shell, events: &mut impl EmitEvents, proposal_ids: BTreeSet, -) -> Result +) -> ShellResult where D: DB + for<'iter> DBIter<'iter> + Sync + 'static, H: StorageHasher + Sync + 'static, @@ -300,7 +300,7 @@ fn compute_proposal_votes( params: &PosParams, proposal_id: u64, epoch: Epoch, -) -> namada_sdk::state::StorageResult +) -> namada_sdk::state::Result where S: StorageRead, { @@ -384,7 +384,7 @@ fn execute_default_proposal( shell: &mut Shell, id: u64, proposal_code: Vec, -) -> namada_sdk::state::StorageResult +) -> namada_sdk::state::Result where D: DB + for<'iter> DBIter<'iter> + Sync + 'static, H: StorageHasher + Sync + 'static, @@ -453,7 +453,7 @@ where fn execute_pgf_steward_proposal( storage: &mut S, stewards: BTreeSet>, -) -> Result +) -> ShellResult where S: StorageRead + StorageWrite, { @@ -504,7 +504,7 @@ fn execute_pgf_funding_proposal( token: &Address, fundings: BTreeSet, proposal_id: u64, -) -> Result +) -> ShellResult where D: DB + for<'iter> DBIter<'iter> + Sync + 'static, H: StorageHasher + Sync + 'static, diff --git a/crates/node/src/shell/init_chain.rs b/crates/node/src/shell/init_chain.rs index aa18d62c98..c962e7fa39 100644 --- a/crates/node/src/shell/init_chain.rs +++ b/crates/node/src/shell/init_chain.rs @@ -87,7 +87,7 @@ where init: request::InitChain, #[cfg(any(test, feature = "testing", feature = "benches"))] _num_validators: u64, - ) -> Result { + ) -> ShellResult { let mut response = response::InitChain::default(); let chain_id = self.state.in_mem().chain_id.as_str(); if chain_id != init.chain_id.as_str() { @@ -911,7 +911,7 @@ where } /// This should only be called after checking that `is_ok` returned false. 
- fn error_out(mut self) -> Result<()> { + fn error_out(mut self) -> ShellResult<()> { if self.is_ok() { return Ok(()); } diff --git a/crates/node/src/shell/mod.rs b/crates/node/src/shell/mod.rs index fa29d58052..9617e1cb66 100644 --- a/crates/node/src/shell/mod.rs +++ b/crates/node/src/shell/mod.rs @@ -121,7 +121,7 @@ pub enum Error { #[error("Error loading wasm: {0}")] LoadingWasm(String), #[error("Error reading from or writing to storage: {0}")] - Storage(#[from] namada_sdk::state::StorageError), + Storage(#[from] namada_sdk::state::Error), #[error("Transaction replay attempt: {0}")] ReplayAttempt(String), #[error("Error with snapshots: {0}")] @@ -144,9 +144,9 @@ impl From for TxResult { } } -pub type Result = std::result::Result; +pub type ShellResult = std::result::Result; -pub fn reset(config: config::Ledger) -> Result<()> { +pub fn reset(config: config::Ledger) -> ShellResult<()> { // simply nuke the DB files let db_path = &config.db_dir(); match std::fs::remove_dir_all(db_path) { @@ -158,7 +158,7 @@ pub fn reset(config: config::Ledger) -> Result<()> { Ok(()) } -pub fn rollback(config: config::Ledger) -> Result<()> { +pub fn rollback(config: config::Ledger) -> ShellResult<()> { // Rollback Tendermint state tracing::info!("Rollback Tendermint state"); let tendermint_block_height = @@ -171,7 +171,7 @@ pub fn rollback(config: config::Ledger) -> Result<()> { tracing::info!("Rollback Namada state"); db.rollback(tendermint_block_height) - .map_err(|e| Error::Storage(namada_sdk::state::StorageError::new(e))) + .map_err(|e| Error::Storage(namada_sdk::state::Error::new(e))) } #[derive(Debug)] @@ -1290,7 +1290,7 @@ where // because we're using domain types in InitChain, but FinalizeBlock is // shimmed with a different old type. The joy... 
mut validator_conv: F, - ) -> namada_sdk::state::StorageResult> + ) -> namada_sdk::state::Result> where F: FnMut(common::PublicKey, i64) -> V, { @@ -1347,7 +1347,7 @@ where pub fn replay_protection_checks( wrapper: &Tx, temp_state: &mut TempWlState<'_, D, H>, -) -> Result<()> +) -> ShellResult<()> where D: DB + for<'iter> DBIter<'iter> + Sync + 'static, H: StorageHasher + Sync + 'static, @@ -1387,7 +1387,7 @@ fn mempool_fee_check( shell_params: &mut ShellParams<'_, TempWlState<'static, D, H>, D, H, CA>, tx: &Tx, wrapper: &WrapperTx, -) -> Result<()> +) -> ShellResult<()> where D: DB + for<'iter> DBIter<'iter> + Sync + 'static, H: StorageHasher + Sync + 'static, @@ -1412,7 +1412,7 @@ pub fn fee_data_check( wrapper: &WrapperTx, minimum_gas_price: token::Amount, shell_params: &mut ShellParams<'_, TempWlState<'_, D, H>, D, H, CA>, -) -> Result<()> +) -> ShellResult<()> where D: DB + for<'iter> DBIter<'iter> + Sync + 'static, H: StorageHasher + Sync + 'static, @@ -1718,7 +1718,7 @@ pub mod test_utils { pub fn finalize_block( &mut self, req: FinalizeBlock, - ) -> Result> { + ) -> ShellResult> { match self.shell.finalize_block(req) { Ok(resp) => Ok(resp.events), Err(err) => Err(err), diff --git a/crates/node/src/shell/process_proposal.rs b/crates/node/src/shell/process_proposal.rs index ee2a3ed650..e7f5ee9db7 100644 --- a/crates/node/src/shell/process_proposal.rs +++ b/crates/node/src/shell/process_proposal.rs @@ -544,7 +544,7 @@ fn process_proposal_fee_check( tx_index: &TxIndex, proposer: &Address, shell_params: &mut ShellParams<'_, TempWlState<'static, D, H>, D, H, CA>, -) -> Result<()> +) -> ShellResult<()> where D: DB + for<'iter> DBIter<'iter> + Sync + 'static, H: StorageHasher + Sync + 'static, diff --git a/crates/node/src/shell/snapshots.rs b/crates/node/src/shell/snapshots.rs index 238eb8f6b2..4128bf20f5 100644 --- a/crates/node/src/shell/snapshots.rs +++ b/crates/node/src/shell/snapshots.rs @@ -2,7 +2,7 @@ use borsh_ext::BorshSerializeExt; use 
namada_sdk::hash::{Hash, Sha256Hasher}; use namada_sdk::state::BlockHeight; -use super::{Error, Result}; +use super::{Error, ShellResult}; use crate::facade::tendermint::abci::types::Snapshot; use crate::facade::tendermint::v0_37::abci::{ request as tm_request, response as tm_response, @@ -16,7 +16,7 @@ impl Shell { /// of chunks, as hash of each chunk, and a hash of the chunk /// metadata are provided so that syncing nodes can verify can verify /// snapshots they receive. - pub fn list_snapshots(&self) -> Result { + pub fn list_snapshots(&self) -> ShellResult { if self.blocks_between_snapshots.is_none() { Ok(Default::default()) } else { @@ -45,7 +45,7 @@ impl Shell { pub fn load_snapshot_chunk( &self, req: tm_request::LoadSnapshotChunk, - ) -> Result { + ) -> ShellResult { let chunk = DbSnapshot::load_chunk( BlockHeight(req.height.into()), u64::from(req.chunk), diff --git a/crates/node/src/shell/utils.rs b/crates/node/src/shell/utils.rs index f34773efa3..6e405f54c8 100644 --- a/crates/node/src/shell/utils.rs +++ b/crates/node/src/shell/utils.rs @@ -2,10 +2,7 @@ use borsh::BorshDeserialize; use namada_sdk::state::{self, StorageRead}; use namada_sdk::storage::Key; -pub(super) fn force_read( - storage: &S, - key: &Key, -) -> state::StorageResult +pub(super) fn force_read(storage: &S, key: &Key) -> state::Result where S: StorageRead, T: BorshDeserialize, diff --git a/crates/parameters/src/lib.rs b/crates/parameters/src/lib.rs index d924f4a483..fba32e6617 100644 --- a/crates/parameters/src/lib.rs +++ b/crates/parameters/src/lib.rs @@ -29,9 +29,7 @@ use namada_core::chain::BlockHeight; pub use namada_core::parameters::ProposalBytes; use namada_core::time::DurationSecs; use namada_core::{hints, token}; -use namada_state::{ - Key, ResultExt, StorageError, StorageRead, StorageResult, StorageWrite, -}; +use namada_state::{Error, Key, ResultExt, StorageRead, StorageWrite}; pub use namada_systems::parameters::*; pub use storage::{get_gas_scale, get_max_block_gas}; use 
thiserror::Error; @@ -101,7 +99,7 @@ pub const ADDRESS: Address = Address::Internal(InternalAddress::Parameters); #[derive(Error, Debug)] pub enum ReadError { #[error("Storage error: {0}")] - StorageError(StorageError), + Error(Error), #[error("Storage type error: {0}")] StorageTypeError(namada_core::storage::Error), #[error("Protocol parameters are missing, they must be always set")] @@ -112,16 +110,13 @@ pub enum ReadError { #[derive(Error, Debug)] pub enum WriteError { #[error("Storage error: {0}")] - StorageError(StorageError), + Error(Error), #[error("Serialize error: {0}")] SerializeError(String), } /// Initialize parameters in storage in the genesis block. -pub fn init_storage( - parameters: &Parameters, - storage: &mut S, -) -> StorageResult<()> +pub fn init_storage(parameters: &Parameters, storage: &mut S) -> Result<()> where S: StorageRead + StorageWrite, { @@ -212,7 +207,7 @@ where pub fn update_vp_allowlist_parameter( storage: &mut S, value: Vec, -) -> StorageResult<()> +) -> Result<()> where S: StorageRead + StorageWrite, { @@ -231,7 +226,7 @@ where pub fn update_tx_allowlist_parameter( storage: &mut S, value: Vec, -) -> StorageResult<()> +) -> Result<()> where S: StorageRead + StorageWrite, { @@ -250,7 +245,7 @@ where pub fn update_epoch_parameter( storage: &mut S, value: &EpochDuration, -) -> StorageResult<()> +) -> Result<()> where S: StorageRead + StorageWrite, { @@ -263,7 +258,7 @@ where pub fn update_epochs_per_year_parameter( storage: &mut S, value: &u64, -) -> StorageResult<()> +) -> Result<()> where S: StorageRead + StorageWrite, { @@ -272,10 +267,7 @@ where } /// Update the implicit VP parameter in storage. Return the gas cost. 
-pub fn update_implicit_vp( - storage: &mut S, - implicit_vp: &[u8], -) -> StorageResult<()> +pub fn update_implicit_vp(storage: &mut S, implicit_vp: &[u8]) -> Result<()> where S: StorageRead + StorageWrite, { @@ -286,7 +278,7 @@ where } /// Read the epochs per year parameter from store -pub fn read_epochs_per_year_parameter(storage: &S) -> StorageResult +pub fn read_epochs_per_year_parameter(storage: &S) -> Result where S: StorageRead, { @@ -298,9 +290,7 @@ where } /// Read the epoch duration parameter from store -pub fn read_epoch_duration_parameter( - storage: &S, -) -> StorageResult +pub fn read_epoch_duration_parameter(storage: &S) -> Result where S: StorageRead, { @@ -313,9 +303,7 @@ where } /// Read the masp epoch multiplier parameter from store -pub fn read_masp_epoch_multiplier_parameter( - storage: &S, -) -> StorageResult +pub fn read_masp_epoch_multiplier_parameter(storage: &S) -> Result where S: StorageRead, { @@ -331,7 +319,7 @@ where pub fn read_gas_cost( storage: &S, token: &Address, -) -> StorageResult> +) -> Result> where S: StorageRead, { @@ -343,7 +331,7 @@ where } /// Read the number of epochs per year parameter -pub fn read_epochs_per_year(storage: &S) -> StorageResult +pub fn read_epochs_per_year(storage: &S) -> Result where S: StorageRead, { @@ -355,7 +343,7 @@ where } /// Retrieve the `max_proposal_bytes` consensus parameter from storage. -pub fn read_max_proposal_bytes(storage: &S) -> StorageResult +pub fn read_max_proposal_bytes(storage: &S) -> Result where S: StorageRead, { @@ -368,7 +356,7 @@ where /// Read all the parameters from storage. Returns the parameters and gas /// cost. -pub fn read(storage: &S) -> StorageResult +pub fn read(storage: &S) -> Result where S: StorageRead, { @@ -477,7 +465,7 @@ where } /// Validate the size of a tx. 
-pub fn validate_tx_bytes(storage: &S, tx_size: usize) -> StorageResult +pub fn validate_tx_bytes(storage: &S, tx_size: usize) -> Result where S: StorageRead, { @@ -494,7 +482,7 @@ pub fn native_erc20_key() -> storage::Key { /// Initialize parameters to the storage for testing #[cfg(any(test, feature = "testing"))] -pub fn init_test_storage(storage: &mut S) -> StorageResult<()> +pub fn init_test_storage(storage: &mut S) -> Result<()> where S: StorageRead + StorageWrite, { @@ -527,7 +515,7 @@ pub fn estimate_max_block_time_from_blocks( storage: &S, last_block_height: BlockHeight, num_blocks_to_read: u64, -) -> StorageResult> +) -> Result> where S: StorageRead, { @@ -578,7 +566,7 @@ where /// based on chain parameters. pub fn estimate_max_block_time_from_parameters( storage: &S, -) -> StorageResult +) -> Result where S: StorageRead, { @@ -608,7 +596,7 @@ pub fn estimate_max_block_time_from_blocks_and_params( storage: &S, last_block_height: BlockHeight, num_blocks_to_read: u64, -) -> StorageResult +) -> Result where S: StorageRead, { diff --git a/crates/parameters/src/storage.rs b/crates/parameters/src/storage.rs index 4de92ba473..0e9eaff8a6 100644 --- a/crates/parameters/src/storage.rs +++ b/crates/parameters/src/storage.rs @@ -4,7 +4,7 @@ use namada_core::address::Address; use namada_core::storage::DbKeySeg; pub use namada_core::storage::Key; use namada_macros::StorageKeys; -use namada_state::{StorageError, StorageRead, StorageResult}; +use namada_state::{Error, Result, StorageRead}; use super::ADDRESS; @@ -158,20 +158,20 @@ pub fn get_gas_cost_key() -> Key { /// Helper function to retrieve the `max_block_gas` protocol parameter from /// storage -pub fn get_max_block_gas(storage: &impl StorageRead) -> StorageResult { +pub fn get_max_block_gas(storage: &impl StorageRead) -> Result { storage .read(&get_max_block_gas_key())? 
- .ok_or(StorageError::SimpleMessage( + .ok_or(Error::SimpleMessage( "Missing max_block_gas parameter from storage", )) } /// Helper function to retrieve the `gas_scale` protocol parameter from /// storage -pub fn get_gas_scale(storage: &impl StorageRead) -> StorageResult { +pub fn get_gas_scale(storage: &impl StorageRead) -> Result { storage .read(&get_gas_scale_key())? - .ok_or(StorageError::SimpleMessage( + .ok_or(Error::SimpleMessage( "Missing gas_scale parameter from storage", )) } @@ -185,9 +185,9 @@ pub fn get_native_token_transferable_key() -> Key { /// parameter from storage pub fn is_native_token_transferable( storage: &impl StorageRead, -) -> StorageResult { +) -> Result { storage.read(&get_native_token_transferable_key())?.ok_or( - StorageError::SimpleMessage( + Error::SimpleMessage( "Missing is_native_token_transferable parameter from storage", ), ) diff --git a/crates/parameters/src/vp.rs b/crates/parameters/src/vp.rs index 6978fe988e..3d34467a30 100644 --- a/crates/parameters/src/vp.rs +++ b/crates/parameters/src/vp.rs @@ -9,22 +9,11 @@ use namada_state::{Key, StateRead}; use namada_systems::governance; use namada_tx::BatchedTxRef; use namada_vp::native_vp::{ - self, Ctx, CtxPreStorageRead, NativeVp, VpEvaluator, + Ctx, CtxPreStorageRead, Error, NativeVp, Result, VpEvaluator, }; -use thiserror::Error; use crate::storage; -#[allow(missing_docs)] -#[derive(Error, Debug)] -pub enum Error { - #[error("Parameters VP error: Native VP error: {0}")] - NativeVpError(#[from] native_vp::Error), -} - -/// Parameters functions result -pub type Result = std::result::Result; - /// Parameters VP pub struct ParametersVp<'ctx, S, CA, EVAL, Gov> where @@ -44,8 +33,6 @@ where EVAL: 'static + VpEvaluator<'ctx, S, CA, EVAL>, Gov: governance::Read>, { - type Error = Error; - fn validate_tx( &'view self, batched_tx: &BatchedTxRef<'_>, @@ -57,23 +44,20 @@ where let data = if let Some(data) = batched_tx.tx.data(batched_tx.cmt) { data } else { - return 
Err(native_vp::Error::new_const( + return Err(Error::new_const( "Token parameter changes require tx data to be present", - ) - .into()); + )); }; match key_type { KeyType::PARAMETER | KeyType::UNKNOWN_PARAMETER => { - Gov::is_proposal_accepted(&self.ctx.pre(), &data) - .map_err(Error::NativeVpError)? + Gov::is_proposal_accepted(&self.ctx.pre(), &data)? .ok_or_else(|| { - native_vp::Error::new_alloc(format!( + Error::new_alloc(format!( "Attempted to change a protocol parameter \ from outside of a governance proposal, or \ from a non-accepted governance proposal: \ {key}", )) - .into() }) } KeyType::UNKNOWN => Ok(()), diff --git a/crates/parameters/src/wasm_allowlist.rs b/crates/parameters/src/wasm_allowlist.rs index 1dad4e02a6..63e51d1bf1 100644 --- a/crates/parameters/src/wasm_allowlist.rs +++ b/crates/parameters/src/wasm_allowlist.rs @@ -1,5 +1,5 @@ use namada_core::hash::Hash; -use namada_state::{Key, StorageRead, StorageResult}; +use namada_state::{Key, Result, StorageRead}; use crate::storage::{ get_tx_allowlist_storage_key, get_vp_allowlist_storage_key, @@ -7,7 +7,7 @@ use crate::storage::{ /// Check if the given tx code `Hash` is in the allowlist. When the allowlist is /// empty it always returns true. -pub fn is_tx_allowed(storage: &S, tx_hash: &Hash) -> StorageResult +pub fn is_tx_allowed(storage: &S, tx_hash: &Hash) -> Result where S: StorageRead, { @@ -17,7 +17,7 @@ where /// Check if the given VP code `Hash` is in the allowlist. When the allowlist is /// empty it always returns true. 
-pub fn is_vp_allowed(storage: &S, vp_hash: &Hash) -> StorageResult +pub fn is_vp_allowed(storage: &S, vp_hash: &Hash) -> Result where S: StorageRead, { @@ -25,11 +25,7 @@ where is_allowed(storage, key, vp_hash) } -fn is_allowed( - storage: &S, - allowlist_key: Key, - hash: &Hash, -) -> StorageResult +fn is_allowed(storage: &S, allowlist_key: Key, hash: &Hash) -> Result where S: StorageRead, { diff --git a/crates/proof_of_stake/src/epoched.rs b/crates/proof_of_stake/src/epoched.rs index 02cb014ef7..f1be50b107 100644 --- a/crates/proof_of_stake/src/epoched.rs +++ b/crates/proof_of_stake/src/epoched.rs @@ -17,7 +17,7 @@ use namada_systems::governance; use crate::parameters::PosParams; use crate::{ - read_pos_params, Epoch, LazyMap, NestedMap, StorageRead, StorageResult, + read_pos_params, Epoch, LazyMap, NestedMap, Result, StorageRead, StorageWrite, }; @@ -83,7 +83,7 @@ where storage: &mut S, value: Data, current_epoch: Epoch, - ) -> StorageResult<()> + ) -> Result<()> where S: StorageWrite + StorageRead, { @@ -99,7 +99,7 @@ where storage: &S, epoch: Epoch, params: &PosParams, - ) -> StorageResult> + ) -> Result> where S: StorageRead, { @@ -141,7 +141,7 @@ where value: Data, current_epoch: Epoch, offset: u64, - ) -> StorageResult<()> + ) -> Result<()> where S: StorageWrite + StorageRead, Gov: governance::Read, @@ -157,7 +157,7 @@ where value: Data, current_epoch: Epoch, offset: u64, - ) -> StorageResult<()> + ) -> Result<()> where S: StorageWrite + StorageRead, { @@ -177,7 +177,7 @@ where storage: &mut S, params: &PosParams, current_epoch: Epoch, - ) -> StorageResult<()> + ) -> Result<()> where S: StorageWrite + StorageRead, { @@ -249,7 +249,7 @@ where .unwrap() } - fn get_last_update(&self, storage: &S) -> StorageResult> + fn get_last_update(&self, storage: &S) -> Result> where S: StorageRead, { @@ -277,7 +277,7 @@ where .unwrap() } - fn get_oldest_epoch(&self, storage: &S) -> StorageResult> + fn get_oldest_epoch(&self, storage: &S) -> Result> where S: StorageRead, 
{ @@ -289,7 +289,7 @@ where &self, storage: &mut S, new_oldest_epoch: Epoch, - ) -> StorageResult<()> + ) -> Result<()> where S: StorageRead + StorageWrite, { @@ -320,7 +320,7 @@ where } /// Initialize new nested data at the given epoch. - pub fn init(&self, storage: &mut S, epoch: Epoch) -> StorageResult<()> + pub fn init(&self, storage: &mut S, epoch: Epoch) -> Result<()> where S: StorageWrite + StorageRead, { @@ -336,10 +336,7 @@ where } /// Get the epoch of the most recent update - pub fn get_last_update( - &self, - storage: &S, - ) -> StorageResult> + pub fn get_last_update(&self, storage: &S) -> Result> where S: StorageRead, { @@ -352,7 +349,7 @@ where &self, storage: &mut S, current_epoch: Epoch, - ) -> StorageResult<()> + ) -> Result<()> where S: StorageWrite + StorageRead, { @@ -367,10 +364,7 @@ where } /// Get the oldest epoch at which data is stored - pub fn get_oldest_epoch( - &self, - storage: &S, - ) -> StorageResult> + pub fn get_oldest_epoch(&self, storage: &S) -> Result> where S: StorageRead, { @@ -382,7 +376,7 @@ where &self, storage: &mut S, new_oldest_epoch: Epoch, - ) -> StorageResult<()> + ) -> Result<()> where S: StorageRead + StorageWrite, { @@ -402,7 +396,7 @@ where storage: &mut S, params: &PosParams, current_epoch: Epoch, - ) -> StorageResult<()> + ) -> Result<()> where S: StorageRead + StorageWrite, { @@ -480,7 +474,7 @@ where storage: &mut S, value: Data, current_epoch: Epoch, - ) -> StorageResult<()> + ) -> Result<()> where S: StorageWrite + StorageRead, { @@ -495,7 +489,7 @@ where &self, storage: &S, epoch: Epoch, - ) -> StorageResult> + ) -> Result> where S: StorageRead, { @@ -508,7 +502,7 @@ where storage: &S, epoch: Epoch, params: &PosParams, - ) -> StorageResult> + ) -> Result> where S: StorageRead, { @@ -550,7 +544,7 @@ where value: Data, current_epoch: Epoch, offset: u64, - ) -> StorageResult<()> + ) -> Result<()> where S: StorageWrite + StorageRead, Gov: governance::Read, @@ -572,7 +566,7 @@ where value: Data, current_epoch: 
Epoch, offset: u64, - ) -> StorageResult<()> + ) -> Result<()> where S: StorageWrite + StorageRead, Gov: governance::Read, @@ -588,7 +582,7 @@ where value: Data, current_epoch: Epoch, offset: u64, - ) -> StorageResult<()> + ) -> Result<()> where S: StorageWrite + StorageRead, { @@ -606,7 +600,7 @@ where storage: &mut S, params: &PosParams, current_epoch: Epoch, - ) -> StorageResult<()> + ) -> Result<()> where S: StorageWrite + StorageRead, { @@ -687,10 +681,7 @@ where } /// Get the epoch of the most recent update - pub fn get_last_update( - &self, - storage: &S, - ) -> StorageResult> + pub fn get_last_update(&self, storage: &S) -> Result> where S: StorageRead, { @@ -708,10 +699,7 @@ where } /// Read all the data into a `HashMap` - pub fn to_hashmap( - &self, - storage: &S, - ) -> StorageResult> + pub fn to_hashmap(&self, storage: &S) -> Result> where S: StorageRead, { @@ -731,7 +719,7 @@ where .unwrap() } - fn get_oldest_epoch(&self, storage: &S) -> StorageResult> + fn get_oldest_epoch(&self, storage: &S) -> Result> where S: StorageRead, { @@ -743,7 +731,7 @@ where &self, storage: &mut S, new_oldest_epoch: Epoch, - ) -> StorageResult<()> + ) -> Result<()> where S: StorageRead + StorageWrite, { @@ -1116,7 +1104,7 @@ mod test { use crate::types::GenesisValidator; #[test] - fn test_epoched_data_trimming() -> StorageResult<()> { + fn test_epoched_data_trimming() -> Result<()> { let mut s = init_storage()?; let key_prefix = storage::Key::parse("test").unwrap(); @@ -1187,7 +1175,7 @@ mod test { } #[test] - fn test_epoched_without_data_trimming() -> StorageResult<()> { + fn test_epoched_without_data_trimming() -> Result<()> { let mut s = init_storage()?; let key_prefix = storage::Key::parse("test").unwrap(); @@ -1255,7 +1243,7 @@ mod test { } #[test] - fn test_epoched_delta_data_trimming() -> StorageResult<()> { + fn test_epoched_delta_data_trimming() -> Result<()> { let mut s = init_storage()?; let key_prefix = storage::Key::parse("test").unwrap(); @@ -1328,7 +1316,7 @@ 
mod test { } #[test] - fn test_epoched_delta_without_data_trimming() -> StorageResult<()> { + fn test_epoched_delta_without_data_trimming() -> Result<()> { let mut s = init_storage()?; // Nothing should ever get trimmed @@ -1412,7 +1400,7 @@ mod test { Ok(()) } - fn init_storage() -> StorageResult { + fn init_storage() -> Result { let mut s = TestState::default(); let gov_params = namada_governance::parameters::GovernanceParameters::default(); diff --git a/crates/proof_of_stake/src/error.rs b/crates/proof_of_stake/src/error.rs index ea4529d88a..3b849b5cf8 100644 --- a/crates/proof_of_stake/src/error.rs +++ b/crates/proof_of_stake/src/error.rs @@ -7,7 +7,7 @@ use namada_core::dec::Dec; use thiserror::Error; use crate::types::ValidatorState; -use crate::{rewards, StorageError}; +use crate::{rewards, Error}; #[allow(missing_docs)] #[derive(Error, Debug)] @@ -167,67 +167,67 @@ pub enum ConsensusKeyChangeError { MustBeEd25519, } -impl From for StorageError { +impl From for Error { fn from(err: BecomeValidatorError) -> Self { Self::new(err) } } -impl From for StorageError { +impl From for Error { fn from(err: BondError) -> Self { Self::new(err) } } -impl From for StorageError { +impl From for Error { fn from(err: UnbondError) -> Self { Self::new(err) } } -impl From for StorageError { +impl From for Error { fn from(err: CommissionRateChangeError) -> Self { Self::new(err) } } -impl From for StorageError { +impl From for Error { fn from(err: InflationError) -> Self { Self::new(err) } } -impl From for StorageError { +impl From for Error { fn from(err: UnjailValidatorError) -> Self { Self::new(err) } } -impl From for StorageError { +impl From for Error { fn from(err: RedelegationError) -> Self { Self::new(err) } } -impl From for StorageError { +impl From for Error { fn from(err: DeactivationError) -> Self { Self::new(err) } } -impl From for StorageError { +impl From for Error { fn from(err: ReactivationError) -> Self { Self::new(err) } } -impl From for StorageError { +impl 
From for Error { fn from(err: MetadataError) -> Self { Self::new(err) } } -impl From for StorageError { +impl From for Error { fn from(err: ConsensusKeyChangeError) -> Self { Self::new(err) } diff --git a/crates/proof_of_stake/src/lib.rs b/crates/proof_of_stake/src/lib.rs index 89f520be66..3e266849a5 100644 --- a/crates/proof_of_stake/src/lib.rs +++ b/crates/proof_of_stake/src/lib.rs @@ -57,8 +57,8 @@ pub use namada_state::collections::lazy_set::{self, LazySet}; pub use namada_state::collections::lazy_vec::{self, LazyVec}; pub use namada_state::collections::LazyCollection; pub use namada_state::{ - iter_prefix_bytes, Key, KeySeg, OptionExt, ResultExt, StorageError, - StorageRead, StorageResult, StorageWrite, + iter_prefix_bytes, Error, Key, KeySeg, OptionExt, Result, ResultExt, + StorageRead, StorageWrite, }; pub use namada_systems::proof_of_stake::*; use namada_systems::{governance, trans_token}; @@ -1214,7 +1214,7 @@ where .or_default() .insert(epoch, amount); } - Ok::<_, StorageError>((start, rbonds)) + Ok::<_, Error>((start, rbonds)) } else { for src_validator in &modified.validators_to_remove { if modified @@ -1326,14 +1326,14 @@ where let offset = offset_opt.unwrap_or(params.pipeline_len); if !address.is_established() { - return Err(StorageError::new_const( + return Err(Error::new_const( "The given address {address} is not established. Only an \ established address can become a validator.", )); } if is_validator(storage, address)? { - return Err(StorageError::new_const( + return Err(Error::new_const( "The given address is already a validator", )); } @@ -1341,7 +1341,7 @@ where // The address may not have any bonds if it is going to be initialized as a // validator if has_bonds::(storage, address)? { - return Err(StorageError::new_const( + return Err(Error::new_const( "The given address has delegations and therefore cannot become a \ validator. 
Unbond first.", )); @@ -2636,9 +2636,7 @@ where (Dec::one() - params.liveness_threshold) * params.liveness_window_check )? .to_uint() - .ok_or_else(|| { - StorageError::SimpleMessage("Found negative liveness threshold") - })? + .ok_or_else(|| Error::SimpleMessage("Found negative liveness threshold"))? .as_u64(); // Jail inactive validators diff --git a/crates/proof_of_stake/src/queries.rs b/crates/proof_of_stake/src/queries.rs index e9e6a03f1e..5a4166c70b 100644 --- a/crates/proof_of_stake/src/queries.rs +++ b/crates/proof_of_stake/src/queries.rs @@ -24,8 +24,8 @@ use crate::types::{ DelegationEpochs, Slash, UnbondDetails, }; use crate::{ - iter_prefix_bytes, raw_bond_amount, storage_key, PosParams, StorageError, - StorageRead, StorageResult, + iter_prefix_bytes, raw_bond_amount, storage_key, Error, PosParams, Result, + StorageRead, }; /// Find all validators to which a given bond `owner` (or source) has a @@ -34,7 +34,7 @@ pub fn find_delegation_validators( storage: &S, owner: &Address, epoch: &Epoch, -) -> StorageResult> +) -> Result> where S: StorageRead, { @@ -87,7 +87,7 @@ pub fn find_delegations( storage: &S, owner: &Address, epoch: &Epoch, -) -> StorageResult> +) -> Result> where S: StorageRead, Gov: governance::Read, @@ -145,7 +145,7 @@ where } /// Find if the given source address has any bonds. -pub fn has_bonds(storage: &S, source: &Address) -> StorageResult +pub fn has_bonds(storage: &S, source: &Address) -> Result where S: StorageRead, Gov: governance::Read, @@ -160,7 +160,7 @@ pub fn find_bonds( storage: &S, source: &Address, validator: &Address, -) -> StorageResult> +) -> Result> where S: StorageRead, { @@ -175,7 +175,7 @@ pub fn find_unbonds( storage: &S, source: &Address, validator: &Address, -) -> StorageResult> +) -> Result> where S: StorageRead, { @@ -201,7 +201,7 @@ pub fn bonds_and_unbonds( storage: &S, source: Option
, validator: Option
, -) -> StorageResult +) -> Result where S: StorageRead, Gov: governance::Read, @@ -223,7 +223,7 @@ fn get_multiple_bonds_and_unbonds( params: &PosParams, source: Option
, validator: Option
, -) -> StorageResult +) -> Result where S: StorageRead, { @@ -331,7 +331,7 @@ where slashes, &mut applied_slashes, )); - Ok::<_, StorageError>(()) + Ok::<_, Error>(()) })?; raw_unbonds.try_for_each(|(bond_id, start, withdraw, amount)| { @@ -352,7 +352,7 @@ where slashes, &mut applied_slashes, )); - Ok::<_, StorageError>(()) + Ok::<_, Error>(()) })?; Ok(bonds_and_unbonds @@ -376,7 +376,7 @@ fn find_bonds_and_unbonds_details( params: &PosParams, source: Address, validator: Address, -) -> StorageResult +) -> Result where S: StorageRead, { @@ -554,7 +554,7 @@ pub fn get_validator_protocol_key( storage: &S, addr: &Address, epoch: Epoch, -) -> StorageResult> +) -> Result> where S: StorageRead, Gov: governance::Read, @@ -570,7 +570,7 @@ pub fn get_validator_eth_hot_key( storage: &S, validator: &Address, epoch: Epoch, -) -> StorageResult> +) -> Result> where S: StorageRead, Gov: governance::Read, @@ -587,7 +587,7 @@ pub fn read_validator_stake( storage: &S, validator: &Address, epoch: Epoch, -) -> StorageResult +) -> Result where S: StorageRead, Gov: governance::Read, @@ -602,7 +602,7 @@ pub fn get_consensus_validator_from_protocol_pk( storage: &S, pk: &common::PublicKey, epoch: Option, -) -> StorageResult> +) -> Result> where S: StorageRead, Gov: governance::Read, diff --git a/crates/proof_of_stake/src/rewards.rs b/crates/proof_of_stake/src/rewards.rs index 7a03f43d61..471ef02ea9 100644 --- a/crates/proof_of_stake/src/rewards.rs +++ b/crates/proof_of_stake/src/rewards.rs @@ -23,8 +23,8 @@ use crate::storage::{ use crate::types::{into_tm_voting_power, BondId, ValidatorState, VoteInfo}; use crate::{ bond_amounts_for_rewards, get_total_consensus_stake, staking_token_address, - storage, storage_key, InflationError, PosParams, ResultExt, StorageRead, - StorageResult, StorageWrite, + storage, storage_key, InflationError, PosParams, Result, ResultExt, + StorageRead, StorageWrite, }; /// This is equal to 0.01. 
@@ -65,7 +65,7 @@ pub fn compute_inflation( epochs_per_year: u64, target_ratio: Dec, last_ratio: Dec, -) -> StorageResult { +) -> Result { let controller = PDController::new( total_native_amount.into(), max_reward_rate, @@ -116,7 +116,9 @@ impl PosRewardsCalculator { /// Calculate the rewards coefficients. These are used in combination with /// the validator's signing behavior and stake to determine the fraction of /// the block rewards earned. - pub fn get_reward_coeffs(&self) -> Result { + pub fn get_reward_coeffs( + &self, + ) -> std::result::Result { let votes_needed = self.get_min_required_votes(); let Self { @@ -175,7 +177,7 @@ pub(crate) fn log_block_rewards( height: BlockHeight, current_epoch: Epoch, new_epoch: bool, -) -> StorageResult<()> +) -> Result<()> where S: StorageWrite + StorageRead, Gov: governance::Read, @@ -217,7 +219,7 @@ pub(crate) fn log_block_rewards_aux( epoch: impl Into, proposer_address: &Address, votes: Vec, -) -> StorageResult<()> +) -> Result<()> where S: StorageRead + StorageWrite, Gov: governance::Read, @@ -359,7 +361,7 @@ pub fn apply_inflation( storage: &mut S, last_epoch: Epoch, num_blocks_in_last_epoch: u64, -) -> StorageResult<()> +) -> Result<()> where S: StorageRead + StorageWrite, Gov: governance::Read, @@ -441,7 +443,7 @@ pub fn update_rewards_products_and_mint_inflation( inflation: token::Amount, staking_token: &Address, total_native_tokens: token::Amount, -) -> StorageResult<()> +) -> Result<()> where S: StorageRead + StorageWrite, Token: trans_token::Write, @@ -559,7 +561,7 @@ pub fn compute_current_rewards_from_bonds( source: &Address, validator: &Address, current_epoch: Epoch, -) -> StorageResult +) -> Result where S: StorageRead, Gov: governance::Read, @@ -616,7 +618,7 @@ pub fn add_rewards_to_counter( source: &Address, validator: &Address, new_rewards: token::Amount, -) -> StorageResult<()> +) -> Result<()> where S: StorageRead + StorageWrite, { @@ -631,7 +633,7 @@ pub fn take_rewards_from_counter( storage: &mut S, 
source: &Address, validator: &Address, -) -> StorageResult +) -> Result where S: StorageRead + StorageWrite, { @@ -647,7 +649,7 @@ pub fn read_rewards_counter( storage: &S, source: &Address, validator: &Address, -) -> StorageResult +) -> Result where S: StorageRead, { diff --git a/crates/proof_of_stake/src/slashing.rs b/crates/proof_of_stake/src/slashing.rs index b1a7613ed9..0bcf0625bf 100644 --- a/crates/proof_of_stake/src/slashing.rs +++ b/crates/proof_of_stake/src/slashing.rs @@ -34,9 +34,9 @@ use crate::validator_set_update::update_validator_set; use crate::{ fold_and_slash_redelegated_bonds, get_total_consensus_stake, iter_prefix_bytes, jail_validator, storage, storage_key, types, - EagerRedelegatedUnbonds, FoldRedelegatedBondsResult, LazyMap, OptionExt, - OwnedPosParams, PosParams, ResultExt, StorageError, StorageRead, - StorageResult, StorageWrite, + EagerRedelegatedUnbonds, Error, FoldRedelegatedBondsResult, LazyMap, + OptionExt, OwnedPosParams, PosParams, Result, ResultExt, StorageRead, + StorageWrite, }; /// Apply PoS slashes from the evidence @@ -46,7 +46,7 @@ pub(crate) fn record_slashes_from_evidence( pos_params: &PosParams, current_epoch: Epoch, validator_set_update_epoch: Epoch, -) -> StorageResult<()> +) -> Result<()> where S: StorageWrite + StorageRead, Gov: governance::Read, @@ -148,7 +148,7 @@ pub fn slash( slash_type: SlashType, validator: &Address, validator_set_update_epoch: Epoch, -) -> StorageResult<()> +) -> Result<()> where S: StorageRead + StorageWrite, Gov: governance::Read, @@ -208,7 +208,7 @@ pub fn process_slashes( storage: &mut S, events: &mut impl EmitEvents, current_epoch: Epoch, -) -> StorageResult<()> +) -> Result<()> where S: StorageRead + StorageWrite, Gov: governance::Read, @@ -407,7 +407,7 @@ pub fn slash_validator_redelegation( dest_total_redelegated_unbonded: &TotalRedelegatedUnbonded, slash_rate: Dec, dest_slashed_amounts: &mut BTreeMap, -) -> StorageResult<()> +) -> Result<()> where S: StorageRead, { @@ -465,7 +465,7 @@ 
pub fn slash_redelegation( total_redelegated_unbonded: &TotalRedelegatedUnbonded, slash_rate: Dec, slashed_amounts: &mut BTreeMap, -) -> StorageResult<()> +) -> Result<()> where S: StorageRead, { @@ -494,9 +494,9 @@ where .at(src_validator) .get(storage, &bond_start)? .unwrap_or_default(); - Ok::<_, StorageError>(redelegated_unbonded) + Ok::<_, Error>(redelegated_unbonded) }) - .collect::>()?; + .collect::>()?; let mut init_tot_unbonded = token::Amount::sum(redelegated_unbonded.into_iter()) .ok_or_err_msg("token amount overflow")?; @@ -586,7 +586,7 @@ pub fn slash_validator( slash_rate: Dec, current_epoch: Epoch, slashed_amounts_map: &BTreeMap, -) -> StorageResult> +) -> Result> where S: StorageRead, { @@ -649,7 +649,7 @@ where redelegated_bonds.get(bond_start), slash_rate, )?; - Ok::(checked!(acc + slashed)?) + Ok::(checked!(acc + slashed)?) }, )?; @@ -716,7 +716,7 @@ pub fn compute_bond_at_epoch( start: Epoch, amount: token::Amount, redelegated_bonds: Option<&EagerRedelegatedBondsMap>, -) -> StorageResult +) -> Result where S: StorageRead, { @@ -773,7 +773,7 @@ pub fn compute_slash_bond_at_epoch( bond_amount: token::Amount, redelegated_bonds: Option<&EagerRedelegatedBondsMap>, slash_rate: Dec, -) -> StorageResult +) -> Result where S: StorageRead, { @@ -807,7 +807,7 @@ pub fn find_slashes_in_range( start: Epoch, end: Option, validator: &Address, -) -> StorageResult> +) -> Result> where S: StorageRead, { @@ -835,7 +835,7 @@ pub fn apply_list_slashes( params: &OwnedPosParams, slashes: &[Slash], amount: token::Amount, -) -> Result { +) -> std::result::Result { let mut final_amount = amount; let mut computed_slashes = BTreeMap::::new(); for slash in slashes { @@ -856,7 +856,7 @@ pub fn compute_slashable_amount( slash: &Slash, amount: token::Amount, computed_slashes: &BTreeMap, -) -> Result { +) -> std::result::Result { let updated_amount = computed_slashes .iter() .filter(|(&epoch, _)| { @@ -874,9 +874,7 @@ pub fn compute_slashable_amount( } /// Find all slashes and 
the associated validators in the PoS system -pub fn find_all_slashes( - storage: &S, -) -> StorageResult>> +pub fn find_all_slashes(storage: &S) -> Result>> where S: StorageRead, { @@ -915,7 +913,7 @@ where pub fn find_all_enqueued_slashes( storage: &S, epoch: Epoch, -) -> StorageResult>>> +) -> Result>>> where S: StorageRead, { @@ -950,7 +948,7 @@ where pub fn find_validator_slashes( storage: &S, validator: &Address, -) -> StorageResult> +) -> Result> where S: StorageRead, { @@ -964,7 +962,7 @@ pub fn get_slashed_amount( params: &PosParams, amount: token::Amount, slashes: &BTreeMap, -) -> StorageResult { +) -> Result { let mut updated_amount = amount; let mut computed_amounts = Vec::::new(); @@ -1016,7 +1014,7 @@ pub fn compute_amount_after_slashing_unbond( unbonds: &BTreeMap, redelegated_unbonds: &EagerRedelegatedUnbonds, slashes: Vec, -) -> StorageResult +) -> Result where S: StorageRead, { @@ -1074,7 +1072,7 @@ pub fn compute_amount_after_slashing_withdraw( (token::Amount, EagerRedelegatedBondsMap), >, slashes: Vec, -) -> StorageResult +) -> Result where S: StorageRead, { @@ -1152,7 +1150,7 @@ fn process_validator_slash( slash_rate: Dec, current_epoch: Epoch, slashed_amount_map: &mut EagerRedelegatedBondsMap, -) -> StorageResult<()> +) -> Result<()> where S: StorageRead + StorageWrite, { @@ -1191,7 +1189,7 @@ where ) = res?; Ok(dest_validator) }) - .collect::>>()?; + .collect::>>()?; for dest_validator in dest_validators { let to_modify = slashed_amount_map @@ -1228,7 +1226,7 @@ fn compute_cubic_slash_rate( storage: &S, params: &PosParams, infraction_epoch: Epoch, -) -> StorageResult +) -> Result where S: StorageRead, { @@ -1269,7 +1267,7 @@ where let stake = Dec::try_from(validator_stake).into_storage_result()?; - Ok::(checked!(acc + stake)?) + Ok::(checked!(acc + stake)?) 
})?; sum_vp_fraction = checked!(sum_vp_fraction + (infracting_stake / consensus_stake))?; diff --git a/crates/proof_of_stake/src/storage.rs b/crates/proof_of_stake/src/storage.rs index e75299d401..7cd402bbcb 100644 --- a/crates/proof_of_stake/src/storage.rs +++ b/crates/proof_of_stake/src/storage.rs @@ -30,7 +30,7 @@ use crate::types::{ }; use crate::{ storage_key, LazyCollection, LazySet, MetadataError, OwnedPosParams, - PosParams, StorageRead, StorageResult, StorageWrite, + PosParams, Result, StorageRead, StorageWrite, }; // ---- Storage handles ---- @@ -261,7 +261,7 @@ pub fn delegation_targets_handle(delegator: &Address) -> DelegationTargets { // ---- Storage read + write ---- /// Read owned PoS parameters -pub fn read_owned_pos_params(storage: &S) -> StorageResult +pub fn read_owned_pos_params(storage: &S) -> Result where S: StorageRead, { @@ -271,7 +271,7 @@ where } /// Read PoS parameters -pub fn read_pos_params(storage: &S) -> StorageResult +pub fn read_pos_params(storage: &S) -> Result where S: StorageRead, Gov: governance::Read, @@ -285,7 +285,7 @@ where pub fn read_non_pos_owned_params( storage: &S, owned: OwnedPosParams, -) -> StorageResult +) -> Result where S: StorageRead, Gov: governance::Read, @@ -301,7 +301,7 @@ where pub fn write_pos_params( storage: &mut S, params: &OwnedPosParams, -) -> StorageResult<()> +) -> Result<()> where S: StorageRead + StorageWrite, { @@ -313,7 +313,7 @@ where pub fn find_validator_by_raw_hash( storage: &S, raw_hash: impl AsRef, -) -> StorageResult> +) -> Result> where S: StorageRead, { @@ -326,7 +326,7 @@ pub fn write_validator_address_raw_hash( storage: &mut S, validator: &Address, consensus_key: &common::PublicKey, -) -> StorageResult<()> +) -> Result<()> where S: StorageRead + StorageWrite, { @@ -341,7 +341,7 @@ where pub fn read_validator_max_commission_rate_change( storage: &S, validator: &Address, -) -> StorageResult> +) -> Result> where S: StorageRead, { @@ -354,7 +354,7 @@ pub fn 
write_validator_max_commission_rate_change( storage: &mut S, validator: &Address, change: Dec, -) -> StorageResult<()> +) -> Result<()> where S: StorageRead + StorageWrite, { @@ -366,7 +366,7 @@ where pub fn read_validator_last_slash_epoch( storage: &S, validator: &Address, -) -> StorageResult> +) -> Result> where S: StorageRead, { @@ -379,7 +379,7 @@ pub fn write_validator_last_slash_epoch( storage: &mut S, validator: &Address, epoch: Epoch, -) -> StorageResult<()> +) -> Result<()> where S: StorageRead + StorageWrite, { @@ -390,7 +390,7 @@ where /// Read last block proposer address. pub fn read_last_block_proposer_address( storage: &S, -) -> StorageResult> +) -> Result> where S: StorageRead, { @@ -402,7 +402,7 @@ where pub fn write_last_block_proposer_address( storage: &mut S, address: Address, -) -> StorageResult<()> +) -> Result<()> where S: StorageRead + StorageWrite, { @@ -411,7 +411,7 @@ where } /// Read last epoch's staked ratio. -pub fn read_last_staked_ratio(storage: &S) -> StorageResult> +pub fn read_last_staked_ratio(storage: &S) -> Result> where S: StorageRead, { @@ -420,10 +420,7 @@ where } /// Write last epoch's staked ratio. -pub fn write_last_staked_ratio( - storage: &mut S, - ratio: Dec, -) -> StorageResult<()> +pub fn write_last_staked_ratio(storage: &mut S, ratio: Dec) -> Result<()> where S: StorageRead + StorageWrite, { @@ -434,7 +431,7 @@ where /// Read last epoch's PoS inflation amount. 
pub fn read_last_pos_inflation_amount( storage: &S, -) -> StorageResult> +) -> Result> where S: StorageRead, { @@ -446,7 +443,7 @@ where pub fn write_last_pos_inflation_amount( storage: &mut S, inflation: token::Amount, -) -> StorageResult<()> +) -> Result<()> where S: StorageRead + StorageWrite, { @@ -459,7 +456,7 @@ pub fn read_validator_state( storage: &S, validator: &Address, epoch: &Epoch, -) -> StorageResult> +) -> Result> where S: StorageRead, Gov: governance::Read, @@ -473,7 +470,7 @@ pub fn read_validator_deltas_value( storage: &S, validator: &Address, epoch: &namada_core::chain::Epoch, -) -> StorageResult> +) -> Result> where S: StorageRead, { @@ -489,7 +486,7 @@ pub fn read_validator_stake( params: &PosParams, validator: &Address, epoch: namada_core::chain::Epoch, -) -> StorageResult +) -> Result where S: StorageRead, { @@ -512,7 +509,7 @@ pub fn update_validator_deltas( delta: token::Change, current_epoch: namada_core::chain::Epoch, offset_opt: Option, -) -> StorageResult<()> +) -> Result<()> where S: StorageRead + StorageWrite, Gov: governance::Read, @@ -537,7 +534,7 @@ pub fn read_total_stake( storage: &S, params: &PosParams, epoch: namada_core::chain::Epoch, -) -> StorageResult +) -> Result where S: StorageRead, { @@ -557,7 +554,7 @@ pub fn read_total_active_stake( storage: &S, params: &PosParams, epoch: namada_core::chain::Epoch, -) -> StorageResult +) -> Result where S: StorageRead, { @@ -576,7 +573,7 @@ where pub fn read_consensus_validator_set_addresses( storage: &S, epoch: namada_core::chain::Epoch, -) -> StorageResult> +) -> Result> where S: StorageRead, { @@ -591,7 +588,7 @@ where pub fn read_below_capacity_validator_set_addresses( storage: &S, epoch: namada_core::chain::Epoch, -) -> StorageResult> +) -> Result> where S: StorageRead, { @@ -606,7 +603,7 @@ where pub fn read_below_threshold_validator_set_addresses( storage: &S, epoch: namada_core::chain::Epoch, -) -> StorageResult> +) -> Result> where S: StorageRead, Gov: governance::Read, @@ 
-629,7 +626,7 @@ where pub fn read_consensus_validator_set_addresses_with_stake( storage: &S, epoch: namada_core::chain::Epoch, -) -> StorageResult> +) -> Result> where S: StorageRead, { @@ -659,7 +656,7 @@ where pub fn get_num_consensus_validators( storage: &S, epoch: namada_core::chain::Epoch, -) -> StorageResult +) -> Result where S: StorageRead, { @@ -673,7 +670,7 @@ where pub fn read_below_capacity_validator_set_addresses_with_stake( storage: &S, epoch: namada_core::chain::Epoch, -) -> StorageResult> +) -> Result> where S: StorageRead, { @@ -703,7 +700,7 @@ where pub fn read_all_validator_addresses( storage: &S, epoch: namada_core::chain::Epoch, -) -> StorageResult> +) -> Result> where S: StorageRead, { @@ -722,7 +719,7 @@ pub fn update_total_deltas( current_epoch: namada_core::chain::Epoch, offset_opt: Option, update_active_voting_power: bool, -) -> StorageResult<()> +) -> Result<()> where S: StorageRead + StorageWrite, Gov: governance::Read, @@ -767,7 +764,7 @@ where pub fn read_validator_email( storage: &S, validator: &Address, -) -> StorageResult> +) -> Result> where S: StorageRead, { @@ -780,7 +777,7 @@ pub fn write_validator_email( storage: &mut S, validator: &Address, email: &String, -) -> StorageResult<()> +) -> Result<()> where S: StorageRead + StorageWrite, { @@ -796,7 +793,7 @@ where pub fn read_validator_description( storage: &S, validator: &Address, -) -> StorageResult> +) -> Result> where S: StorageRead, { @@ -809,7 +806,7 @@ pub fn write_validator_description( storage: &mut S, validator: &Address, description: &String, -) -> StorageResult<()> +) -> Result<()> where S: StorageRead + StorageWrite, { @@ -825,7 +822,7 @@ where pub fn read_validator_website( storage: &S, validator: &Address, -) -> StorageResult> +) -> Result> where S: StorageRead, { @@ -838,7 +835,7 @@ pub fn write_validator_website( storage: &mut S, validator: &Address, website: &String, -) -> StorageResult<()> +) -> Result<()> where S: StorageRead + StorageWrite, { @@ -854,7 +851,7 
@@ where pub fn read_validator_discord_handle( storage: &S, validator: &Address, -) -> StorageResult> +) -> Result> where S: StorageRead, { @@ -867,7 +864,7 @@ pub fn write_validator_discord_handle( storage: &mut S, validator: &Address, discord_handle: &String, -) -> StorageResult<()> +) -> Result<()> where S: StorageRead + StorageWrite, { @@ -883,7 +880,7 @@ where pub fn read_validator_avatar( storage: &S, validator: &Address, -) -> StorageResult> +) -> Result> where S: StorageRead, { @@ -896,7 +893,7 @@ pub fn write_validator_avatar( storage: &mut S, validator: &Address, avatar: &String, -) -> StorageResult<()> +) -> Result<()> where S: StorageRead + StorageWrite, { @@ -912,7 +909,7 @@ where pub fn read_validator_name( storage: &S, validator: &Address, -) -> StorageResult> +) -> Result> where S: StorageRead, { @@ -925,7 +922,7 @@ pub fn write_validator_name( storage: &mut S, validator: &Address, validator_name: &String, -) -> StorageResult<()> +) -> Result<()> where S: StorageRead + StorageWrite, { @@ -941,7 +938,7 @@ pub fn write_validator_metadata( storage: &mut S, validator: &Address, metadata: &ValidatorMetaData, -) -> StorageResult<()> +) -> Result<()> where S: StorageRead + StorageWrite, { @@ -971,7 +968,7 @@ pub fn get_last_reward_claim_epoch( storage: &S, delegator: &Address, validator: &Address, -) -> StorageResult> +) -> Result> where S: StorageRead, { @@ -987,7 +984,7 @@ pub fn write_last_reward_claim_epoch( delegator: &Address, validator: &Address, epoch: Epoch, -) -> StorageResult<()> +) -> Result<()> where S: StorageRead + StorageWrite, { @@ -1003,7 +1000,7 @@ where pub fn try_insert_consensus_key( storage: &mut S, consensus_key: &common::PublicKey, -) -> StorageResult<()> +) -> Result<()> where S: StorageRead + StorageWrite, { @@ -1014,7 +1011,7 @@ where /// Get the unique set of consensus keys in storage pub fn get_consensus_key_set( storage: &S, -) -> StorageResult> +) -> Result> where S: StorageRead, { @@ -1027,7 +1024,7 @@ where pub fn 
is_consensus_key_used( storage: &S, consensus_key: &common::PublicKey, -) -> StorageResult +) -> Result where S: StorageRead, { @@ -1041,7 +1038,7 @@ pub fn get_consensus_key( storage: &S, addr: &Address, epoch: Epoch, -) -> StorageResult> +) -> Result> where S: StorageRead, Gov: governance::Read, diff --git a/crates/proof_of_stake/src/validator_set_update.rs b/crates/proof_of_stake/src/validator_set_update.rs index f1527c0f65..c51b6f4a8f 100644 --- a/crates/proof_of_stake/src/validator_set_update.rs +++ b/crates/proof_of_stake/src/validator_set_update.rs @@ -21,7 +21,7 @@ use crate::types::{ ConsensusValidatorSet, Position, ReverseOrdTokenAmount, ValidatorPositionAddresses, ValidatorSetUpdate, ValidatorState, }; -use crate::{PosParams, StorageRead, StorageResult, StorageWrite}; +use crate::{PosParams, Result, StorageRead, StorageWrite}; /// Update validator set at the pipeline epoch when a validator receives a new /// bond and when its bond is unbonded (self-bond or delegation). @@ -32,7 +32,7 @@ pub fn update_validator_set( token_change: token::Change, current_epoch: Epoch, offset: Option, -) -> StorageResult<()> +) -> Result<()> where S: StorageRead + StorageWrite, Gov: governance::Read, @@ -360,7 +360,7 @@ pub fn insert_validator_into_validator_set( stake: token::Amount, current_epoch: Epoch, offset: u64, -) -> StorageResult<()> +) -> Result<()> where S: StorageRead + StorageWrite, Gov: governance::Read, @@ -463,7 +463,7 @@ pub fn remove_consensus_validator( params: &PosParams, epoch: Epoch, validator: &Address, -) -> StorageResult<()> +) -> Result<()> where S: StorageRead + StorageWrite, { @@ -491,7 +491,7 @@ pub fn remove_below_capacity_validator( params: &PosParams, epoch: Epoch, validator: &Address, -) -> StorageResult<()> +) -> Result<()> where S: StorageRead + StorageWrite, { @@ -523,7 +523,7 @@ pub fn promote_next_below_capacity_validator_to_consensus( storage: &mut S, current_epoch: Epoch, offset: u64, -) -> StorageResult<()> +) -> Result<()> where S: 
StorageRead + StorageWrite, Gov: governance::Read, @@ -569,7 +569,7 @@ pub fn validator_set_update_comet( params: &PosParams, current_epoch: Epoch, f: impl FnMut(ValidatorSetUpdate) -> T, -) -> StorageResult> +) -> Result> where S: StorageRead, { @@ -771,7 +771,7 @@ pub fn copy_validator_sets_and_positions( params: &PosParams, current_epoch: Epoch, target_epoch: Epoch, -) -> StorageResult<()> +) -> Result<()> where S: StorageRead + StorageWrite, { @@ -890,7 +890,7 @@ fn insert_into_consensus_and_demote_to_below_cap( offset: u64, consensus_set: &ConsensusValidatorSet, below_capacity_set: &BelowCapacityValidatorSet, -) -> StorageResult<()> +) -> Result<()> where S: StorageRead + StorageWrite, Gov: governance::Read, @@ -941,7 +941,7 @@ where fn find_first_position( handle: &ValidatorPositionAddresses, storage: &S, -) -> StorageResult> +) -> Result> where S: StorageRead, { @@ -957,7 +957,7 @@ where fn find_last_position( handle: &ValidatorPositionAddresses, storage: &S, -) -> StorageResult> +) -> Result> where S: StorageRead, { @@ -973,7 +973,7 @@ where fn find_next_position( handle: &ValidatorPositionAddresses, storage: &S, -) -> StorageResult +) -> Result where S: StorageRead, { @@ -989,7 +989,7 @@ where fn get_min_consensus_validator_amount( handle: &ConsensusValidatorSet, storage: &S, -) -> StorageResult +) -> Result where S: StorageRead, { @@ -1010,7 +1010,7 @@ where fn get_max_below_capacity_validator_amount( handle: &BelowCapacityValidatorSet, storage: &S, -) -> StorageResult> +) -> Result> where S: StorageRead, { @@ -1034,7 +1034,7 @@ fn insert_validator_into_set( storage: &mut S, epoch: &Epoch, address: &Address, -) -> StorageResult<()> +) -> Result<()> where S: StorageRead + StorageWrite, { @@ -1062,7 +1062,7 @@ fn read_validator_set_position( validator: &Address, epoch: Epoch, _params: &PosParams, -) -> StorageResult> +) -> Result> where S: StorageRead, { diff --git a/crates/proof_of_stake/src/vp.rs b/crates/proof_of_stake/src/vp.rs index 
6699f702a0..fccffb796f 100644 --- a/crates/proof_of_stake/src/vp.rs +++ b/crates/proof_of_stake/src/vp.rs @@ -13,7 +13,7 @@ use namada_tx::action::{ }; use namada_tx::BatchedTxRef; use namada_vp::native_vp::{ - self, Ctx, CtxPreStorageRead, NativeVp, VpEvaluator, + Ctx, CtxPreStorageRead, Error, NativeVp, Result, VpEvaluator, }; use thiserror::Error; @@ -24,17 +24,18 @@ use crate::{storage_key, token}; #[allow(missing_docs)] #[derive(Error, Debug)] -pub enum Error { - #[error("PoS VP error: Native VP error: {0}")] - NativeVpError(#[from] native_vp::Error), +pub enum VpError { #[error( "Action {0} not authorized by {1} which is not part of verifier set" )] Unauthorized(&'static str, Address), } -/// PoS functions result -pub type Result = std::result::Result; +impl From for Error { + fn from(value: VpError) -> Self { + Error::new(value) + } +} /// Proof-of-Stake validity predicate pub struct PosVp<'ctx, S, CA, EVAL, Gov> @@ -55,8 +56,6 @@ where EVAL: 'static + VpEvaluator<'ctx, S, CA, EVAL>, Gov: governance::Read>, { - type Error = Error; - fn validate_tx( &'view self, batched_tx: &BatchedTxRef<'_>, @@ -70,8 +69,7 @@ where .tx .data(batched_tx.cmt) .map(|tx_data| Gov::is_proposal_accepted(&self.ctx.pre(), &tx_data)) - .transpose() - .map_err(Error::NativeVpError)? + .transpose()? .unwrap_or(false) { for key in keys_changed { @@ -96,10 +94,9 @@ where tracing::info!( "Rejecting tx without any action written to temp storage" ); - return Err(native_vp::Error::new_const( + return Err(Error::new_const( "Rejecting tx without any action written to temp storage", - ) - .into()); + )); } let mut became_validator: BTreeSet
= Default::default(); @@ -126,10 +123,11 @@ where tracing::info!( "Unauthorized PosAction::BecomeValidator" ); - return Err(Error::Unauthorized( + return Err(VpError::Unauthorized( "BecomeValidator", address, - )); + ) + .into()); } became_validator.insert(address); } @@ -138,10 +136,11 @@ where tracing::info!( "Unauthorized PosAction::DeactivateValidator" ); - return Err(Error::Unauthorized( + return Err(VpError::Unauthorized( "DeactivateValidator", validator, - )); + ) + .into()); } deactivated.insert(validator); } @@ -150,19 +149,21 @@ where tracing::info!( "Unauthorized PosAction::ReactivateValidator" ); - return Err(Error::Unauthorized( + return Err(VpError::Unauthorized( "ReactivateValidator", validator, - )); + ) + .into()); } reactivated.insert(validator); } PosAction::Unjail(validator) => { if !verifiers.contains(&validator) { tracing::info!("Unauthorized PosAction::Unjail"); - return Err(Error::Unauthorized( + return Err(VpError::Unauthorized( "Unjail", validator, - )); + ) + .into()); } unjailed.insert(validator); } @@ -177,10 +178,11 @@ where }; if !verifiers.contains(&bond_id.source) { tracing::info!("Unauthorized PosAction::Bond"); - return Err(Error::Unauthorized( + return Err(VpError::Unauthorized( "Bond", bond_id.source, - )); + ) + .into()); } bonds.insert(bond_id, amount); } @@ -195,10 +197,11 @@ where }; if !verifiers.contains(&bond_id.source) { tracing::info!("Unauthorized PosAction::Unbond"); - return Err(Error::Unauthorized( + return Err(VpError::Unauthorized( "Unbond", bond_id.source, - )); + ) + .into()); } unbonds.insert(bond_id, amount); } @@ -209,10 +212,11 @@ where }; if !verifiers.contains(&bond_id.source) { tracing::info!("Unauthorized PosAction::Withdraw"); - return Err(Error::Unauthorized( + return Err(VpError::Unauthorized( "Withdraw", bond_id.source, - )); + ) + .into()); } withdrawals.insert(bond_id); } @@ -226,10 +230,11 @@ where tracing::info!( "Unauthorized PosAction::Redelegation" ); - return Err(Error::Unauthorized( + return 
Err(VpError::Unauthorized( "Redelegation", owner, - )); + ) + .into()); } let bond_id = BondId { source: owner, @@ -249,10 +254,11 @@ where tracing::info!( "Unauthorized PosAction::ClaimRewards" ); - return Err(Error::Unauthorized( + return Err(VpError::Unauthorized( "ClaimRewards", bond_id.source, - )); + ) + .into()); } claimed_rewards.insert(bond_id); } @@ -261,10 +267,11 @@ where tracing::info!( "Unauthorized PosAction::CommissionChange" ); - return Err(Error::Unauthorized( + return Err(VpError::Unauthorized( "CommissionChange", validator, - )); + ) + .into()); } changed_commission.insert(validator); } @@ -273,10 +280,11 @@ where tracing::info!( "Unauthorized PosAction::MetadataChange" ); - return Err(Error::Unauthorized( + return Err(VpError::Unauthorized( "MetadataChange", validator, - )); + ) + .into()); } changed_metadata.insert(validator); } @@ -285,10 +293,11 @@ where tracing::info!( "Unauthorized PosAction::ConsensusKeyChange" ); - return Err(Error::Unauthorized( + return Err(VpError::Unauthorized( "ConsensusKeyChange", validator, - )); + ) + .into()); } changed_consensus_key.insert(validator); } @@ -302,10 +311,10 @@ where for key in keys_changed { if is_params_key(key) { - return Err(Error::NativeVpError(native_vp::Error::new_const( + return Err(Error::new_const( "PoS parameter changes can only be performed by a \ governance proposal that has been accepted", - ))); + )); } // TODO: validate changes keys against the accumulated changes } @@ -330,16 +339,13 @@ where /// Return `Ok` if the changed parameters are valid fn is_valid_parameter_change(&self) -> Result<()> { let validation_errors: Vec = - read_owned_pos_params(&self.ctx.post()) - .map_err(Error::NativeVpError)? 
- .validate(); + read_owned_pos_params(&self.ctx.post())?.validate(); validation_errors.is_empty().ok_or_else(|| { let validation_errors_str = itertools::join(validation_errors, ", "); - native_vp::Error::new_alloc(format!( + Error::new_alloc(format!( "PoS parameter changes were invalid: {validation_errors_str}", )) - .into() }) } } diff --git a/crates/shielded_token/src/conversion.rs b/crates/shielded_token/src/conversion.rs index feafb4c845..cb6bd005f0 100644 --- a/crates/shielded_token/src/conversion.rs +++ b/crates/shielded_token/src/conversion.rs @@ -19,7 +19,7 @@ use crate::storage_key::{ masp_last_locked_amount_key, masp_locked_amount_target_key, masp_max_reward_rate_key, }; -use crate::{StorageRead, StorageResult, StorageWrite, WithConversionState}; +use crate::{Result, StorageRead, StorageWrite, WithConversionState}; /// Compute shielded token inflation amount #[allow(clippy::too_many_arguments)] @@ -65,7 +65,7 @@ pub fn compute_inflation( pub fn calculate_masp_rewards_precision( storage: &mut S, addr: &Address, -) -> StorageResult<(u128, Denomination)> +) -> Result<(u128, Denomination)> where S: StorageWrite + StorageRead, TransToken: trans_token::Read, @@ -91,7 +91,7 @@ pub fn calculate_masp_rewards( storage: &mut S, token: &Address, masp_epochs_per_year: u64, -) -> StorageResult<((u128, u128), Denomination)> +) -> Result<((u128, u128), Denomination)> where S: StorageWrite + StorageRead, TransToken: trans_token::Keys + trans_token::Read, @@ -232,7 +232,7 @@ where /// Update the MASP's allowed conversions pub fn update_allowed_conversions( _storage: &mut S, -) -> StorageResult<()> +) -> Result<()> where S: StorageWrite + StorageRead + WithConversionState, Params: parameters::Read, @@ -245,7 +245,7 @@ where /// Update the MASP's allowed conversions pub fn update_allowed_conversions( storage: &mut S, -) -> StorageResult<()> +) -> Result<()> where S: StorageWrite + StorageRead + WithConversionState, Params: parameters::Read, @@ -269,9 +269,7 @@ where }; use 
rayon::prelude::ParallelSlice; - use crate::{ - mint_rewards, ConversionLeaf, OptionExt, ResultExt, StorageError, - }; + use crate::{mint_rewards, ConversionLeaf, Error, OptionExt, ResultExt}; // The derived conversions will be placed in MASP address space let masp_addr = MASP; @@ -348,7 +346,7 @@ where storage.get_block_epoch()?, masp_epoch_multiplier, ) - .map_err(StorageError::new_const)?; + .map_err(Error::new_const)?; let prev_masp_epoch = match masp_epoch.prev() { Some(epoch) => epoch, None => return Ok(()), @@ -442,9 +440,7 @@ where normed_inflation, )) .ok_or_else(|| { - StorageError::new_const( - "Three digit reward overflow", - ) + Error::new_const("Three digit reward overflow") })?; total_reward = total_reward .checked_add( @@ -456,7 +452,7 @@ where .unwrap_or_default(), ) .ok_or_else(|| { - StorageError::new_const( + Error::new_const( "Three digit total reward overflow", ) })?; @@ -515,14 +511,14 @@ where addr_bal .u128_eucl_div_rem((reward, precision)) .ok_or_else(|| { - StorageError::new_const( + Error::new_const( "Total reward calculation overflow", ) })? 
.0, ) .ok_or_else(|| { - StorageError::new_const("Total reward overflow") + Error::new_const("Total reward overflow") })?; } } diff --git a/crates/shielded_token/src/lib.rs b/crates/shielded_token/src/lib.rs index 1328c359dd..2e35149be9 100644 --- a/crates/shielded_token/src/lib.rs +++ b/crates/shielded_token/src/lib.rs @@ -31,8 +31,8 @@ use namada_core::borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; pub use namada_core::dec::Dec; pub use namada_core::masp::{MaspEpoch, MaspTxId, MaspTxRefs, MaspValue}; pub use namada_state::{ - ConversionLeaf, ConversionState, Key, OptionExt, ResultExt, StorageError, - StorageRead, StorageResult, StorageWrite, WithConversionState, + ConversionLeaf, ConversionState, Error, Key, OptionExt, Result, ResultExt, + StorageRead, StorageWrite, WithConversionState, }; use serde::{Deserialize, Serialize}; pub use storage::*; diff --git a/crates/shielded_token/src/storage.rs b/crates/shielded_token/src/storage.rs index c124a0f6a3..50aab32021 100644 --- a/crates/shielded_token/src/storage.rs +++ b/crates/shielded_token/src/storage.rs @@ -6,9 +6,7 @@ use namada_core::uint::Uint; use namada_systems::trans_token; use crate::storage_key::*; -use crate::{ - ResultExt, ShieldedParams, StorageRead, StorageResult, StorageWrite, -}; +use crate::{Result, ResultExt, ShieldedParams, StorageRead, StorageWrite}; /// Initialize parameters for the token in storage during the genesis block. pub fn write_params( @@ -16,7 +14,7 @@ pub fn write_params( storage: &mut S, token: &Address, denom: &token::Denomination, -) -> StorageResult<()> +) -> Result<()> where S: StorageRead + StorageWrite, TransToken: trans_token::Keys, @@ -55,7 +53,7 @@ where pub fn mint_rewards( storage: &mut S, amount: token::Amount, -) -> StorageResult<()> +) -> Result<()> where S: StorageRead + StorageWrite, TransToken: trans_token::Write, @@ -70,7 +68,7 @@ where } /// Read the total rewards minted by MASP. 
-pub fn read_total_rewards(storage: &S) -> StorageResult +pub fn read_total_rewards(storage: &S) -> Result where S: StorageRead, { diff --git a/crates/shielded_token/src/utils.rs b/crates/shielded_token/src/utils.rs index d67d09f5c5..9593c93a20 100644 --- a/crates/shielded_token/src/utils.rs +++ b/crates/shielded_token/src/utils.rs @@ -9,13 +9,13 @@ use masp_primitives::transaction::Transaction; use crate::storage_key::{ is_masp_transfer_key, masp_commitment_tree_key, masp_nullifier_key, }; -use crate::{Key, StorageError, StorageRead, StorageResult, StorageWrite}; +use crate::{Error, Key, Result, StorageRead, StorageWrite}; // Writes the nullifiers of the provided masp transaction to storage fn reveal_nullifiers( ctx: &mut impl StorageWrite, transaction: &Transaction, -) -> StorageResult<()> { +) -> Result<()> { for description in transaction .sapling_bundle() .map_or(&vec![], |description| &description.shielded_spends) @@ -33,12 +33,12 @@ fn reveal_nullifiers( pub fn update_note_commitment_tree( ctx: &mut (impl StorageRead + StorageWrite), transaction: &Transaction, -) -> StorageResult<()> { +) -> Result<()> { if let Some(bundle) = transaction.sapling_bundle() { if !bundle.shielded_outputs.is_empty() { let tree_key = masp_commitment_tree_key(); let mut commitment_tree: CommitmentTree = - ctx.read(&tree_key)?.ok_or(StorageError::SimpleMessage( + ctx.read(&tree_key)?.ok_or(Error::SimpleMessage( "Missing note commitment tree in storage", ))?; @@ -47,9 +47,7 @@ pub fn update_note_commitment_tree( commitment_tree .append(Node::from_scalar(description.cmu)) .map_err(|_| { - StorageError::SimpleMessage( - "Note commitment tree is full", - ) + Error::SimpleMessage("Note commitment tree is full") })?; } @@ -64,7 +62,7 @@ pub fn update_note_commitment_tree( pub fn handle_masp_tx( ctx: &mut (impl StorageRead + StorageWrite), shielded: &Transaction, -) -> StorageResult<()> { +) -> Result<()> { // TODO(masp#73): temporarily disabled because of the node aggregation issue // in 
WASM. Using the host env tx_update_masp_note_commitment_tree or // directly the update_note_commitment_tree function as a workaround diff --git a/crates/shielded_token/src/validation.rs b/crates/shielded_token/src/validation.rs index 193846a3a4..1567791acd 100644 --- a/crates/shielded_token/src/validation.rs +++ b/crates/shielded_token/src/validation.rs @@ -21,7 +21,7 @@ use masp_proofs::sapling::BatchValidator; use rand_core::OsRng; use smooth_operator::checked; -use crate::{StorageError, StorageResult}; +use crate::{Error, Result}; // TODO these could be exported from masp_proof crate /// Spend circuit name @@ -119,16 +119,16 @@ fn load_pvks() -> &'static PVKs { pub fn verify_shielded_tx( transaction: &Transaction, consume_verify_gas: F, -) -> Result<(), StorageError> +) -> Result<()> where - F: Fn(u64) -> StorageResult<()>, + F: Fn(u64) -> Result<()>, { tracing::debug!("entered verify_shielded_tx()"); let sapling_bundle = if let Some(bundle) = transaction.sapling_bundle() { bundle } else { - return Err(StorageError::SimpleMessage("no sapling bundle")); + return Err(Error::new_const("no sapling bundle")); }; let tx_data = transaction.deref(); @@ -136,9 +136,7 @@ where let unauth_tx_data = match partial_deauthorize(tx_data) { Some(tx_data) => tx_data, None => { - return Err(StorageError::SimpleMessage( - "Failed to partially de-authorize", - )); + return Err(Error::new_const("Failed to partially de-authorize")); } }; @@ -167,16 +165,14 @@ where if !ctx.check_bundle(sapling_bundle.to_owned(), sighash.as_ref().to_owned()) { tracing::debug!("failed check bundle"); - return Err(StorageError::SimpleMessage("Invalid sapling bundle")); + return Err(Error::new_const("Invalid sapling bundle")); } tracing::debug!("passed check bundle"); // Charge gas before final validation charge_masp_validate_gas(sapling_bundle, consume_verify_gas)?; if !ctx.validate(spend_vk, convert_vk, output_vk, OsRng) { - return Err(StorageError::SimpleMessage( - "Invalid proofs or signatures", - )); 
+ return Err(Error::new_const("Invalid proofs or signatures")); } Ok(()) } @@ -219,9 +215,9 @@ pub fn partial_deauthorize( fn charge_masp_validate_gas( sapling_bundle: &SaplingBundle, consume_verify_gas: F, -) -> StorageResult<()> +) -> Result<()> where - F: Fn(u64) -> StorageResult<()>, + F: Fn(u64) -> Result<()>, { // Signatures gas consume_verify_gas(checked!( @@ -267,9 +263,9 @@ where fn charge_masp_check_bundle_gas( sapling_bundle: &SaplingBundle, consume_verify_gas: F, -) -> StorageResult<()> +) -> Result<()> where - F: Fn(u64) -> StorageResult<()>, + F: Fn(u64) -> Result<()>, { consume_verify_gas(checked!( (sapling_bundle.shielded_spends.len() as u64) diff --git a/crates/shielded_token/src/vp.rs b/crates/shielded_token/src/vp.rs index 099e3c56f1..1adef13aba 100644 --- a/crates/shielded_token/src/vp.rs +++ b/crates/shielded_token/src/vp.rs @@ -27,10 +27,10 @@ use namada_systems::{governance, ibc, parameters, trans_token}; use namada_tx::action::Read; use namada_tx::BatchedTxRef; use namada_vp::native_vp::{ - Ctx, CtxPostStorageRead, CtxPreStorageRead, NativeVp, VpEvaluator, + Ctx, CtxPostStorageRead, CtxPreStorageRead, Error, NativeVp, Result, + VpEvaluator, }; -use namada_vp::{native_vp, VpEnv}; -use thiserror::Error; +use namada_vp::VpEnv; use crate::storage_key::{ is_masp_key, is_masp_nullifier_key, is_masp_token_map_key, @@ -39,16 +39,6 @@ use crate::storage_key::{ }; use crate::validation::verify_shielded_tx; -#[allow(missing_docs)] -#[derive(Error, Debug)] -pub enum Error { - #[error("MASP VP error: Native VP error: {0}")] - NativeVpError(#[from] native_vp::Error), -} - -/// MASP VP result -pub type Result = std::result::Result; - /// MASP VP pub struct MaspVp<'ctx, S, CA, EVAL, Params, Gov, Ibc, TransToken, Transfer> where @@ -101,20 +91,17 @@ where ) -> Result<()> { tx.tx.data(tx.cmt).map_or_else( || { - Err(native_vp::Error::new_const( + Err(Error::new_const( "MASP parameter changes require tx data to be present", - ) - .into()) + )) }, |data| { - 
Gov::is_proposal_accepted(&self.ctx.pre(), data.as_ref()) - .map_err(Error::NativeVpError)? + Gov::is_proposal_accepted(&self.ctx.pre(), data.as_ref())? .ok_or_else(|| { - native_vp::Error::new_const( + Error::new_const( "MASP parameter changes can only be performed by \ a governance proposal that has been accepted", ) - .into() }) }, ) @@ -138,12 +125,11 @@ where if self.ctx.has_key_pre(&nullifier_key)? || revealed_nullifiers.contains(&nullifier_key) { - let error = native_vp::Error::new_alloc(format!( + let error = Error::new_alloc(format!( "MASP double spending attempt, the nullifier {:?} has \ already been revealed previously", description.nullifier.0, - )) - .into(); + )); tracing::debug!("{error}"); return Err(error); } @@ -156,10 +142,10 @@ where .read_bytes_post(&nullifier_key)? .is_some_and(|value| value.is_empty()) .ok_or_else(|| { - Error::NativeVpError(native_vp::Error::new_const( + Error::new_const( "The nullifier should have been committed with no \ associated data", - )) + ) })?; revealed_nullifiers.insert(nullifier_key); @@ -170,11 +156,10 @@ where keys_changed.iter().filter(|key| is_masp_nullifier_key(key)) { if !revealed_nullifiers.contains(nullifier_key) { - let error = native_vp::Error::new_alloc(format!( + let error = Error::new_alloc(format!( "An unexpected MASP nullifier key {nullifier_key} has \ been revealed by the transaction" - )) - .into(); + )); tracing::debug!("{error}"); return Err(error); } @@ -192,14 +177,14 @@ where // Check that the merkle tree in storage has been correctly updated with // the output descriptions cmu let tree_key = masp_commitment_tree_key(); - let mut previous_tree: CommitmentTree = - self.ctx.read_pre(&tree_key)?.ok_or(Error::NativeVpError( - native_vp::Error::SimpleMessage("Cannot read storage"), - ))?; - let post_tree: CommitmentTree = - self.ctx.read_post(&tree_key)?.ok_or(Error::NativeVpError( - native_vp::Error::SimpleMessage("Cannot read storage"), - ))?; + let mut previous_tree: CommitmentTree = self + 
.ctx + .read_pre(&tree_key)? + .ok_or(Error::new_const("Cannot read storage"))?; + let post_tree: CommitmentTree = self + .ctx + .read_post(&tree_key)? + .ok_or(Error::new_const("Cannot read storage"))?; // Based on the output descriptions of the transaction, update the // previous tree in storage @@ -210,18 +195,16 @@ where previous_tree .append(Node::from_scalar(description.cmu)) .map_err(|()| { - Error::NativeVpError(native_vp::Error::SimpleMessage( - "Failed to update the commitment tree", - )) + Error::new_const("Failed to update the commitment tree") })?; } // Check that the updated previous tree matches the actual post tree. // This verifies that all and only the necessary notes have been // appended to the tree if previous_tree != post_tree { - let error = Error::NativeVpError(native_vp::Error::SimpleMessage( + let error = Error::new_const( "The note commitment tree was incorrectly updated", - )); + ); tracing::debug!("{error}"); return Err(error); } @@ -242,10 +225,9 @@ where // Check if the provided anchor was published before if !self.ctx.has_key_pre(&anchor_key)? { - let error = - Error::NativeVpError(native_vp::Error::SimpleMessage( - "Spend description refers to an invalid anchor", - )); + let error = Error::new_const( + "Spend description refers to an invalid anchor", + ); tracing::debug!("{error}"); return Err(error); } @@ -265,9 +247,7 @@ where let expected_anchor = self .ctx .read_pre::(&anchor_key)? 
- .ok_or(Error::NativeVpError( - native_vp::Error::SimpleMessage("Cannot read storage"), - ))?; + .ok_or(Error::new_const("Cannot read storage"))?; for description in &bundle.shielded_converts { // Check if the provided anchor matches the current @@ -275,11 +255,8 @@ where if namada_core::hash::Hash(description.anchor.to_bytes()) != expected_anchor { - let error = Error::NativeVpError( - native_vp::Error::SimpleMessage( - "Convert description refers to an invalid \ - anchor", - ), + let error = Error::new_const( + "Convert description refers to an invalid anchor", ); tracing::debug!("{error}"); return Err(error); @@ -328,7 +305,7 @@ where checked!( pre_entry + &ValueSum::from_pair((*token).clone(), pre_balance) ) - .map_err(native_vp::Error::new)?, + .map_err(Error::new)?, ); // And then record the final state let post_entry = result.post.get(&addr_hash).cloned().unwrap_or(zero); @@ -338,7 +315,7 @@ where post_entry + &ValueSum::from_pair((*token).clone(), post_balance) ) - .map_err(native_vp::Error::new)?, + .map_err(Error::new)?, ); Result::<_>::Ok(result) } @@ -401,9 +378,7 @@ where self.ctx.get_block_epoch()?, masp_epoch_multiplier, ) - .map_err(|msg| { - Error::NativeVpError(native_vp::Error::new_const(msg)) - })?; + .map_err(Error::new_const)?; let conversion_state = self.ctx.state.in_mem().get_conversion_state(); let tx_data = batched_tx .tx @@ -419,9 +394,9 @@ where } else { let masp_section_ref = namada_tx::action::get_masp_section_ref(&actions) - .map_err(native_vp::Error::new_const)? + .map_err(Error::new_const)? .ok_or_else(|| { - native_vp::Error::new_const( + Error::new_const( "Missing MASP section reference in action", ) })?; @@ -431,18 +406,14 @@ where .get_masp_section(&masp_section_ref) .cloned() .ok_or_else(|| { - native_vp::Error::new_const( - "Missing MASP section in transaction", - ) + Error::new_const("Missing MASP section in transaction") })? }; if u64::from(self.ctx.get_block_height()?) 
> u64::from(shielded_tx.expiry_height()) { - let error = - native_vp::Error::new_const("MASP transaction is expired") - .into(); + let error = Error::new_const("MASP transaction is expired"); tracing::debug!("{error}"); return Err(error); } @@ -554,11 +525,10 @@ where if let Some(TAddrData::Ibc(_)) = changed_bals_minus_txn.decoder.get(&vout.address) { - let error = native_vp::Error::new_const( + let error = Error::new_const( "Simultaneous credit and debit of IBC account \ in a MASP transaction not allowed", - ) - .into(); + ); tracing::debug!("{error}"); return Err(error); } @@ -570,10 +540,9 @@ where // Otherwise the owner's vp must have been triggered and the // relative action must have been written if !verifiers.contains(signer) { - let error = native_vp::Error::new_alloc(format!( + let error = Error::new_alloc(format!( "The required vp of address {signer} was not triggered" - )) - .into(); + )); tracing::debug!("{error}"); return Err(error); } @@ -584,20 +553,18 @@ where // because of a masp transaction, which might require a // different validation than a normal balance change if !actions_authorizers.swap_remove(signer) { - let error = native_vp::Error::new_alloc(format!( + let error = Error::new_alloc(format!( "The required masp authorizer action for address \ {signer} is missing" - )) - .into(); + )); tracing::debug!("{error}"); return Err(error); } } else { // We are not able to decode the authorizer, so just fail - let error = native_vp::Error::new_const( + let error = Error::new_const( "Unable to decode a transaction authorizer", - ) - .into(); + ); tracing::debug!("{error}"); return Err(error); } @@ -605,17 +572,15 @@ where // The transaction shall not push masp authorizer actions that are not // needed cause this might lead vps to run a wrong validation logic if !actions_authorizers.is_empty() { - let error = native_vp::Error::new_const( + let error = Error::new_const( "Found masp authorizer actions that are not required", - ) - .into(); + ); 
tracing::debug!("{error}"); return Err(error); } // Verify the proofs verify_shielded_tx(&shielded_tx, |gas| self.ctx.charge_gas(gas)) - .map_err(Error::NativeVpError) } } @@ -674,9 +639,7 @@ fn validate_transparent_input( *bal_ref = bal_ref .checked_sub(&ValueSum::from_pair(asset.token.clone(), amount)) .ok_or_else(|| { - Error::NativeVpError(native_vp::Error::SimpleMessage( - "Underflow in bundle balance", - )) + Error::new_const("Underflow in bundle balance") })?; } // Maybe the asset type has no attached epoch @@ -692,9 +655,7 @@ fn validate_transparent_input( // conversion tree, then we must reject the unepoched // variant let error = - Error::NativeVpError(native_vp::Error::SimpleMessage( - "epoch is missing from asset type", - )); + Error::new_const("epoch is missing from asset type"); tracing::debug!("{error}"); return Err(error); } else { @@ -704,17 +665,13 @@ fn validate_transparent_input( *bal_ref = bal_ref .checked_sub(&ValueSum::from_pair(token.clone(), amount)) .ok_or_else(|| { - Error::NativeVpError(native_vp::Error::SimpleMessage( - "Underflow in bundle balance", - )) + Error::new_const("Underflow in bundle balance") })?; } } // unrecognized asset _ => { - let error = Error::NativeVpError(native_vp::Error::SimpleMessage( - "Unable to decode asset type", - )); + let error = Error::new_const("Unable to decode asset type"); tracing::debug!("{error}"); return Err(error); } @@ -752,9 +709,7 @@ fn validate_transparent_output( *bal_ref = bal_ref .checked_sub(&ValueSum::from_pair(asset.token.clone(), amount)) .ok_or_else(|| { - Error::NativeVpError(native_vp::Error::SimpleMessage( - "Underflow in bundle balance", - )) + Error::new_const("Underflow in bundle balance") })?; } // Maybe the asset type has no attached epoch @@ -767,16 +722,12 @@ fn validate_transparent_output( *bal_ref = bal_ref .checked_sub(&ValueSum::from_pair(token.clone(), amount)) .ok_or_else(|| { - Error::NativeVpError(native_vp::Error::SimpleMessage( - "Underflow in bundle balance", - )) 
+ Error::new_const("Underflow in bundle balance") })?; } // unrecognized asset _ => { - let error = Error::NativeVpError(native_vp::Error::SimpleMessage( - "Unable to decode asset type", - )); + let error = Error::new_const("Unable to decode asset type"); tracing::debug!("{error}"); return Err(error); } @@ -824,22 +775,20 @@ fn validate_transparent_bundle( // Ensure that the shielded transaction exactly balances match transparent_tx_pool.partial_cmp(&I128Sum::zero()) { None | Some(Ordering::Less) => { - let error = native_vp::Error::new_const( + let error = Error::new_const( "Transparent transaction value pool must be nonnegative. \ Violation may be caused by transaction being constructed in \ previous epoch. Maybe try again.", - ) - .into(); + ); tracing::debug!("{error}"); // The remaining value in the transparent transaction value pool // MUST be nonnegative. Err(error) } Some(Ordering::Greater) => { - let error = native_vp::Error::new_const( + let error = Error::new_const( "Transaction fees cannot be left on the MASP balance.", - ) - .into(); + ); tracing::debug!("{error}"); Err(error) } @@ -855,20 +804,13 @@ fn apply_balance_component( address: Address, ) -> Result> { // Put val into the correct digit position - let decoded_change = - I320::from_masp_denominated(val, digit).map_err(|_| { - Error::NativeVpError(native_vp::Error::SimpleMessage( - "Overflow in MASP value balance", - )) - })?; + let decoded_change = I320::from_masp_denominated(val, digit) + .map_err(|_| Error::new_const("Overflow in MASP value balance"))?; // Tag the numerical change with the token type let decoded_change = ValueSum::from_pair(address, decoded_change); // Apply the change to the accumulator - acc.checked_add(&decoded_change).ok_or_else(|| { - Error::NativeVpError(native_vp::Error::SimpleMessage( - "Overflow in MASP value balance", - )) - }) + acc.checked_add(&decoded_change) + .ok_or_else(|| Error::new_const("Overflow in MASP value balance")) } // Verify that the pre balance - the 
Sapling value balance = the post balance @@ -899,10 +841,7 @@ fn verify_sapling_balancing_value( apply_balance_component(&acc, *val, *digit, token.clone())?; } _ => { - let error = - Error::NativeVpError(native_vp::Error::SimpleMessage( - "Unable to decode asset type", - )); + let error = Error::new_const("Unable to decode asset type"); tracing::debug!("{error}"); return Err(error); } @@ -911,9 +850,9 @@ fn verify_sapling_balancing_value( if acc == ValueSum::from_sum(pre.clone()) { Ok(()) } else { - let error = Error::NativeVpError(native_vp::Error::SimpleMessage( + let error = Error::new_const( "MASP balance change not equal to Sapling value balance", - )); + ); tracing::debug!("{error}"); Err(error) } @@ -933,8 +872,6 @@ where + trans_token::Read>, Transfer: BorshDeserialize, { - type Error = Error; - fn validate_tx( &'view self, tx_data: &BatchedTxRef<'_>, @@ -949,9 +886,9 @@ where // Check that the transaction didn't write unallowed masp keys if non_allowed_changes { - return Err(Error::NativeVpError(native_vp::Error::SimpleMessage( + return Err(Error::new_const( "Found modifications to non-allowed masp keys", - ))); + )); } let masp_token_map_changed = masp_keys_changed .iter() @@ -960,10 +897,10 @@ where .iter() .any(|key| is_masp_transfer_key(key)); if masp_token_map_changed && masp_transfer_changes { - Err(Error::NativeVpError(native_vp::Error::SimpleMessage( + Err(Error::new_const( "Cannot simultaneously do governance proposal and MASP \ transfer", - ))) + )) } else if masp_token_map_changed { // The token map can only be changed by a successful governance // proposal diff --git a/crates/state/src/host_env.rs b/crates/state/src/host_env.rs index 6f97c66f4b..fb1b96a8c1 100644 --- a/crates/state/src/host_env.rs +++ b/crates/state/src/host_env.rs @@ -6,7 +6,9 @@ use namada_tx::data::TxSentinel; use crate::in_memory::InMemory; use crate::write_log::WriteLog; -use crate::{DBIter, Error, Result, State, StateRead, StorageHasher, DB}; +use crate::{ + DBIter, 
Error, Result, State, StateError, StateRead, StorageHasher, DB, +}; /// State with mutable write log and gas metering for tx host env. #[derive(Debug)] @@ -71,7 +73,7 @@ where "Stopping transaction execution because of gas error: {}", err ); - Error::Gas(err) + Error::from(StateError::Gas(err)) }) } } @@ -137,6 +139,10 @@ where } fn charge_gas(&self, gas: u64) -> Result<()> { - self.gas_meter.borrow_mut().consume(gas).map_err(Error::Gas) + Ok(self + .gas_meter + .borrow_mut() + .consume(gas) + .map_err(StateError::Gas)?) } } diff --git a/crates/state/src/in_memory.rs b/crates/state/src/in_memory.rs index a2807d520d..1f669420f6 100644 --- a/crates/state/src/in_memory.rs +++ b/crates/state/src/in_memory.rs @@ -21,7 +21,7 @@ use namada_storage::{ KeySeg, StorageHasher, TxIndex, EPOCH_TYPE_LENGTH, }; -use crate::{Error, Result}; +use crate::Result; /// The ledger's state #[derive(Debug)] @@ -281,23 +281,17 @@ where let key_prefix: Key = Address::Internal(InternalAddress::PoS).to_db_key().into(); - let key = key_prefix - .push(&"epoch_start_height".to_string()) - .map_err(Error::KeyError)?; + let key = key_prefix.push(&"epoch_start_height".to_string())?; self.block .tree .update(&key, encode(&self.next_epoch_min_start_height))?; - let key = key_prefix - .push(&"epoch_start_time".to_string()) - .map_err(Error::KeyError)?; + let key = key_prefix.push(&"epoch_start_time".to_string())?; self.block .tree .update(&key, encode(&self.next_epoch_min_start_time))?; - let key = key_prefix - .push(&"current_epoch".to_string()) - .map_err(Error::KeyError)?; + let key = key_prefix.push(&"current_epoch".to_string())?; self.block.tree.update(&key, encode(&self.block.epoch))?; Ok(()) diff --git a/crates/state/src/lib.rs b/crates/state/src/lib.rs index 0eef2a0686..6ddce56da9 100644 --- a/crates/state/src/lib.rs +++ b/crates/state/src/lib.rs @@ -31,14 +31,14 @@ pub use in_memory::{ BlockStorage, InMemory, LastBlock, ProcessProposalCachedResult, }; use namada_core::address::Address; -use 
namada_core::arith::{self, checked}; +use namada_core::arith::checked; pub use namada_core::chain::{ BlockHash, BlockHeader, BlockHeight, Epoch, Epochs, BLOCK_HASH_LENGTH, BLOCK_HEIGHT_LENGTH, }; use namada_core::eth_bridge_pool::is_pending_transfer_key; +use namada_core::hash::Hash; pub use namada_core::hash::Sha256Hasher; -use namada_core::hash::{Error as HashError, Hash}; pub use namada_core::storage::{ BlockResults, EthEventsQueue, Key, KeySeg, TxIndex, EPOCH_TYPE_LENGTH, }; @@ -57,9 +57,8 @@ pub use namada_storage::types::{KVBytes, PatternIterator, PrefixIterator}; pub use namada_storage::{ collections, iter_prefix, iter_prefix_bytes, iter_prefix_with_filter, mockdb, tx_queue, BlockStateRead, BlockStateWrite, DBIter, DBWriteBatch, - DbError, DbResult, Error as StorageError, OptionExt, - Result as StorageResult, ResultExt, StorageHasher, StorageRead, - StorageWrite, DB, + DbError, DbResult, Error, OptionExt, Result, ResultExt, StorageHasher, + StorageRead, StorageWrite, DB, }; use namada_systems::parameters; use thiserror::Error; @@ -67,9 +66,6 @@ use wl_state::TxWlState; pub use wl_state::{FullAccessState, TempWlState, WlState}; use write_log::WriteLog; -/// A result of a function that may fail -pub type Result = std::result::Result; - /// We delay epoch change 2 blocks to keep it in sync with Tendermint, because /// it has 2 blocks delay on validator set update. pub const EPOCH_SWITCH_BLOCKS_DELAY: u32 = 2; @@ -155,8 +151,7 @@ pub trait StateRead: StorageRead + Debug { }; match self.db_read(&key)? 
{ (Some(value), gas) => { - let vp_code_hash = Hash::try_from(&value[..]) - .map_err(Error::InvalidCodeHash)?; + let vp_code_hash = Hash::try_from(&value[..])?; Ok((Some(vp_code_hash), gas)) } (None, gas) => Ok((None, gas)), @@ -436,36 +431,16 @@ impl_storage_write!(TxHostEnvState<'_, D, H>); #[allow(missing_docs)] #[derive(Error, Debug)] -pub enum Error { - #[error("TEMPORARY error: {error}")] - Temporary { error: String }, - #[error("Storage key error {0}")] - KeyError(namada_core::storage::Error), - #[error("Coding error: {0}")] - CodingError(#[from] namada_core::DecodeError), - #[error("Merkle tree error: {0}")] - MerkleTreeError(MerkleTreeError), - #[error("DB error: {0}")] - DBError(String), - #[error("Borsh (de)-serialization error: {0}")] - BorshCodingError(std::io::Error), +pub enum StateError { #[error("Merkle tree at the height {height} is not stored")] NoMerkleTree { height: BlockHeight }, - #[error("Code hash error: {0}")] - InvalidCodeHash(HashError), - #[error("DB error: {0}")] - DbError(#[from] namada_storage::DbError), #[error("{0}")] Gas(namada_gas::Error), - #[error("{0}")] - StorageError(#[from] namada_storage::Error), - #[error("Arithmetic {0}")] - Arith(#[from] arith::Error), } -impl From for Error { - fn from(error: MerkleTreeError) -> Self { - Self::MerkleTreeError(error) +impl From for Error { + fn from(value: StateError) -> Self { + Error::new(value) } } diff --git a/crates/state/src/wl_state.rs b/crates/state/src/wl_state.rs index 5d498ffc6a..57869ba788 100644 --- a/crates/state/src/wl_state.rs +++ b/crates/state/src/wl_state.rs @@ -7,8 +7,8 @@ use namada_core::borsh::BorshSerializeExt; use namada_core::chain::ChainId; use namada_core::masp::MaspEpoch; use namada_core::parameters::{EpochDuration, Parameters}; -use namada_core::storage; use namada_core::time::DateTimeUtc; +use namada_core::{decode, storage}; use namada_events::{EmitEvents, EventToEmit}; use namada_merkle_tree::NO_DIFF_KEY_PREFIX; use namada_replay_protection as 
replay_protection; @@ -22,7 +22,7 @@ use crate::write_log::{StorageModification, WriteLog}; use crate::{ is_pending_transfer_key, DBIter, Epoch, Error, Hash, Key, KeySeg, LastBlock, MembershipProof, MerkleTree, MerkleTreeError, ProofOps, Result, - State, StateRead, StorageHasher, StorageResult, StoreType, TxWrites, DB, + State, StateError, StateRead, StorageHasher, StoreType, TxWrites, DB, EPOCH_SWITCH_BLOCKS_DELAY, STORAGE_ACCESS_GAS_PER_BYTE, }; @@ -154,7 +154,7 @@ where height: BlockHeight, time: DateTimeUtc, parameters: &Parameters, - ) -> StorageResult { + ) -> Result { match self.in_mem.update_epoch_blocks_delay.as_mut() { None => { // Check if the new epoch minimum start height and start time @@ -209,7 +209,7 @@ where &self, is_new_epoch: bool, masp_epoch_multiplier: u64, - ) -> StorageResult { + ) -> Result { let masp_new_epoch = is_new_epoch && matches!( self.in_mem.block.epoch.checked_rem(masp_epoch_multiplier), @@ -230,7 +230,7 @@ where /// Commit the current block's write log to the storage and commit the block /// to DB. Starts a new block write log. - pub fn commit_block(&mut self) -> StorageResult<()> { + pub fn commit_block(&mut self) -> Result<()> { if self.in_mem.last_epoch != self.in_mem.block.epoch { self.in_mem_mut() .update_epoch_in_merkle_tree() @@ -528,7 +528,7 @@ where .rebuild_full_merkle_tree(height) .expect("Merkle tree should be restored"); - tree.validate().map_err(Error::MerkleTreeError).unwrap(); + tree.validate().unwrap(); let in_mem = &mut self.0.in_mem; in_mem.block.tree = tree; @@ -548,7 +548,7 @@ where .block .tree .update_commit_data(data) - .map_err(Error::MerkleTreeError) + .map_err(Into::into) } /// Persist the block's state from batch writes to the database. @@ -871,34 +871,35 @@ where }; if height > self.in_mem.get_last_block_height() { - if let MembershipProof::ICS23(proof) = self - .in_mem - .block - .tree - .get_sub_tree_existence_proof(array::from_ref(key), vec![value]) - .map_err(Error::MerkleTreeError)? 
+ if let MembershipProof::ICS23(proof) = + self.in_mem.block.tree.get_sub_tree_existence_proof( + array::from_ref(key), + vec![value], + )? { self.in_mem .block .tree .get_sub_tree_proof(key, proof) .map(Into::into) - .map_err(Error::MerkleTreeError) + .map_err(Into::into) } else { - Err(Error::MerkleTreeError(MerkleTreeError::TendermintProof)) + Err(Error::from(MerkleTreeError::TendermintProof)) } } else { let (store_type, _) = StoreType::sub_key(key)?; let tree = self.get_merkle_tree(height, Some(store_type))?; if let MembershipProof::ICS23(proof) = tree - .get_sub_tree_existence_proof(array::from_ref(key), vec![value]) - .map_err(Error::MerkleTreeError)? + .get_sub_tree_existence_proof( + array::from_ref(key), + vec![value], + )? { tree.get_sub_tree_proof(key, proof) .map(Into::into) - .map_err(Error::MerkleTreeError) + .map_err(Into::into) } else { - Err(Error::MerkleTreeError(MerkleTreeError::TendermintProof)) + Err(Error::from(MerkleTreeError::TendermintProof)) } } } @@ -917,18 +918,16 @@ where }; if height > self.in_mem.get_last_block_height() { - Err(Error::Temporary { - error: format!( - "The block at the height {} hasn't committed yet", - height, - ), - }) + Err(Error::new_alloc(format!( + "The block at the height {} hasn't committed yet", + height, + ))) } else { let (store_type, _) = StoreType::sub_key(key)?; self.get_merkle_tree(height, Some(store_type))? .get_non_existence_proof(key) .map(Into::into) - .map_err(Error::MerkleTreeError) + .map_err(Into::into) } } @@ -971,7 +970,7 @@ where let stores = self .db .read_merkle_tree_stores(epoch, start_height, store_type)? - .ok_or(Error::NoMerkleTree { height })?; + .ok_or(StateError::NoMerkleTree { height })?; let prefix = store_type.and_then(|st| st.provable_prefix()); let mut tree = match store_type { Some(_) => MerkleTree::::new_partial(stores), @@ -1070,7 +1069,7 @@ where height, Some(StoreType::Base), )? 
- .ok_or(Error::NoMerkleTree { height })?; + .ok_or(StateError::NoMerkleTree { height })?; let restored_stores = tree.stores(); stores.set_root(&st, *restored_stores.root(&st)); stores.set_store(restored_stores.store(&st).to_owned()); @@ -1081,7 +1080,7 @@ where let mut stores = self .db .read_merkle_tree_stores(epoch, height, None)? - .ok_or(Error::NoMerkleTree { height })?; + .ok_or(StateError::NoMerkleTree { height })?; let restored_stores = tree.stores(); // Set all rebuilt subtrees except for the subtrees stored in // every block @@ -1143,7 +1142,7 @@ where self.db() .has_replay_protection_entry(hash) - .map_err(Error::DbError) + .map_err(Into::into) } /// Check if the given tx hash has already been committed to storage @@ -1153,7 +1152,7 @@ where ) -> Result { self.db() .has_replay_protection_entry(hash) - .map_err(Error::DbError) + .map_err(Into::into) } } @@ -1448,9 +1447,7 @@ where let (log_val, _) = self.write_log().read_temp(key).unwrap(); match log_val { Some(value) => { - let value = - namada_core::borsh::BorshDeserialize::try_from_slice(value) - .map_err(Error::BorshCodingError)?; + let value = decode(value)?; Ok(Some(value)) } None => Ok(None), @@ -1471,10 +1468,7 @@ where ) -> Result<()> { let _ = self .write_log_mut() - .write_temp(key, val.serialize_to_vec()) - .map_err(|err| Error::Temporary { - error: err.to_string(), - })?; + .write_temp(key, val.serialize_to_vec())?; Ok(()) } } @@ -1493,9 +1487,7 @@ where let (log_val, _) = self.write_log().read_temp(key).unwrap(); match log_val { Some(value) => { - let value = - namada_core::borsh::BorshDeserialize::try_from_slice(value) - .map_err(Error::BorshCodingError)?; + let value = decode(value)?; Ok(Some(value)) } None => Ok(None), @@ -1517,9 +1509,7 @@ where let (log_val, _) = self.write_log().read_temp(key).unwrap(); match log_val { Some(value) => { - let value = - namada_core::borsh::BorshDeserialize::try_from_slice(value) - .map_err(Error::BorshCodingError)?; + let value = decode(value)?; 
Ok(Some(value)) } None => Ok(None), diff --git a/crates/state/src/write_log.rs b/crates/state/src/write_log.rs index 61bbfc958f..30b897f40f 100644 --- a/crates/state/src/write_log.rs +++ b/crates/state/src/write_log.rs @@ -20,8 +20,6 @@ use thiserror::Error; #[allow(missing_docs)] #[derive(Error, Debug)] pub enum Error { - #[error("Storage error applying a write log: {0}")] - StorageError(crate::Error), #[error("Trying to update a temporary value")] UpdateTemporaryValue, #[error( @@ -45,6 +43,12 @@ pub enum Error { ValueLenOverflow, } +impl From for crate::Error { + fn from(value: Error) -> Self { + crate::Error::new(value) + } +} + /// Result for functions that may fail pub type Result = std::result::Result; diff --git a/crates/storage/src/error.rs b/crates/storage/src/error.rs index 7d54148291..a5b2e76c87 100644 --- a/crates/storage/src/error.rs +++ b/crates/storage/src/error.rs @@ -1,9 +1,14 @@ //! Storage API error type, extensible with custom user errors and static string //! messages. +use std::convert::Infallible; +use std::num::TryFromIntError; + use namada_core::arith; use thiserror::Error; +use crate::db; + #[allow(missing_docs)] #[derive(Error, Debug)] pub enum Error { @@ -17,12 +22,6 @@ pub enum Error { CustomWithMessage(&'static str, CustomError), } -impl From for Error { - fn from(value: arith::Error) -> Self { - Error::new(value) - } -} - /// Result of a storage API call. pub type Result = std::result::Result; @@ -105,6 +104,23 @@ impl Error { _ => Err(self), } } + + /// Returns some reference to the inner value if it is of type `E`, or + /// `None` if it isn't. 
+ pub fn downcast_ref(&self) -> Option<&E> + where + E: std::error::Error + Send + Sync + 'static, + { + match self { + Self::Custom(CustomError(b)) + | Self::CustomWithMessage(_, CustomError(b)) + if b.is::() => + { + b.downcast_ref::() + } + _ => None, + } + } } /// A custom error @@ -145,3 +161,56 @@ impl From .into() } } + +impl From for Error { + fn from(value: arith::Error) -> Self { + Error::new(value) + } +} + +impl From for Error { + fn from(value: db::Error) -> Self { + Error::new(value) + } +} + +impl From for Error { + fn from(value: namada_core::storage::Error) -> Self { + Error::new(value) + } +} + +impl From for Error { + fn from(value: namada_core::DecodeError) -> Self { + Error::new(value) + } +} +impl From for Error { + fn from(value: namada_core::string_encoding::DecodeError) -> Self { + Error::new(value) + } +} + +impl From for Error { + fn from(value: namada_core::hash::Error) -> Self { + Error::new(value) + } +} + +impl From for Error { + fn from(value: namada_merkle_tree::Error) -> Self { + Error::new(value) + } +} + +impl From for Error { + fn from(_value: Infallible) -> Self { + panic!("Infallible error can never be constructed") + } +} + +impl From for Error { + fn from(value: TryFromIntError) -> Self { + Error::new(value) + } +} diff --git a/crates/tests/src/native_vp/mod.rs b/crates/tests/src/native_vp/mod.rs index f3c659f2ce..677c3827ae 100644 --- a/crates/tests/src/native_vp/mod.rs +++ b/crates/tests/src/native_vp/mod.rs @@ -12,7 +12,7 @@ use namada_sdk::storage; use namada_vm::wasm::run::VpEvalWasm; use namada_vm::wasm::VpCache; use namada_vm::WasmCacheRwAccess; -use namada_vp::native_vp::{Ctx, NativeVp}; +use namada_vp::native_vp::{self, Ctx, NativeVp}; use crate::tx::TestTxEnv; @@ -79,7 +79,7 @@ impl TestNativeVpEnv { pub fn validate_tx<'view, 'ctx: 'view, T>( &'ctx self, vp: &'view T, - ) -> Result<(), >::Error> + ) -> native_vp::Result<()> where T: 'view + NativeVp<'view>, { diff --git a/crates/tests/src/vm_host_env/ibc.rs 
b/crates/tests/src/vm_host_env/ibc.rs index 2465339e3d..d96eacfc2e 100644 --- a/crates/tests/src/vm_host_env/ibc.rs +++ b/crates/tests/src/vm_host_env/ibc.rs @@ -69,7 +69,6 @@ use namada_sdk::state::StateRead; use namada_sdk::storage::{self, BlockHeight, Epoch, Key, TxIndex}; use namada_sdk::tendermint::time::Time as TmTime; use namada_sdk::time::DurationSecs; -use namada_sdk::token::vp::MultitokenError; use namada_sdk::tx::BatchedTxRef; use namada_sdk::validation::{IbcVp, MultitokenVp}; use namada_sdk::{ibc, proof_of_stake, token}; @@ -77,6 +76,7 @@ use namada_test_utils::TestWasms; use namada_tx_env::TxEnv; use namada_tx_prelude::BorshSerializeExt; use namada_vm::{wasm, WasmCacheRwAccess}; +use namada_vp::native_vp; use namada_vp::native_vp::{Ctx, NativeVp}; use crate::tx::*; @@ -89,10 +89,7 @@ pub struct TestIbcVp<'a> { } impl<'a> TestIbcVp<'a> { - pub fn validate( - &self, - batched_tx: &BatchedTxRef, - ) -> std::result::Result<(), namada_sdk::ibc::vp::Error> { + pub fn validate(&self, batched_tx: &BatchedTxRef) -> native_vp::Result<()> { self.ibc.validate_tx( batched_tx, self.ibc.ctx.keys_changed, @@ -106,10 +103,7 @@ pub struct TestMultitokenVp<'a> { } impl<'a> TestMultitokenVp<'a> { - pub fn validate( - &self, - batched_tx: &BatchedTxRef, - ) -> std::result::Result<(), MultitokenError> { + pub fn validate(&self, batched_tx: &BatchedTxRef) -> native_vp::Result<()> { self.multitoken_vp.validate_tx( batched_tx, self.multitoken_vp.ctx.keys_changed, @@ -122,7 +116,7 @@ impl<'a> TestMultitokenVp<'a> { pub fn validate_ibc_vp_from_tx<'a>( tx_env: &'a TestTxEnv, batched_tx: &'a BatchedTxRef, -) -> std::result::Result<(), namada_sdk::ibc::vp::Error> { +) -> native_vp::Result<()> { let (verifiers, keys_changed) = tx_env .state .write_log() @@ -161,7 +155,7 @@ pub fn validate_multitoken_vp_from_tx<'a>( tx_env: &'a TestTxEnv, batched_tx: &'a BatchedTxRef, target: &Key, -) -> std::result::Result<(), MultitokenError> { +) -> native_vp::Result<()> { let (verifiers, 
keys_changed) = tx_env .state .write_log() diff --git a/crates/tests/src/vm_host_env/mod.rs b/crates/tests/src/vm_host_env/mod.rs index b958a63ab5..4e6aa6fd64 100644 --- a/crates/tests/src/vm_host_env/mod.rs +++ b/crates/tests/src/vm_host_env/mod.rs @@ -30,7 +30,6 @@ mod tests { use namada_sdk::ibc::context::nft_transfer_mod::testing::DummyNftTransferModule; use namada_sdk::ibc::context::transfer_mod::testing::DummyTransferModule; use namada_sdk::ibc::primitives::ToProto; - use namada_sdk::ibc::vp::Error as IbcError; use namada_sdk::ibc::{ storage as ibc_storage, trace as ibc_trace, Error as IbcActionError, }; @@ -1109,8 +1108,10 @@ mod tests { ); // VP should fail because the transfer channel cannot be closed assert!(matches!( - result.expect_err("validation succeeded unexpectedly"), - IbcError::IbcAction(IbcActionError::Context(_)), + result + .expect_err("validation succeeded unexpectedly") + .downcast_ref::(), + Some(IbcActionError::Context(_)), )); } diff --git a/crates/token/src/lib.rs b/crates/token/src/lib.rs index a7e6953150..ad98ba4cc5 100644 --- a/crates/token/src/lib.rs +++ b/crates/token/src/lib.rs @@ -30,12 +30,11 @@ pub use namada_trans_token::*; /// Validity predicates pub mod vp { - pub use namada_shielded_token::vp::{ - Error as MaspError, MaspVp, Result as MaspResult, - }; - pub use namada_trans_token::vp::{ - Error as MultitokenError, MultitokenVp, Result as MultitokenResult, - }; + pub use namada_shielded_token::vp::MaspVp; + // The error and result type are the same as in `namada_trans_token` - + // a native VP + pub use namada_shielded_token::{Error, Result}; + pub use namada_trans_token::vp::MultitokenVp; } use serde::{Deserialize, Serialize}; diff --git a/crates/trans_token/src/lib.rs b/crates/trans_token/src/lib.rs index 680f9f7376..6865014817 100644 --- a/crates/trans_token/src/lib.rs +++ b/crates/trans_token/src/lib.rs @@ -31,7 +31,7 @@ use namada_core::uint::Uint; use namada_events::extend::UserAccount; use namada_events::{EmitEvents, 
EventLevel}; pub use namada_state::{ - Key, ResultExt, StorageError, StorageRead, StorageResult, StorageWrite, + Error, Key, Result, ResultExt, StorageRead, StorageWrite, }; pub use namada_systems::trans_token::*; pub use storage::*; diff --git a/crates/trans_token/src/storage.rs b/crates/trans_token/src/storage.rs index 4391f7d43f..02281fec91 100644 --- a/crates/trans_token/src/storage.rs +++ b/crates/trans_token/src/storage.rs @@ -7,12 +7,10 @@ pub use namada_core::storage::Key; use namada_core::token::{self, Amount, AmountError, DenominatedAmount}; use crate::storage_key::*; -use crate::{ - ResultExt, StorageError, StorageRead, StorageResult, StorageWrite, -}; +use crate::{Error, Result, ResultExt, StorageRead, StorageWrite}; /// Initialize parameters for the token in storage during the genesis block. -pub fn write_params(storage: &mut S, address: &Address) -> StorageResult<()> +pub fn write_params(storage: &mut S, address: &Address) -> Result<()> where S: StorageRead + StorageWrite, { @@ -25,7 +23,7 @@ pub fn read_balance( storage: &S, token: &Address, owner: &Address, -) -> StorageResult +) -> Result where S: StorageRead, { @@ -40,10 +38,10 @@ pub fn update_balance( token: &Address, owner: &Address, f: F, -) -> StorageResult<()> +) -> Result<()> where S: StorageRead + StorageWrite, - F: FnOnce(token::Amount) -> StorageResult, + F: FnOnce(token::Amount) -> Result, { let key = balance_key(token, owner); let balance = storage.read::(&key)?.unwrap_or_default(); @@ -57,7 +55,7 @@ pub fn increment_balance( token: &Address, owner: &Address, amount: token::Amount, -) -> StorageResult<()> +) -> Result<()> where S: StorageRead + StorageWrite, { @@ -75,7 +73,7 @@ pub fn decrement_balance( token: &Address, owner: &Address, amount: token::Amount, -) -> StorageResult<()> +) -> Result<()> where S: StorageRead + StorageWrite, { @@ -91,7 +89,7 @@ where pub fn read_total_supply( storage: &S, token: &Address, -) -> StorageResult +) -> Result where S: StorageRead, { @@ -105,10 
+103,10 @@ pub fn update_total_supply( storage: &mut S, token: &Address, f: F, -) -> StorageResult<()> +) -> Result<()> where S: StorageRead + StorageWrite, - F: FnOnce(token::Amount) -> StorageResult, + F: FnOnce(token::Amount) -> Result, { let key = minted_balance_key(token); let total_supply = storage.read::(&key)?.unwrap_or_default(); @@ -121,7 +119,7 @@ pub fn increment_total_supply( storage: &mut S, token: &Address, amount: token::Amount, -) -> StorageResult<()> +) -> Result<()> where S: StorageRead + StorageWrite, { @@ -138,7 +136,7 @@ pub fn decrement_total_supply( storage: &mut S, token: &Address, amount: token::Amount, -) -> StorageResult<()> +) -> Result<()> where S: StorageRead + StorageWrite, { @@ -153,7 +151,7 @@ where /// Get the effective circulating total supply of native tokens. pub fn get_effective_total_native_supply( storage: &S, -) -> StorageResult +) -> Result where S: StorageRead, { @@ -175,7 +173,7 @@ where pub fn read_denom( storage: &S, token: &Address, -) -> StorageResult> +) -> Result> where S: StorageRead, { @@ -213,7 +211,7 @@ pub fn write_denom( storage: &mut S, token: &Address, denom: token::Denomination, -) -> StorageResult<()> +) -> Result<()> where S: StorageRead + StorageWrite, { @@ -230,7 +228,7 @@ pub fn transfer( src: &Address, dest: &Address, amount: token::Amount, -) -> StorageResult<()> +) -> Result<()> where S: StorageRead + StorageWrite, { @@ -249,14 +247,14 @@ where storage.write(&src_key, new_src_balance)?; storage.write(&dest_key, new_dest_balance) } - None => Err(StorageError::new_alloc(format!( + None => Err(Error::new_alloc(format!( "The transfer would overflow balance of {dest}" ))), } } - None => Err(StorageError::new_alloc(format!( - "{src} has insufficient balance" - ))), + None => { + Err(Error::new_alloc(format!("{src} has insufficient balance"))) + } } } @@ -268,7 +266,7 @@ pub fn multi_transfer( storage: &mut S, sources: &BTreeMap<(Address, Address), Amount>, dests: &BTreeMap<(Address, Address), Amount>, -) 
-> StorageResult> +) -> Result> where S: StorageRead + StorageWrite, { @@ -279,20 +277,19 @@ where accounts.extend(dests.keys().cloned()); let unexpected_err = || { - StorageError::new_const( + Error::new_const( "Computing difference between amounts should never overflow", ) }; // Apply the balance change for each account in turn for ref account @ (ref owner, ref token) in accounts { let overflow_err = || { - StorageError::new_alloc(format!( + Error::new_alloc(format!( "The transfer would overflow balance of {owner}" )) }; - let underflow_err = || { - StorageError::new_alloc(format!("{owner} has insufficient balance")) - }; + let underflow_err = + || Error::new_alloc(format!("{owner} has insufficient balance")); // Load account balances and deltas let owner_key = balance_key(token, owner); let owner_balance = read_balance(storage, token, owner)?; @@ -326,7 +323,7 @@ pub fn mint_tokens( token: &Address, dest: &Address, amount: token::Amount, -) -> StorageResult<()> +) -> Result<()> where S: StorageRead + StorageWrite, { @@ -341,7 +338,7 @@ pub fn credit_tokens( token: &Address, dest: &Address, amount: token::Amount, -) -> StorageResult<()> +) -> Result<()> where S: StorageRead + StorageWrite, { @@ -360,7 +357,7 @@ pub fn burn_tokens( token: &Address, source: &Address, amount: token::Amount, -) -> StorageResult<()> +) -> Result<()> where S: StorageRead + StorageWrite, { @@ -385,9 +382,9 @@ pub fn denominated( amount: token::Amount, token: &Address, storage: &impl StorageRead, -) -> StorageResult { +) -> Result { let denom = read_denom(storage, token)?.ok_or_else(|| { - StorageError::SimpleMessage( + Error::SimpleMessage( "No denomination found in storage for the given token", ) })?; @@ -401,15 +398,15 @@ pub fn denom_to_amount( denom_amount: DenominatedAmount, token: &Address, storage: &impl StorageRead, -) -> StorageResult { +) -> Result { #[cfg(not(fuzzing))] { let denom = read_denom(storage, token)?.ok_or_else(|| { - StorageError::SimpleMessage( + 
Error::SimpleMessage( "No denomination found in storage for the given token", ) })?; - denom_amount.scale(denom).map_err(StorageError::new) + denom_amount.scale(denom).map_err(Error::new) } #[cfg(fuzzing)] diff --git a/crates/trans_token/src/vp.rs b/crates/trans_token/src/vp.rs index 59c32fc8a5..bc89bf0482 100644 --- a/crates/trans_token/src/vp.rs +++ b/crates/trans_token/src/vp.rs @@ -15,10 +15,9 @@ use namada_tx::action::{ }; use namada_tx::BatchedTxRef; use namada_vp::native_vp::{ - self, Ctx, CtxPreStorageRead, NativeVp, VpEvaluator, + Ctx, CtxPreStorageRead, Error, NativeVp, Result, VpEvaluator, }; use namada_vp::VpEnv; -use thiserror::Error; use crate::storage_key::{ is_any_minted_balance_key, is_any_minter_key, is_any_token_balance_key, @@ -33,18 +32,6 @@ enum Owner<'a> { Protocol, } -#[allow(missing_docs)] -#[derive(Error, Debug)] -pub enum Error { - #[error("Multitoken VP error: governance proposal change is invalid")] - InvalidGovernanceChange, - #[error("Multitoken VP error: Native VP error: {0}")] - NativeVpError(#[from] native_vp::Error), -} - -/// Multitoken functions result -pub type Result = std::result::Result; - /// Multitoken VP pub struct MultitokenVp<'ctx, S, CA, EVAL, Params, Gov> where @@ -65,8 +52,6 @@ where Params: parameters::Read>, Gov: governance::Read>, { - type Error = Error; - fn validate_tx( &'view self, tx_data: &BatchedTxRef<'_>, @@ -134,21 +119,15 @@ where tracing::debug!( "Native token deposit isn't allowed" ); - return Err(Error::NativeVpError( - native_vp::Error::SimpleMessage( - "Native token deposit isn't allowed", - ), + return Err(Error::new_const( + "Native token deposit isn't allowed", )); } let change = inc_changes.entry(token.clone()).or_default(); *change = change.checked_add(diff).ok_or_else(|| { - Error::NativeVpError( - native_vp::Error::SimpleMessage( - "Overflowed in balance check", - ), - ) + Error::new_const("Overflowed in balance check") })?; } None => { @@ -156,10 +135,8 @@ where tracing::debug!( "Native token 
withdraw isn't allowed" ); - return Err(Error::NativeVpError( - native_vp::Error::SimpleMessage( - "Native token deposit isn't allowed", - ), + return Err(Error::new_const( + "Native token deposit isn't allowed", )); } let diff = pre @@ -169,11 +146,7 @@ where dec_changes.entry(token.clone()).or_default(); *change = change.checked_add(diff).ok_or_else(|| { - Error::NativeVpError( - native_vp::Error::SimpleMessage( - "Overflowed in balance check", - ), - ) + Error::new_const("Overflowed in balance check") })?; } } @@ -182,10 +155,8 @@ where tracing::debug!( "Minting/Burning native token isn't allowed" ); - return Err(Error::NativeVpError( - native_vp::Error::SimpleMessage( - "Minting/Burning native token isn't allowed", - ), + return Err(Error::new_const( + "Minting/Burning native token isn't allowed", )); } @@ -195,11 +166,7 @@ where Some(diff) => { let mint = inc_mints.entry(token.clone()).or_default(); *mint = mint.checked_add(diff).ok_or_else(|| { - Error::NativeVpError( - native_vp::Error::SimpleMessage( - "Overflowed in balance check", - ), - ) + Error::new_const("Overflowed in balance check") })?; } None => { @@ -208,11 +175,7 @@ where .expect("Underflow shouldn't happen here"); let mint = dec_mints.entry(token.clone()).or_default(); *mint = mint.checked_add(diff).ok_or_else(|| { - Error::NativeVpError( - native_vp::Error::SimpleMessage( - "Overflowed in balance check", - ), - ) + Error::new_const("Overflowed in balance check") })?; } } @@ -229,10 +192,9 @@ where { // Reject when trying to update an unexpected key under // `#Multitoken/...` - return Err(native_vp::Error::new_alloc(format!( + return Err(Error::new_alloc(format!( "Unexpected change to the multitoken account: {key}" - )) - .into()); + ))); } } @@ -251,10 +213,9 @@ where // VPs themselves, their validation is handled // by the `Multitoken` internal address, // but internal token Nut addresses have to verify the transfer - return Err(native_vp::Error::new_alloc(format!( + return 
Err(Error::new_alloc(format!( "Token {token} must verify the tx" - )) - .into()); + ))); } let inc_change = @@ -278,10 +239,9 @@ where }; token_changes_are_balanced.ok_or_else(|| { - native_vp::Error::new_const( + Error::new_const( "The transaction's token changes are unbalanced", ) - .into() }) }) } @@ -320,22 +280,17 @@ where == Address::Internal(InternalAddress::Ibc) => { verifiers.contains(&minter).ok_or_else(|| { - native_vp::Error::new_const( - "The IBC VP was not triggered", - ) - .into() + Error::new_const("The IBC VP was not triggered") }) } - _ => Err(native_vp::Error::new_const( + _ => Err(Error::new_const( "Only the IBC account is able to mint IBC tokens", - ) - .into()), + )), } } - _ => Err(native_vp::Error::new_alloc(format!( + _ => Err(Error::new_alloc(format!( "Attempted to mint non-IBC token {token}" - )) - .into()), + ))), } } @@ -346,20 +301,17 @@ where ) -> Result<()> { batched_tx.tx.data(batched_tx.cmt).map_or_else( || { - Err(native_vp::Error::new_const( + Err(Error::new_const( "Token parameter changes require tx data to be present", - ) - .into()) + )) }, |data| { - Gov::is_proposal_accepted(&self.ctx.pre(), data.as_ref()) - .map_err(Error::NativeVpError)? + Gov::is_proposal_accepted(&self.ctx.pre(), data.as_ref())? 
.ok_or_else(|| { - native_vp::Error::new_const( + Error::new_const( "Token parameter changes can only be performed by \ a governance proposal that has been accepted", ) - .into() }) }, ) diff --git a/crates/tx_prelude/src/lib.rs b/crates/tx_prelude/src/lib.rs index abe4e7d8d1..07e8289e5f 100644 --- a/crates/tx_prelude/src/lib.rs +++ b/crates/tx_prelude/src/lib.rs @@ -43,7 +43,7 @@ pub use namada_macros::transaction; pub use namada_parameters::storage as parameters_storage; pub use namada_state::{ collections, iter_prefix, iter_prefix_bytes, OptionExt, ResultExt, - StorageError as Error, StorageRead, StorageResult as Result, StorageWrite, + Error as Error, StorageRead, Result as Result, StorageWrite, }; use namada_token::MaspTransaction; pub use namada_tx::{action, data as transaction, BatchedTx, Section, Tx}; diff --git a/crates/vm/src/host_env.rs b/crates/vm/src/host_env.rs index 1e4d86f52f..027ecbcbfb 100644 --- a/crates/vm/src/host_env.rs +++ b/crates/vm/src/host_env.rs @@ -8,9 +8,10 @@ use std::num::TryFromIntError; use namada_account::AccountPublicKeysMap; use namada_core::address::{self, Address, ESTABLISHED_ADDRESS_BYTES_LEN}; -use namada_core::arith::{self, checked}; +use namada_core::arith::checked; use namada_core::borsh::{BorshDeserialize, BorshSerializeExt}; use namada_core::chain::BlockHeight; +use namada_core::decode; use namada_core::hash::Hash; use namada_core::internal::{HostEnvResult, KeyVal}; use namada_core::storage::{Key, TxIndex, TX_INDEX_LENGTH}; @@ -22,10 +23,10 @@ use namada_gas::{ use namada_state::prefix_iter::{PrefixIteratorId, PrefixIterators}; use namada_state::write_log::{self, WriteLog}; use namada_state::{ - DBIter, InMemory, OptionExt, ResultExt, State, StateRead, StorageError, - StorageHasher, StorageRead, StorageWrite, TxHostEnvState, VpHostEnvState, - DB, + DBIter, InMemory, OptionExt, ResultExt, State, StateRead, StorageHasher, + StorageRead, StorageWrite, TxHostEnvState, VpHostEnvState, DB, }; +pub use namada_state::{Error, 
Result}; use namada_token::storage_key::{ is_any_minted_balance_key, is_any_minter_key, is_any_token_balance_key, is_any_token_parameter_key, @@ -60,14 +61,6 @@ pub enum TxRuntimeError { InvalidVpCodeHash(String), #[error("A validity predicate of an account cannot be deleted")] CannotDeleteVp, - #[error("Storage modification error: {0}")] - StorageModificationError(write_log::Error), - #[error("State error: {0}")] - StateError(#[from] namada_state::Error), - #[error("Storage error: {0}")] - StorageError(#[from] StorageError), - #[error("Storage data error: {0}")] - StorageDataError(namada_core::storage::Error), #[error("Encoding error: {0}")] EncodingError(std::io::Error), #[error("Address error: {0}")] @@ -80,12 +73,16 @@ pub enum TxRuntimeError { NoValueInResultBuffer, #[error("VP code is not allowed in allowlist parameter.")] DisallowedVp, - #[error("Arithmetic {0}")] - Arith(#[from] arith::Error), +} + +impl From for namada_state::Error { + fn from(value: TxRuntimeError) -> Self { + Self::new(value) + } } /// Result of a tx host env fn call -pub type TxResult = std::result::Result; +pub type TxResult = namada_state::Result; /// A transaction's host environment pub struct TxVmEnv @@ -589,22 +586,26 @@ where { let (gas_meter, sentinel) = env.ctx.gas_meter_and_sentinel(); // if we run out of gas, we need to stop the execution - gas_meter.borrow_mut().consume(used_gas).map_err(|err| { - sentinel.borrow_mut().set_out_of_gas(); - tracing::info!( - "Stopping transaction execution because of gas error: {}", - err - ); + gas_meter + .borrow_mut() + .consume(used_gas) + .map_err(|err| { + sentinel.borrow_mut().set_out_of_gas(); + tracing::info!( + "Stopping transaction execution because of gas error: {}", + err + ); - TxRuntimeError::OutOfGas(err) - }) + TxRuntimeError::OutOfGas(err) + }) + .into_storage_result() } /// Called from VP wasm to request to use the given gas amount pub fn vp_charge_gas( env: &mut VpVmEnv, used_gas: u64, -) -> vp_host_fns::EnvResult<()> +) -> 
Result<()> where MEM: VmMemory, D: 'static + DB + for<'iter> DBIter<'iter>, @@ -637,7 +638,7 @@ where tracing::debug!("tx_has_key {}, key {}", key, key_ptr,); - let key = Key::parse(key).map_err(TxRuntimeError::StorageDataError)?; + let key = Key::parse(key)?; // try to read from the write log first let state = env.state(); @@ -669,7 +670,7 @@ where tracing::debug!("tx_read {}, key {}", key, key_ptr,); - let key = Key::parse(key).map_err(TxRuntimeError::StorageDataError)?; + let key = Key::parse(key)?; let state = env.state(); let value = state.read_bytes(&key)?; @@ -712,7 +713,7 @@ where tracing::debug!("tx_read {}, key {}", key, key_ptr,); - let key = Key::parse(key).map_err(TxRuntimeError::StorageDataError)?; + let key = Key::parse(key)?; let write_log = unsafe { env.ctx.write_log.get() }; let (log_val, gas) = write_log.read_temp(&key).into_storage_result()?; @@ -782,8 +783,7 @@ where tracing::debug!("tx_iter_prefix {}", prefix); - let prefix = - Key::parse(prefix).map_err(TxRuntimeError::StorageDataError)?; + let prefix = Key::parse(prefix)?; let write_log = unsafe { env.ctx.write_log.get() }; let db = unsafe { env.ctx.db.get() }; @@ -822,10 +822,7 @@ where let state = env.state(); let (log_val, log_gas) = state .write_log() - .read( - &Key::parse(key.clone()) - .map_err(TxRuntimeError::StorageDataError)?, - ) + .read(&Key::parse(key.clone())?) .into_storage_result()?; (log_val.cloned(), log_gas) }; @@ -894,7 +891,7 @@ where tracing::debug!("tx_update {}, {:?}", key, value); - let key = Key::parse(key).map_err(TxRuntimeError::StorageDataError)?; + let key = Key::parse(key)?; if key.is_validity_predicate().is_some() { tx_validate_vp_code_hash::(env, &value, &None)?; } @@ -902,9 +899,7 @@ where check_address_existence::(env, &key)?; let mut state = env.state(); - state - .write_bytes(&key, value) - .map_err(TxRuntimeError::StorageError) + state.write_bytes(&key, value) } /// Temporary storage write function exposed to the wasm VM Tx environment. 
The @@ -936,15 +931,12 @@ where tracing::debug!("tx_write_temp {}, {:?}", key, value); - let key = Key::parse(key).map_err(TxRuntimeError::StorageDataError)?; + let key = Key::parse(key)?; check_address_existence::(env, &key)?; let mut state = env.state(); - let (gas, _size_diff) = state - .write_log_mut() - .write_temp(&key, value) - .map_err(TxRuntimeError::StorageModificationError)?; + let (gas, _size_diff) = state.write_log_mut().write_temp(&key, value)?; tx_charge_gas::(env, gas) } @@ -987,7 +979,8 @@ where ); return Err(TxRuntimeError::UnknownAddressStorageModification( addr, - )); + ) + .into()); } } Ok(()) @@ -1014,13 +1007,13 @@ where tracing::debug!("tx_delete {}", key); - let key = Key::parse(key).map_err(TxRuntimeError::StorageDataError)?; + let key = Key::parse(key)?; if key.is_validity_predicate().is_some() { - return Err(TxRuntimeError::CannotDeleteVp); + return Err(TxRuntimeError::CannotDeleteVp.into()); } let mut state = env.state(); - state.delete(&key).map_err(TxRuntimeError::StorageError) + state.delete(&key) } /// Expose the functionality to emit events to the wasm VM's Tx environment. @@ -1097,7 +1090,7 @@ pub fn vp_read_pre( env: &mut VpVmEnv, key_ptr: u64, key_len: u64, -) -> vp_host_fns::EnvResult +) -> Result where MEM: VmMemory, D: 'static + DB + for<'iter> DBIter<'iter>, @@ -1108,13 +1101,12 @@ where let (key, gas) = env .memory .read_string(key_ptr, key_len.try_into()?) 
- .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; + .map_err(Into::into)?; let gas_meter = env.ctx.gas_meter(); vp_host_fns::add_gas(gas_meter, gas)?; // try to read from the storage - let key = - Key::parse(key).map_err(vp_host_fns::RuntimeError::StorageDataError)?; + let key = Key::parse(key)?; let state = env.state(); let value = vp_host_fns::read_pre(gas_meter, &state, &key)?; tracing::debug!( @@ -1125,10 +1117,7 @@ where ); Ok(match value { Some(value) => { - let len: i64 = value - .len() - .try_into() - .map_err(vp_host_fns::RuntimeError::NumConversionError)?; + let len: i64 = value.len().try_into()?; let result_buffer = unsafe { env.ctx.result_buffer.get_mut() }; result_buffer.replace(value); len @@ -1147,7 +1136,7 @@ pub fn vp_read_post( env: &mut VpVmEnv, key_ptr: u64, key_len: u64, -) -> vp_host_fns::EnvResult +) -> Result where MEM: VmMemory, D: 'static + DB + for<'iter> DBIter<'iter>, @@ -1158,23 +1147,19 @@ where let (key, gas) = env .memory .read_string(key_ptr, key_len.try_into()?) 
- .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; + .map_err(Into::into)?; let gas_meter = env.ctx.gas_meter(); vp_host_fns::add_gas(gas_meter, gas)?; tracing::debug!("vp_read_post {}, key {}", key, key_ptr,); // try to read from the write log first - let key = - Key::parse(key).map_err(vp_host_fns::RuntimeError::StorageDataError)?; + let key = Key::parse(key)?; let state = env.state(); let value = vp_host_fns::read_post(gas_meter, &state, &key)?; Ok(match value { Some(value) => { - let len: i64 = value - .len() - .try_into() - .map_err(vp_host_fns::RuntimeError::NumConversionError)?; + let len: i64 = value.len().try_into()?; let result_buffer = unsafe { env.ctx.result_buffer.get_mut() }; result_buffer.replace(value); len @@ -1192,7 +1177,7 @@ pub fn vp_read_temp( env: &mut VpVmEnv, key_ptr: u64, key_len: u64, -) -> vp_host_fns::EnvResult +) -> Result where MEM: VmMemory, D: 'static + DB + for<'iter> DBIter<'iter>, @@ -1203,23 +1188,19 @@ where let (key, gas) = env .memory .read_string(key_ptr, key_len.try_into()?) 
- .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; + .map_err(Into::into)?; let gas_meter = env.ctx.gas_meter(); vp_host_fns::add_gas(gas_meter, gas)?; tracing::debug!("vp_read_temp {}, key {}", key, key_ptr); // try to read from the write log - let key = - Key::parse(key).map_err(vp_host_fns::RuntimeError::StorageDataError)?; + let key = Key::parse(key)?; let state = env.state(); let value = vp_host_fns::read_temp(gas_meter, &state, &key)?; Ok(match value { Some(value) => { - let len: i64 = value - .len() - .try_into() - .map_err(vp_host_fns::RuntimeError::NumConversionError)?; + let len: i64 = value.len().try_into()?; let result_buffer = unsafe { env.ctx.result_buffer.get_mut() }; result_buffer.replace(value); len @@ -1239,7 +1220,7 @@ where pub fn vp_result_buffer( env: &mut VpVmEnv, result_ptr: u64, -) -> vp_host_fns::EnvResult<()> +) -> Result<()> where MEM: VmMemory, D: 'static + DB + for<'iter> DBIter<'iter>, @@ -1250,11 +1231,11 @@ where let result_buffer = unsafe { env.ctx.result_buffer.get_mut() }; let value = result_buffer .take() - .ok_or(vp_host_fns::RuntimeError::NoValueInResultBuffer)?; + .ok_or(Error::new_const("No value found in result buffer"))?; let gas = env .memory .write_bytes(result_ptr, value) - .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; + .map_err(Into::into)?; let gas_meter = env.ctx.gas_meter(); vp_host_fns::add_gas(gas_meter, gas) } @@ -1265,7 +1246,7 @@ pub fn vp_has_key_pre( env: &mut VpVmEnv, key_ptr: u64, key_len: u64, -) -> vp_host_fns::EnvResult +) -> Result where MEM: VmMemory, D: 'static + DB + for<'iter> DBIter<'iter>, @@ -1276,14 +1257,13 @@ where let (key, gas) = env .memory .read_string(key_ptr, key_len.try_into()?) 
- .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; + .map_err(Into::into)?; let gas_meter = env.ctx.gas_meter(); vp_host_fns::add_gas(gas_meter, gas)?; tracing::debug!("vp_has_key_pre {}, key {}", key, key_ptr,); - let key = - Key::parse(key).map_err(vp_host_fns::RuntimeError::StorageDataError)?; + let key = Key::parse(key)?; let state = env.state(); let present = vp_host_fns::has_key_pre(gas_meter, &state, &key)?; Ok(HostEnvResult::from(present).to_i64()) @@ -1296,7 +1276,7 @@ pub fn vp_has_key_post( env: &mut VpVmEnv, key_ptr: u64, key_len: u64, -) -> vp_host_fns::EnvResult +) -> Result where MEM: VmMemory, D: 'static + DB + for<'iter> DBIter<'iter>, @@ -1307,14 +1287,13 @@ where let (key, gas) = env .memory .read_string(key_ptr, key_len.try_into()?) - .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; + .map_err(Into::into)?; let gas_meter = env.ctx.gas_meter(); vp_host_fns::add_gas(gas_meter, gas)?; tracing::debug!("vp_has_key_post {}, key {}", key, key_ptr,); - let key = - Key::parse(key).map_err(vp_host_fns::RuntimeError::StorageDataError)?; + let key = Key::parse(key)?; let state = env.state(); let present = vp_host_fns::has_key_post(gas_meter, &state, &key)?; Ok(HostEnvResult::from(present).to_i64()) @@ -1328,7 +1307,7 @@ pub fn vp_iter_prefix_pre( env: &mut VpVmEnv, prefix_ptr: u64, prefix_len: u64, -) -> vp_host_fns::EnvResult +) -> Result where MEM: VmMemory, D: 'static + DB + for<'iter> DBIter<'iter>, @@ -1339,14 +1318,13 @@ where let (prefix, gas) = env .memory .read_string(prefix_ptr, prefix_len.try_into()?) 
- .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; + .map_err(Into::into)?; let gas_meter = env.ctx.gas_meter(); vp_host_fns::add_gas(gas_meter, gas)?; tracing::debug!("vp_iter_prefix_pre {}", prefix); - let prefix = Key::parse(prefix) - .map_err(vp_host_fns::RuntimeError::StorageDataError)?; + let prefix = Key::parse(prefix)?; let write_log = unsafe { env.ctx.write_log.get() }; let db = unsafe { env.ctx.db.get() }; @@ -1367,7 +1345,7 @@ pub fn vp_iter_prefix_post( env: &mut VpVmEnv, prefix_ptr: u64, prefix_len: u64, -) -> vp_host_fns::EnvResult +) -> Result where MEM: VmMemory, D: 'static + DB + for<'iter> DBIter<'iter>, @@ -1378,14 +1356,13 @@ where let (prefix, gas) = env .memory .read_string(prefix_ptr, prefix_len.try_into()?) - .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; + .map_err(Into::into)?; let gas_meter = env.ctx.gas_meter(); vp_host_fns::add_gas(gas_meter, gas)?; tracing::debug!("vp_iter_prefix_post {}", prefix); - let prefix = Key::parse(prefix) - .map_err(vp_host_fns::RuntimeError::StorageDataError)?; + let prefix = Key::parse(prefix)?; let write_log = unsafe { env.ctx.write_log.get() }; let db = unsafe { env.ctx.db.get() }; @@ -1407,7 +1384,7 @@ where pub fn vp_iter_next( env: &mut VpVmEnv, iter_id: u64, -) -> vp_host_fns::EnvResult +) -> Result where MEM: VmMemory, D: 'static + DB + for<'iter> DBIter<'iter>, @@ -1422,12 +1399,8 @@ where if let Some(iter) = iterators.get_mut(iter_id) { let gas_meter = env.ctx.gas_meter(); if let Some((key, val)) = vp_host_fns::iter_next(gas_meter, iter)? 
{ - let key_val = borsh::to_vec(&KeyVal { key, val }) - .map_err(vp_host_fns::RuntimeError::EncodingError)?; - let len: i64 = key_val - .len() - .try_into() - .map_err(vp_host_fns::RuntimeError::NumConversionError)?; + let key_val = KeyVal { key, val }.serialize_to_vec(); + let len: i64 = key_val.len().try_into()?; let result_buffer = unsafe { env.ctx.result_buffer.get_mut() }; result_buffer.replace(key_val); return Ok(len); @@ -1513,10 +1486,7 @@ where tx_validate_vp_code_hash::(env, &code_hash, &code_tag)?; let mut state = env.state(); - let (gas, _size_diff) = state - .write_log_mut() - .write(&key, code_hash) - .map_err(TxRuntimeError::StorageModificationError)?; + let (gas, _size_diff) = state.write_log_mut().write(&key, code_hash)?; tx_charge_gas::(env, gas) } @@ -1643,7 +1613,7 @@ where /// transaction is being applied. pub fn vp_get_tx_index( env: &mut VpVmEnv, -) -> vp_host_fns::EnvResult +) -> Result where MEM: VmMemory, D: 'static + DB + for<'iter> DBIter<'iter>, @@ -1743,8 +1713,7 @@ where { let state = env.state(); let (header, gas) = - StateRead::get_block_header(&state, Some(BlockHeight(height))) - .map_err(TxRuntimeError::StateError)?; + StateRead::get_block_header(&state, Some(BlockHeight(height)))?; tx_charge_gas::(env, gas)?; Ok(match header { @@ -1766,7 +1735,7 @@ where pub fn vp_get_chain_id( env: &mut VpVmEnv, result_ptr: u64, -) -> vp_host_fns::EnvResult<()> +) -> Result<()> where MEM: VmMemory, D: 'static + DB + for<'iter> DBIter<'iter>, @@ -1780,7 +1749,7 @@ where let gas = env .memory .write_string(result_ptr, chain_id) - .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; + .map_err(Into::into)?; vp_host_fns::add_gas(gas_meter, gas) } @@ -1789,7 +1758,7 @@ where /// transaction is being applied. 
pub fn vp_get_block_height( env: &mut VpVmEnv, -) -> vp_host_fns::EnvResult +) -> Result where MEM: VmMemory, D: 'static + DB + for<'iter> DBIter<'iter>, @@ -1807,7 +1776,7 @@ where pub fn vp_get_block_header( env: &mut VpVmEnv, height: u64, -) -> vp_host_fns::EnvResult +) -> Result where MEM: VmMemory, D: 'static + DB + for<'iter> DBIter<'iter>, @@ -1818,16 +1787,12 @@ where let gas_meter = env.ctx.gas_meter(); let state = env.state(); let (header, gas) = - StateRead::get_block_header(&state, Some(BlockHeight(height))) - .map_err(vp_host_fns::RuntimeError::StateError)?; + StateRead::get_block_header(&state, Some(BlockHeight(height)))?; vp_host_fns::add_gas(gas_meter, gas)?; Ok(match header { Some(h) => { let value = h.serialize_to_vec(); - let len: i64 = value - .len() - .try_into() - .map_err(vp_host_fns::RuntimeError::NumConversionError)?; + let len: i64 = value.len().try_into()?; let result_buffer = unsafe { env.ctx.result_buffer.get_mut() }; result_buffer.replace(value); len @@ -1840,7 +1805,7 @@ where pub fn vp_get_tx_code_hash( env: &mut VpVmEnv, result_ptr: u64, -) -> vp_host_fns::EnvResult<()> +) -> Result<()> where MEM: VmMemory, D: 'static + DB + for<'iter> DBIter<'iter>, @@ -1863,7 +1828,7 @@ where let gas = env .memory .write_bytes(result_ptr, result_bytes) - .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; + .map_err(Into::into)?; vp_host_fns::add_gas(gas_meter, gas) } @@ -1872,7 +1837,7 @@ where /// transaction is being applied. pub fn vp_get_block_epoch( env: &mut VpVmEnv, -) -> vp_host_fns::EnvResult +) -> Result where MEM: VmMemory, D: 'static + DB + for<'iter> DBIter<'iter>, @@ -1889,7 +1854,7 @@ where /// Get predecessor epochs function exposed to the wasm VM VP environment. 
pub fn vp_get_pred_epochs( env: &mut VpVmEnv, -) -> vp_host_fns::EnvResult +) -> Result where MEM: VmMemory, D: 'static + DB + for<'iter> DBIter<'iter>, @@ -1901,10 +1866,7 @@ where let state = env.state(); let pred_epochs = vp_host_fns::get_pred_epochs(gas_meter, &state)?; let bytes = pred_epochs.serialize_to_vec(); - let len: i64 = bytes - .len() - .try_into() - .map_err(vp_host_fns::RuntimeError::NumConversionError)?; + let len: i64 = bytes.len().try_into()?; let result_buffer = unsafe { env.ctx.result_buffer.get_mut() }; result_buffer.replace(bytes); Ok(len) @@ -1915,7 +1877,7 @@ pub fn vp_get_events( env: &mut VpVmEnv, event_type_ptr: u64, event_type_len: u64, -) -> vp_host_fns::EnvResult +) -> Result where MEM: VmMemory, D: 'static + DB + for<'iter> DBIter<'iter>, @@ -1926,17 +1888,14 @@ where let (event_type, gas) = env .memory .read_string(event_type_ptr, event_type_len.try_into()?) - .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; + .map_err(Into::into)?; let gas_meter = env.ctx.gas_meter(); vp_host_fns::add_gas(gas_meter, gas)?; let state = env.state(); let events = vp_host_fns::get_events(gas_meter, &state, event_type)?; let value = events.serialize_to_vec(); - let len: i64 = value - .len() - .try_into() - .map_err(vp_host_fns::RuntimeError::NumConversionError)?; + let len: i64 = value.len().try_into()?; let result_buffer = unsafe { env.ctx.result_buffer.get_mut() }; result_buffer.replace(value); Ok(len) @@ -1954,7 +1913,7 @@ pub fn vp_verify_tx_section_signature( signer_ptr: u64, signer_len: u64, threshold: u8, -) -> vp_host_fns::EnvResult<()> +) -> Result<()> where MEM: VmMemory, D: 'static + DB + for<'iter> DBIter<'iter>, @@ -1965,29 +1924,25 @@ where let (hash_list, gas) = env .memory .read_bytes(hash_list_ptr, hash_list_len.try_into()?) 
- .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; + .map_err(Into::into)?; let gas_meter = env.ctx.gas_meter(); vp_host_fns::add_gas(gas_meter, gas)?; - let hashes = <[Hash; 1]>::try_from_slice(&hash_list) - .map_err(vp_host_fns::RuntimeError::EncodingError)?; + let hashes: [Hash; 1] = decode(hash_list)?; let (public_keys_map, gas) = env .memory .read_bytes(public_keys_map_ptr, public_keys_map_len.try_into()?) - .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; + .map_err(Into::into)?; vp_host_fns::add_gas(gas_meter, gas)?; - let public_keys_map = - AccountPublicKeysMap::try_from_slice(&public_keys_map) - .map_err(vp_host_fns::RuntimeError::EncodingError)?; + let public_keys_map: AccountPublicKeysMap = decode(public_keys_map)?; let (signer, gas) = env .memory .read_bytes(signer_ptr, signer_len.try_into()?) - .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; + .map_err(Into::into)?; vp_host_fns::add_gas(gas_meter, gas)?; - let signer = Address::try_from_slice(&signer) - .map_err(vp_host_fns::RuntimeError::EncodingError)?; + let signer: Address = decode(signer)?; let tx = unsafe { env.ctx.tx.get() }; @@ -2002,11 +1957,13 @@ where Err(err) => match err { namada_tx::VerifySigError::Gas(inner) => { Err(vp_host_fns::RuntimeError::OutOfGas(inner)) + .into_storage_result() } namada_tx::VerifySigError::InvalidSectionSignature(inner) => { Err(vp_host_fns::RuntimeError::InvalidSectionSignature(inner)) + .into_storage_result() } - err => Err(vp_host_fns::RuntimeError::Erased(err.to_string())), + err => Err(Error::new_alloc(err.to_string())), }, } } @@ -2051,10 +2008,8 @@ where // First check that code hash corresponds to the code tag if it is present if let Some(tag) = code_tag { let hash_key = Key::wasm_hash(tag); - let (result, gas) = env - .state() - .db_read(&hash_key) - .map_err(TxRuntimeError::StateError)?; + let (result, gas) = env.state().db_read(&hash_key)?; + tx_charge_gas::(env, gas)?; if let Some(tag_hash) = 
result { let tag_hash = Hash::try_from(&tag_hash[..]).map_err(|e| { @@ -2065,20 +2020,20 @@ where "The VP code tag does not correspond to the given code \ hash" .to_string(), - )); + ) + .into()); } } else { return Err(TxRuntimeError::InvalidVpCodeHash( "The VP code tag doesn't exist".to_string(), - )); + ) + .into()); } } // Then check that VP code hash is in the allowlist. - if !namada_parameters::is_vp_allowed(&env.ctx.state(), &code_hash) - .map_err(TxRuntimeError::StorageError)? - { - return Err(TxRuntimeError::DisallowedVp); + if !namada_parameters::is_vp_allowed(&env.ctx.state(), &code_hash)? { + return Err(TxRuntimeError::DisallowedVp.into()); } // Then check that the corresponding VP code does indeed exist @@ -2087,7 +2042,8 @@ where if !is_present { return Err(TxRuntimeError::InvalidVpCodeHash( "The corresponding VP code doesn't exist".to_string(), - )); + ) + .into()); } Ok(()) } @@ -2163,6 +2119,7 @@ where _ => Ok(HostEnvResult::Fail.to_i64()), }, } + .into_storage_result() } /// Appends the new note commitments to the tree in storage @@ -2231,7 +2188,7 @@ pub fn vp_eval( vp_code_hash_len: u64, input_data_ptr: u64, input_data_len: u64, -) -> vp_host_fns::EnvResult +) -> Result where MEM: VmMemory, D: 'static + DB + for<'iter> DBIter<'iter>, @@ -2242,7 +2199,7 @@ where let (vp_code_hash, gas) = env .memory .read_bytes(vp_code_hash_ptr, vp_code_hash_len.try_into()?) - .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; + .map_err(Into::into)?; // The borrowed `gas_meter` must be dropped before eval, // which has to borrow it too. @@ -2253,16 +2210,15 @@ where let (input_data, gas) = env .memory .read_bytes(input_data_ptr, input_data_len.try_into()?) 
- .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; + .map_err(Into::into)?; vp_host_fns::add_gas(gas_meter, gas)?; - let tx: BatchedTx = BorshDeserialize::try_from_slice(&input_data) - .map_err(vp_host_fns::RuntimeError::EncodingError)?; + let tx: BatchedTx = decode(input_data)?; tx }; let eval_runner = unsafe { env.ctx.eval_runner.get() }; let vp_code_hash = Hash(vp_code_hash.try_into().map_err(|e| { - vp_host_fns::RuntimeError::EncodingError(std::io::Error::new( + Error::new(std::io::Error::new( std::io::ErrorKind::InvalidData, format!("Not a valid hash: {:?}", e), )) @@ -2280,7 +2236,7 @@ where pub fn vp_get_native_token( env: &mut VpVmEnv, result_ptr: u64, -) -> vp_host_fns::EnvResult<()> +) -> Result<()> where MEM: VmMemory, D: 'static + DB + for<'iter> DBIter<'iter>, @@ -2295,7 +2251,7 @@ where let gas = env .memory .write_string(result_ptr, native_token_string) - .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; + .map_err(Into::into)?; vp_host_fns::add_gas(gas_meter, gas) } @@ -2306,7 +2262,7 @@ pub fn vp_log_string( env: &mut VpVmEnv, str_ptr: u64, str_len: u64, -) -> vp_host_fns::EnvResult<()> +) -> Result<()> where MEM: VmMemory, D: 'static + DB + for<'iter> DBIter<'iter>, @@ -2317,7 +2273,7 @@ where let (str, _gas) = env .memory .read_string(str_ptr, str_len.try_into()?) - .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; + .map_err(Into::into)?; tracing::info!("WASM Validity predicate log: {}", str); Ok(()) } @@ -2327,7 +2283,7 @@ pub fn vp_yield_value( env: &mut VpVmEnv, buf_ptr: u64, buf_len: u64, -) -> vp_host_fns::EnvResult<()> +) -> Result<()> where MEM: VmMemory, D: 'static + DB + for<'iter> DBIter<'iter>, @@ -2338,7 +2294,7 @@ where let (value_to_yield, gas) = env .memory .read_bytes(buf_ptr, buf_len.try_into()?) 
- .map_err(|e| vp_host_fns::RuntimeError::MemoryError(Box::new(e)))?; + .map_err(Into::into)?; vp_host_fns::add_gas(env.ctx.gas_meter(), gas)?; let host_buf = unsafe { env.ctx.yielded_value.get_mut() }; host_buf.replace(value_to_yield); diff --git a/crates/vm/src/memory.rs b/crates/vm/src/memory.rs index fe53a6c832..7bb86797ba 100644 --- a/crates/vm/src/memory.rs +++ b/crates/vm/src/memory.rs @@ -5,7 +5,7 @@ use std::error::Error; /// Abstract representation of virtual machine's memory. pub trait VmMemory: Clone + Send + Sync { /// Error type for the methods' results. - type Error: Error + Sync + Send + 'static; + type Error: Error + Sync + Send + 'static + Into; /// Returns bytes read from memory together with the associated gas cost. fn read_bytes( diff --git a/crates/vm/src/wasm/memory.rs b/crates/vm/src/wasm/memory.rs index 3b20b7b532..374ca378df 100644 --- a/crates/vm/src/wasm/memory.rs +++ b/crates/vm/src/wasm/memory.rs @@ -51,6 +51,12 @@ pub enum Error { /// Result of a function that may fail pub type Result = std::result::Result; +impl From for namada_state::Error { + fn from(value: Error) -> Self { + namada_state::Error::new(value) + } +} + // The bounds are set in number of pages, the actual size is multiplied by // `wasmer::WASM_PAGE_SIZE = 64kiB`. // diff --git a/crates/vm/src/wasm/run.rs b/crates/vm/src/wasm/run.rs index d22e7b84c8..9a199157f8 100644 --- a/crates/vm/src/wasm/run.rs +++ b/crates/vm/src/wasm/run.rs @@ -97,7 +97,7 @@ pub enum Error { #[error("Failed type conversion: {0}")] ConversionError(String), #[error("Storage error: {0}")] - StorageError(String), + Error(String), #[error("Tx is not allowed in allowlist parameter")] DisallowedTx, #[error("Invalid transaction section signature: {0}")] @@ -123,7 +123,7 @@ where .and_then(|x| Section::code_sec(&x)) { if namada_parameters::is_tx_allowed(storage, &code_sec.code.hash()) - .map_err(|e| Error::StorageError(e.to_string()))? + .map_err(|e| Error::Error(e.to_string()))? 
{ return Ok(()); } @@ -465,8 +465,11 @@ where .map_err(|rt_error| { let downcasted_err = || { let source_err = rt_error.source()?; - let downcasted_vp_rt_err: &vp_host_fns::RuntimeError = - source_err.downcast_ref()?; + let downcasted_vp_err = + source_err.downcast_ref::()?; + let downcasted_vp_rt_err = downcasted_vp_err + .downcast_ref::( + )?; match downcasted_vp_rt_err { vp_host_fns::RuntimeError::OutOfGas(_) => { @@ -534,7 +537,7 @@ where ctx: &namada_vp::native_vp::Ctx<'a, S, VpCache, Self>, vp_code_hash: Hash, input_data: BatchedTxRef<'_>, - ) -> namada_state::StorageResult<()> { + ) -> namada_state::Result<()> { use namada_state::ResultExt; let eval_runner = @@ -1021,7 +1024,7 @@ mod tests { use super::memory::{TX_MEMORY_INIT_PAGES, VP_MEMORY_INIT_PAGES}; use super::*; - use crate::host_env::TxRuntimeError; + use crate::host_env::{self, TxRuntimeError}; use crate::wasm; const TX_GAS_LIMIT: u64 = 10_000_000_000_000; @@ -1072,7 +1075,10 @@ mod tests { }; let source_err = rt_error.source().expect("No runtime error source found"); - let downcasted_tx_rt_err: &TxRuntimeError = source_err + let downcasted_tx_err: &host_env::Error = source_err + .downcast_ref() + .unwrap_or_else(|| panic!("{assert_msg}: {source_err}")); + let downcasted_tx_rt_err: &TxRuntimeError = downcasted_tx_err .downcast_ref() .unwrap_or_else(|| panic!("{assert_msg}: {source_err}")); let TxRuntimeError::MemoryError(tx_mem_err) = downcasted_tx_rt_err @@ -1094,17 +1100,13 @@ mod tests { }; let source_err = rt_error.source().expect("No runtime error source found"); - let downcasted_tx_rt_err: &vp_host_fns::RuntimeError = source_err + let downcasted_vp_err: &host_env::Error = source_err .downcast_ref() .unwrap_or_else(|| panic!("{assert_msg}: {source_err}")); - let vp_host_fns::RuntimeError::MemoryError(vp_mem_err) = - downcasted_tx_rt_err - else { - panic!("{assert_msg}: {downcasted_tx_rt_err}"); - }; - vp_mem_err + let downcasted_err: &wasm::memory::Error = downcasted_vp_err .downcast_ref() - 
.unwrap_or_else(|| panic!("{assert_msg}: {vp_mem_err}")) + .unwrap_or_else(|| panic!("{assert_msg}: {downcasted_vp_err}")); + downcasted_err } /// Test that when a transaction wasm goes over the stack-height limit, the diff --git a/crates/vp/src/native_vp.rs b/crates/vp/src/native_vp.rs index 513acd59b3..4f01ab4f01 100644 --- a/crates/vp/src/native_vp.rs +++ b/crates/vp/src/native_vp.rs @@ -12,32 +12,25 @@ use namada_core::borsh::BorshDeserialize; use namada_core::chain::Epochs; use namada_gas::{GasMetering, VpGasMeter}; use namada_tx::{BatchedTxRef, Tx, TxCommitments}; -use state::prefix_iter::PrefixIterators; -use state::{ - BlockHeader, BlockHeight, Epoch, Key, ResultExt, StateRead, StorageRead, - StorageResult, TxIndex, -}; use super::vp_host_fns; -use crate::{state, Address, Event, EventType, Hash, VpEnv}; - -/// Possible error in a native VP host function call -/// The `state::StorageError` may wrap the `vp_host_fns::RuntimeError` -/// and can be extended with other custom errors when using `trait VpEnv`. -pub type Error = state::StorageError; +use crate::state::prefix_iter::PrefixIterators; +use crate::state::{ + BlockHeader, BlockHeight, Epoch, Key, PrefixIter, StateRead, StorageRead, + TxIndex, +}; +pub use crate::state::{Error, Result, ResultExt}; +use crate::{Address, Event, EventType, Hash, VpEnv}; /// A native VP module should implement its validation logic using this trait. pub trait NativeVp<'a> { - /// Error type for the methods' results. - type Error: std::error::Error; - /// Run the validity predicate fn validate_tx( &'a self, batched_tx: &BatchedTxRef<'_>, keys_changed: &BTreeSet, verifiers: &BTreeSet
, - ) -> std::result::Result<(), Self::Error>; + ) -> Result<()>; } /// A validity predicate's host context. @@ -90,7 +83,7 @@ where ctx: &Ctx<'a, S, CA, EVAL>, vp_code_hash: Hash, input_data: BatchedTxRef<'_>, - ) -> StorageResult<()>; + ) -> Result<()>; } /// Read access to the prior storage (state before tx execution) via @@ -173,17 +166,14 @@ where EVAL: 'static + VpEvaluator<'a, S, CA, EVAL>, CA: 'static + Clone, { - type PrefixIter<'iter> = state::PrefixIter<'iter,:: D> where Self: 'iter; + type PrefixIter<'iter> = PrefixIter<'iter,:: D> where Self: 'iter; - fn read_bytes( - &self, - key: &Key, - ) -> Result>, state::StorageError> { + fn read_bytes(&self, key: &Key) -> Result>> { vp_host_fns::read_pre(self.ctx.gas_meter, self.ctx.state, key) .into_storage_result() } - fn has_key(&self, key: &Key) -> Result { + fn has_key(&self, key: &Key) -> Result { vp_host_fns::has_key_pre(self.ctx.gas_meter, self.ctx.state, key) .into_storage_result() } @@ -191,7 +181,7 @@ where fn iter_prefix<'iter>( &'iter self, prefix: &Key, - ) -> Result, state::StorageError> { + ) -> Result> { vp_host_fns::iter_prefix_pre( self.ctx.gas_meter, self.ctx.state.write_log(), @@ -207,39 +197,39 @@ where fn iter_next<'iter>( &'iter self, iter: &mut Self::PrefixIter<'iter>, - ) -> Result)>, state::StorageError> { + ) -> Result)>> { vp_host_fns::iter_next::<::D>(self.ctx.gas_meter, iter) .into_storage_result() } - fn get_chain_id(&self) -> Result { + fn get_chain_id(&self) -> Result { self.ctx.get_chain_id() } - fn get_block_height(&self) -> Result { + fn get_block_height(&self) -> Result { self.ctx.get_block_height() } fn get_block_header( &self, height: BlockHeight, - ) -> Result, state::StorageError> { + ) -> Result> { self.ctx.get_block_header(height) } - fn get_block_epoch(&self) -> Result { + fn get_block_epoch(&self) -> Result { self.ctx.get_block_epoch() } - fn get_tx_index(&self) -> Result { + fn get_tx_index(&self) -> Result { self.ctx.get_tx_index().into_storage_result() } - fn 
get_native_token(&self) -> Result { + fn get_native_token(&self) -> Result
{ self.ctx.get_native_token() } - fn get_pred_epochs(&self) -> StorageResult { + fn get_pred_epochs(&self) -> Result { self.ctx.get_pred_epochs() } } @@ -251,17 +241,14 @@ where EVAL: 'static + VpEvaluator<'a, S, CA, EVAL>, CA: 'static + Clone, { - type PrefixIter<'iter> = state::PrefixIter<'iter, ::D> where Self: 'iter; + type PrefixIter<'iter> = PrefixIter<'iter, ::D> where Self: 'iter; - fn read_bytes( - &self, - key: &Key, - ) -> Result>, state::StorageError> { + fn read_bytes(&self, key: &Key) -> Result>> { vp_host_fns::read_post(self.ctx.gas_meter, self.ctx.state, key) .into_storage_result() } - fn has_key(&self, key: &Key) -> Result { + fn has_key(&self, key: &Key) -> Result { vp_host_fns::has_key_post(self.ctx.gas_meter, self.ctx.state, key) .into_storage_result() } @@ -269,7 +256,7 @@ where fn iter_prefix<'iter>( &'iter self, prefix: &Key, - ) -> Result, state::StorageError> { + ) -> Result> { vp_host_fns::iter_prefix_post( self.ctx.gas_meter, self.ctx.state.write_log(), @@ -285,39 +272,39 @@ where fn iter_next<'iter>( &'iter self, iter: &mut Self::PrefixIter<'iter>, - ) -> Result)>, state::StorageError> { + ) -> Result)>> { vp_host_fns::iter_next::<::D>(self.ctx.gas_meter, iter) .into_storage_result() } - fn get_chain_id(&self) -> Result { + fn get_chain_id(&self) -> Result { self.ctx.get_chain_id() } - fn get_block_height(&self) -> Result { + fn get_block_height(&self) -> Result { self.ctx.get_block_height() } fn get_block_header( &self, height: BlockHeight, - ) -> Result, state::StorageError> { + ) -> Result> { self.ctx.get_block_header(height) } - fn get_block_epoch(&self) -> Result { + fn get_block_epoch(&self) -> Result { self.ctx.get_block_epoch() } - fn get_tx_index(&self) -> Result { + fn get_tx_index(&self) -> Result { self.ctx.get_tx_index().into_storage_result() } - fn get_native_token(&self) -> Result { + fn get_native_token(&self) -> Result
{ Ok(self.ctx.state.in_mem().native_token.clone()) } - fn get_pred_epochs(&self) -> StorageResult { + fn get_pred_epochs(&self) -> Result { self.ctx.get_pred_epochs() } } @@ -330,7 +317,7 @@ where { type Post = CtxPostStorageRead<'view, 'a, S, CA, EVAL>; type Pre = CtxPreStorageRead<'view, 'a, S, CA, EVAL>; - type PrefixIter<'iter> = state::PrefixIter<'iter, ::D> where Self: 'iter; + type PrefixIter<'iter> = PrefixIter<'iter, ::D> where Self: 'iter; fn pre(&'view self) -> Self::Pre { CtxPreStorageRead { ctx: self } @@ -343,26 +330,23 @@ where fn read_temp( &self, key: &Key, - ) -> Result, state::StorageError> { + ) -> Result> { vp_host_fns::read_temp(self.gas_meter, self.state, key) .map(|data| data.and_then(|t| T::try_from_slice(&t[..]).ok())) .into_storage_result() } - fn read_bytes_temp( - &self, - key: &Key, - ) -> Result>, state::StorageError> { + fn read_bytes_temp(&self, key: &Key) -> Result>> { vp_host_fns::read_temp(self.gas_meter, self.state, key) .into_storage_result() } - fn get_chain_id(&self) -> Result { + fn get_chain_id(&self) -> Result { vp_host_fns::get_chain_id(self.gas_meter, self.state) .into_storage_result() } - fn get_block_height(&self) -> Result { + fn get_block_height(&self) -> Result { vp_host_fns::get_block_height(self.gas_meter, self.state) .into_storage_result() } @@ -370,35 +354,32 @@ where fn get_block_header( &self, height: BlockHeight, - ) -> Result, state::StorageError> { + ) -> Result> { vp_host_fns::get_block_header(self.gas_meter, self.state, height) .into_storage_result() } - fn get_block_epoch(&self) -> Result { + fn get_block_epoch(&self) -> Result { vp_host_fns::get_block_epoch(self.gas_meter, self.state) .into_storage_result() } - fn get_tx_index(&self) -> Result { + fn get_tx_index(&self) -> Result { vp_host_fns::get_tx_index(self.gas_meter, self.tx_index) .into_storage_result() } - fn get_native_token(&self) -> Result { + fn get_native_token(&self) -> Result
{ vp_host_fns::get_native_token(self.gas_meter, self.state) .into_storage_result() } - fn get_pred_epochs(&self) -> StorageResult { + fn get_pred_epochs(&self) -> Result { vp_host_fns::get_pred_epochs(self.gas_meter, self.state) .into_storage_result() } - fn get_events( - &self, - event_type: &EventType, - ) -> Result, state::StorageError> { + fn get_events(&self, event_type: &EventType) -> Result> { vp_host_fns::get_events( self.gas_meter, self.state, @@ -410,7 +391,7 @@ where fn iter_prefix<'iter>( &'iter self, prefix: &Key, - ) -> Result, state::StorageError> { + ) -> Result> { vp_host_fns::iter_prefix_pre( self.gas_meter, self.state.write_log(), @@ -424,17 +405,17 @@ where &self, vp_code_hash: Hash, input_data: BatchedTxRef<'_>, - ) -> StorageResult<()> { + ) -> Result<()> { EVAL::eval(self, vp_code_hash, input_data) } - fn charge_gas(&self, used_gas: u64) -> Result<(), state::StorageError> { + fn charge_gas(&self, used_gas: u64) -> Result<()> { self.gas_meter.borrow_mut().consume(used_gas).map_err(|_| { Error::SimpleMessage("Gas limit exceeded in native vp") }) } - fn get_tx_code_hash(&self) -> Result, state::StorageError> { + fn get_tx_code_hash(&self) -> Result> { vp_host_fns::get_tx_code_hash( self.gas_meter, &self.tx.batch_ref_tx(self.cmt), @@ -445,36 +426,30 @@ where fn read_pre( &self, key: &Key, - ) -> Result, state::StorageError> { + ) -> Result> { self.pre().read(key).map_err(Into::into) } - fn read_bytes_pre( - &self, - key: &Key, - ) -> Result>, state::StorageError> { + fn read_bytes_pre(&self, key: &Key) -> Result>> { self.pre().read_bytes(key).map_err(Into::into) } fn read_post( &self, key: &Key, - ) -> Result, state::StorageError> { + ) -> Result> { self.post().read(key).map_err(Into::into) } - fn read_bytes_post( - &self, - key: &Key, - ) -> Result>, state::StorageError> { + fn read_bytes_post(&self, key: &Key) -> Result>> { self.post().read_bytes(key).map_err(Into::into) } - fn has_key_pre(&self, key: &Key) -> Result { + fn has_key_pre(&self, 
key: &Key) -> Result { self.pre().has_key(key).map_err(Into::into) } - fn has_key_post(&self, key: &Key) -> Result { + fn has_key_post(&self, key: &Key) -> Result { self.post().has_key(key).map_err(Into::into) } } @@ -487,10 +462,7 @@ where { type Err = Error; - fn read_temp( - &self, - key: &Key, - ) -> Result, Self::Err> { + fn read_temp(&self, key: &Key) -> Result> { VpEnv::read_temp(self, key) } } @@ -503,7 +475,7 @@ pub trait StorageReader { fn read_pre_value( &self, key: &Key, - ) -> Result, state::StorageError>; + ) -> Result>; /// Storage read posterior state (after tx execution). It will try to read /// from the write log first and if no entry found then from the @@ -511,15 +483,12 @@ pub trait StorageReader { fn read_post_value( &self, key: &Key, - ) -> Result, state::StorageError>; + ) -> Result>; /// Calls `read_pre_value`, and returns an error on `Ok(None)`. - fn must_read_pre_value( - &self, - key: &Key, - ) -> Result { + fn must_read_pre_value(&self, key: &Key) -> Result { match self.read_pre_value(key) { - Ok(None) => Err(state::StorageError::AllocMessage(format!( + Ok(None) => Err(Error::AllocMessage(format!( "Expected a value to be present in the key {key}" ))), Ok(Some(x)) => Ok(x), @@ -531,9 +500,9 @@ pub trait StorageReader { fn must_read_post_value( &self, key: &Key, - ) -> Result { + ) -> Result { match self.read_post_value(key) { - Ok(None) => Err(state::StorageError::AllocMessage(format!( + Ok(None) => Err(Error::AllocMessage(format!( "Expected a value to be present in the key {key}" ))), Ok(Some(x)) => Ok(x), @@ -550,10 +519,7 @@ where { /// Helper function. After reading posterior state, /// borsh deserialize to specified type - fn read_post_value( - &self, - key: &Key, - ) -> Result, state::StorageError> + fn read_post_value(&self, key: &Key) -> Result> where T: BorshDeserialize, { @@ -562,10 +528,7 @@ where /// Helper function. 
After reading prior state, /// borsh deserialize to specified type - fn read_pre_value( - &self, - key: &Key, - ) -> Result, state::StorageError> + fn read_pre_value(&self, key: &Key) -> Result> where T: BorshDeserialize, { @@ -589,7 +552,7 @@ pub(super) mod testing { fn read_pre_value( &self, key: &Key, - ) -> Result, state::StorageError> { + ) -> Result> { self.pre .get(key) .map(|bytes| T::try_from_slice(bytes).into_storage_result()) @@ -599,7 +562,7 @@ pub(super) mod testing { fn read_post_value( &self, key: &Key, - ) -> Result, state::StorageError> { + ) -> Result> { self.post .get(key) .map(|bytes| T::try_from_slice(bytes).into_storage_result()) diff --git a/crates/vp/src/vp_host_fns.rs b/crates/vp/src/vp_host_fns.rs index 35cb825825..48c12099c6 100644 --- a/crates/vp/src/vp_host_fns.rs +++ b/crates/vp/src/vp_host_fns.rs @@ -2,62 +2,51 @@ use std::cell::RefCell; use std::fmt::Debug; -use std::num::TryFromIntError; use namada_core::address::{Address, ESTABLISHED_ADDRESS_BYTES_LEN}; -use namada_core::arith::{self, checked}; +use namada_core::arith::checked; use namada_core::chain::{BlockHeader, BlockHeight, Epoch, Epochs}; use namada_core::hash::{Hash, HASH_LENGTH}; use namada_core::storage::{Key, TxIndex, TX_INDEX_LENGTH}; use namada_events::{Event, EventTypeBuilder}; -use namada_gas as gas; -use namada_gas::{GasMetering, VpGasMeter, MEMORY_ACCESS_GAS_PER_BYTE}; -use namada_state::write_log::WriteLog; -use namada_state::{write_log, DBIter, ResultExt, StateRead, DB}; +use namada_gas::{ + self as gas, GasMetering, VpGasMeter, MEMORY_ACCESS_GAS_PER_BYTE, +}; use namada_tx::{BatchedTxRef, Section}; use thiserror::Error; +use crate::state::write_log::WriteLog; +use crate::state::{write_log, DBIter, PrefixIter, ResultExt, StateRead, DB}; +pub use crate::state::{Error, Result}; + /// These runtime errors will abort VP execution immediately #[allow(missing_docs)] #[derive(Error, Debug)] pub enum RuntimeError { #[error("Out of gas: {0}")] OutOfGas(gas::Error), - 
#[error("State error: {0}")] - StateError(namada_state::Error), - #[error("Storage error: {0}")] - StorageError(#[from] namada_state::StorageError), - #[error("Storage data error: {0}")] - StorageDataError(namada_core::storage::Error), - #[error("Encoding error: {0}")] - EncodingError(std::io::Error), - #[error("Numeric conversion error: {0}")] - NumConversionError(#[from] TryFromIntError), - #[error("Memory error: {0}")] - MemoryError(Box), #[error("Invalid transaction code hash")] InvalidCodeHash, #[error("No value found in result buffer")] NoValueInResultBuffer, #[error("The section signature is invalid: {0}")] InvalidSectionSignature(String), - #[error("{0}")] - Erased(String), // type erased error - #[error("Arithmetic {0}")] - Arith(#[from] arith::Error), +} + +impl From for Error { + fn from(value: RuntimeError) -> Self { + Error::new(value) + } } /// VP environment function result pub type EnvResult = std::result::Result; /// Add a gas cost incured in a validity predicate -pub fn add_gas( - gas_meter: &RefCell, - used_gas: u64, -) -> EnvResult<()> { +pub fn add_gas(gas_meter: &RefCell, used_gas: u64) -> Result<()> { gas_meter.borrow_mut().consume(used_gas).map_err(|err| { tracing::info!("Stopping VP execution because of gas error: {}", err); - RuntimeError::OutOfGas(err) + Error::new(RuntimeError::OutOfGas(err)) }) } @@ -67,7 +56,7 @@ pub fn read_pre( gas_meter: &RefCell, state: &S, key: &Key, -) -> EnvResult>> +) -> Result>> where S: StateRead + Debug, { @@ -90,8 +79,7 @@ where } None => { // When not found in write log, try to read from the storage - let (value, gas) = - state.db_read(key).map_err(RuntimeError::StateError)?; + let (value, gas) = state.db_read(key)?; add_gas(gas_meter, gas)?; Ok(value) } @@ -104,7 +92,7 @@ pub fn read_post( gas_meter: &RefCell, state: &S, key: &Key, -) -> EnvResult>> +) -> Result>> where S: StateRead + Debug, { @@ -126,8 +114,7 @@ where None => { // When not found in write log, try // to read from the storage - let (value, 
gas) = - state.db_read(key).map_err(RuntimeError::StateError)?; + let (value, gas) = state.db_read(key)?; add_gas(gas_meter, gas)?; Ok(value) } @@ -140,7 +127,7 @@ pub fn read_temp( gas_meter: &RefCell, state: &S, key: &Key, -) -> EnvResult>> +) -> Result>> where S: StateRead + Debug, { @@ -156,7 +143,7 @@ pub fn has_key_pre( gas_meter: &RefCell, state: &S, key: &Key, -) -> EnvResult +) -> Result where S: StateRead + Debug, { @@ -173,8 +160,7 @@ where Some(&write_log::StorageModification::InitAccount { .. }) => Ok(true), None => { // When not found in write log, try to check the storage - let (present, gas) = - state.db_has_key(key).map_err(RuntimeError::StateError)?; + let (present, gas) = state.db_has_key(key)?; add_gas(gas_meter, gas)?; Ok(present) } @@ -187,7 +173,7 @@ pub fn has_key_post( gas_meter: &RefCell, state: &S, key: &Key, -) -> EnvResult +) -> Result where S: StateRead + Debug, { @@ -204,8 +190,7 @@ where None => { // When not found in write log, try // to check the storage - let (present, gas) = - state.db_has_key(key).map_err(RuntimeError::StateError)?; + let (present, gas) = state.db_has_key(key)?; add_gas(gas_meter, gas)?; Ok(present) } @@ -216,7 +201,7 @@ where pub fn get_chain_id( gas_meter: &RefCell, state: &S, -) -> EnvResult +) -> Result where S: StateRead + Debug, { @@ -230,7 +215,7 @@ where pub fn get_block_height( gas_meter: &RefCell, state: &S, -) -> EnvResult +) -> Result where S: StateRead + Debug, { @@ -244,12 +229,11 @@ pub fn get_block_header( gas_meter: &RefCell, state: &S, height: BlockHeight, -) -> EnvResult> +) -> Result> where S: StateRead + Debug, { - let (header, gas) = StateRead::get_block_header(state, Some(height)) - .map_err(RuntimeError::StateError)?; + let (header, gas) = StateRead::get_block_header(state, Some(height))?; add_gas(gas_meter, gas)?; Ok(header) } @@ -259,7 +243,7 @@ where pub fn get_tx_code_hash( gas_meter: &RefCell, batched_tx: &BatchedTxRef<'_>, -) -> EnvResult> { +) -> Result> { add_gas( gas_meter, 
(HASH_LENGTH as u64) @@ -279,7 +263,7 @@ pub fn get_tx_code_hash( pub fn get_block_epoch( gas_meter: &RefCell, state: &S, -) -> EnvResult +) -> Result where S: StateRead + Debug, { @@ -293,7 +277,7 @@ where pub fn get_tx_index( gas_meter: &RefCell, tx_index: &TxIndex, -) -> EnvResult { +) -> Result { add_gas( gas_meter, (TX_INDEX_LENGTH as u64) @@ -307,7 +291,7 @@ pub fn get_tx_index( pub fn get_native_token( gas_meter: &RefCell, state: &S, -) -> EnvResult
+) -> Result
where S: StateRead + Debug, { @@ -324,7 +308,7 @@ where pub fn get_pred_epochs( gas_meter: &RefCell, state: &S, -) -> EnvResult +) -> Result where S: StateRead + Debug, { @@ -338,7 +322,7 @@ pub fn get_events( _gas_meter: &RefCell, state: &S, event_type: String, -) -> EnvResult> +) -> Result> where S: StateRead + Debug, { @@ -361,7 +345,7 @@ pub fn iter_prefix_pre<'a, D>( write_log: &'a WriteLog, db: &'a D, prefix: &Key, -) -> EnvResult> +) -> Result> where D: DB + for<'iter> DBIter<'iter>, { @@ -380,7 +364,7 @@ pub fn iter_prefix_post<'a, D>( write_log: &'a WriteLog, db: &'a D, prefix: &Key, -) -> EnvResult> +) -> Result> where D: DB + for<'iter> DBIter<'iter>, { @@ -392,8 +376,8 @@ where /// Get the next item in a storage prefix iterator (pre or post). pub fn iter_next( gas_meter: &RefCell, - iter: &mut namada_state::PrefixIter<'_, DB>, -) -> EnvResult)>> + iter: &mut PrefixIter<'_, DB>, +) -> Result)>> where DB: namada_state::DB + for<'iter> namada_state::DBIter<'iter>, { diff --git a/crates/vp_prelude/.gitignore b/crates/vp_prelude/.gitignore deleted file mode 100644 index 65d4c18e2d..0000000000 --- a/crates/vp_prelude/.gitignore +++ /dev/null @@ -1,10 +0,0 @@ -# Generated by Cargo -# will have compiled files and executables -debug/ -target/ - -# These are backup files generated by rustfmt -**/*.rs.bk - -# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html -Cargo.lock \ No newline at end of file diff --git a/crates/vp_prelude/src/lib.rs b/crates/vp_prelude/src/lib.rs index 8fd7e70e0a..46d52ab619 100644 --- a/crates/vp_prelude/src/lib.rs +++ b/crates/vp_prelude/src/lib.rs @@ -42,8 +42,7 @@ pub use namada_governance::pgf::storage as pgf_storage; pub use namada_governance::storage as gov_storage; pub use namada_macros::validity_predicate; pub use namada_storage::{ - iter_prefix, iter_prefix_bytes, Error as StorageError, OptionExt, - ResultExt, StorageRead, + iter_prefix, iter_prefix_bytes, Error, OptionExt, ResultExt, 
StorageRead, }; pub use namada_tx::{BatchedTx, Section, Tx}; use namada_vm_env::vp::*; @@ -279,7 +278,7 @@ impl<'view> VpEnv<'view> for Ctx { fn read_temp( &self, key: &storage::Key, - ) -> Result, StorageError> { + ) -> Result, Error> { let key = key.to_string(); let read_result = unsafe { namada_vp_read_temp(key.as_ptr() as _, key.len() as _) }; @@ -290,19 +289,19 @@ impl<'view> VpEnv<'view> for Ctx { fn read_bytes_temp( &self, key: &storage::Key, - ) -> Result>, StorageError> { + ) -> Result>, Error> { let key = key.to_string(); let read_result = unsafe { namada_vp_read_temp(key.as_ptr() as _, key.len() as _) }; Ok(read_from_buffer(read_result, namada_vp_result_buffer)) } - fn get_chain_id(&self) -> Result { + fn get_chain_id(&self) -> Result { // Both `CtxPreStorageRead` and `CtxPostStorageRead` have the same impl get_chain_id() } - fn get_block_height(&self) -> Result { + fn get_block_height(&self) -> Result { // Both `CtxPreStorageRead` and `CtxPostStorageRead` have the same impl get_block_height() } @@ -310,12 +309,12 @@ impl<'view> VpEnv<'view> for Ctx { fn get_block_header( &self, height: BlockHeight, - ) -> Result, StorageError> { + ) -> Result, Error> { // Both `CtxPreStorageRead` and `CtxPostStorageRead` have the same impl get_block_header(height) } - fn get_block_epoch(&self) -> Result { + fn get_block_epoch(&self) -> Result { // Both `CtxPreStorageRead` and `CtxPostStorageRead` have the same impl get_block_epoch() } @@ -325,19 +324,16 @@ impl<'view> VpEnv<'view> for Ctx { get_pred_epochs() } - fn get_tx_index(&self) -> Result { + fn get_tx_index(&self) -> Result { get_tx_index() } - fn get_native_token(&self) -> Result { + fn get_native_token(&self) -> Result { // Both `CtxPreStorageRead` and `CtxPostStorageRead` have the same impl get_native_token() } - fn get_events( - &self, - event_type: &EventType, - ) -> Result, StorageError> { + fn get_events(&self, event_type: &EventType) -> Result, Error> { let event_type = event_type.to_string(); let 
read_result = unsafe { namada_vp_get_events( @@ -355,7 +351,7 @@ impl<'view> VpEnv<'view> for Ctx { fn iter_prefix<'iter>( &'iter self, prefix: &storage::Key, - ) -> Result, StorageError> { + ) -> Result, Error> { iter_prefix_pre_impl(prefix) } @@ -363,7 +359,7 @@ impl<'view> VpEnv<'view> for Ctx { &self, vp_code_hash: Hash, input_data: BatchedTxRef<'_>, - ) -> Result<(), StorageError> { + ) -> Result<(), Error> { let input_data_bytes = input_data.serialize_to_vec(); HostEnvResult::success_or( @@ -375,11 +371,11 @@ impl<'view> VpEnv<'view> for Ctx { input_data_bytes.len() as _, ) }, - StorageError::SimpleMessage("VP rejected the tx"), + Error::SimpleMessage("VP rejected the tx"), ) } - fn get_tx_code_hash(&self) -> Result, StorageError> { + fn get_tx_code_hash(&self) -> Result, Error> { let result = Vec::with_capacity(HASH_LENGTH + 1); unsafe { namada_vp_get_tx_code_hash(result.as_ptr() as _); @@ -397,14 +393,14 @@ impl<'view> VpEnv<'view> for Ctx { }) } - fn charge_gas(&self, used_gas: u64) -> Result<(), StorageError> { + fn charge_gas(&self, used_gas: u64) -> Result<(), Error> { unsafe { namada_vp_charge_gas(used_gas) }; Ok(()) } } impl namada_tx::action::Read for Ctx { - type Err = StorageError; + type Err = Error; fn read_temp( &self, @@ -417,17 +413,14 @@ impl namada_tx::action::Read for Ctx { impl StorageRead for CtxPreStorageRead<'_> { type PrefixIter<'iter> = KeyValIterator<(String, Vec)> where Self: 'iter; - fn read_bytes( - &self, - key: &storage::Key, - ) -> Result>, StorageError> { + fn read_bytes(&self, key: &storage::Key) -> Result>, Error> { let key = key.to_string(); let read_result = unsafe { namada_vp_read_pre(key.as_ptr() as _, key.len() as _) }; Ok(read_from_buffer(read_result, namada_vp_result_buffer)) } - fn has_key(&self, key: &storage::Key) -> Result { + fn has_key(&self, key: &storage::Key) -> Result { let key = key.to_string(); let found = unsafe { namada_vp_has_key_pre(key.as_ptr() as _, key.len() as _) }; @@ -437,7 +430,7 @@ impl 
StorageRead for CtxPreStorageRead<'_> { fn iter_prefix<'iter>( &'iter self, prefix: &storage::Key, - ) -> Result, StorageError> { + ) -> Result, Error> { iter_prefix_pre_impl(prefix) } @@ -446,7 +439,7 @@ impl StorageRead for CtxPreStorageRead<'_> { fn iter_next<'iter>( &'iter self, iter: &mut Self::PrefixIter<'iter>, - ) -> Result)>, StorageError> { + ) -> Result)>, Error> { let read_result = unsafe { namada_vp_iter_next(iter.0) }; Ok(read_key_val_bytes_from_buffer( read_result, @@ -454,22 +447,22 @@ impl StorageRead for CtxPreStorageRead<'_> { )) } - fn get_chain_id(&self) -> Result { + fn get_chain_id(&self) -> Result { get_chain_id() } - fn get_block_height(&self) -> Result { + fn get_block_height(&self) -> Result { get_block_height() } fn get_block_header( &self, height: BlockHeight, - ) -> Result, StorageError> { + ) -> Result, Error> { get_block_header(height) } - fn get_block_epoch(&self) -> Result { + fn get_block_epoch(&self) -> Result { get_block_epoch() } @@ -477,11 +470,11 @@ impl StorageRead for CtxPreStorageRead<'_> { get_pred_epochs() } - fn get_tx_index(&self) -> Result { + fn get_tx_index(&self) -> Result { get_tx_index() } - fn get_native_token(&self) -> Result { + fn get_native_token(&self) -> Result { get_native_token() } } @@ -489,17 +482,14 @@ impl StorageRead for CtxPreStorageRead<'_> { impl StorageRead for CtxPostStorageRead<'_> { type PrefixIter<'iter> = KeyValIterator<(String, Vec)> where Self:'iter; - fn read_bytes( - &self, - key: &storage::Key, - ) -> Result>, StorageError> { + fn read_bytes(&self, key: &storage::Key) -> Result>, Error> { let key = key.to_string(); let read_result = unsafe { namada_vp_read_post(key.as_ptr() as _, key.len() as _) }; Ok(read_from_buffer(read_result, namada_vp_result_buffer)) } - fn has_key(&self, key: &storage::Key) -> Result { + fn has_key(&self, key: &storage::Key) -> Result { let key = key.to_string(); let found = unsafe { namada_vp_has_key_post(key.as_ptr() as _, key.len() as _) @@ -510,7 +500,7 @@ 
impl StorageRead for CtxPostStorageRead<'_> { fn iter_prefix<'iter>( &'iter self, prefix: &storage::Key, - ) -> Result, StorageError> { + ) -> Result, Error> { iter_prefix_post_impl(prefix) } @@ -519,7 +509,7 @@ impl StorageRead for CtxPostStorageRead<'_> { fn iter_next<'iter>( &'iter self, iter: &mut Self::PrefixIter<'iter>, - ) -> Result)>, StorageError> { + ) -> Result)>, Error> { let read_result = unsafe { namada_vp_iter_next(iter.0) }; Ok(read_key_val_bytes_from_buffer( read_result, @@ -527,22 +517,22 @@ impl StorageRead for CtxPostStorageRead<'_> { )) } - fn get_chain_id(&self) -> Result { + fn get_chain_id(&self) -> Result { get_chain_id() } - fn get_block_height(&self) -> Result { + fn get_block_height(&self) -> Result { get_block_height() } fn get_block_header( &self, height: BlockHeight, - ) -> Result, StorageError> { + ) -> Result, Error> { get_block_header(height) } - fn get_block_epoch(&self) -> Result { + fn get_block_epoch(&self) -> Result { get_block_epoch() } @@ -550,18 +540,18 @@ impl StorageRead for CtxPostStorageRead<'_> { get_pred_epochs() } - fn get_tx_index(&self) -> Result { + fn get_tx_index(&self) -> Result { get_tx_index() } - fn get_native_token(&self) -> Result { + fn get_native_token(&self) -> Result { get_native_token() } } fn iter_prefix_pre_impl( prefix: &storage::Key, -) -> Result)>, StorageError> { +) -> Result)>, Error> { let prefix = prefix.to_string(); let iter_id = unsafe { namada_vp_iter_prefix_pre(prefix.as_ptr() as _, prefix.len() as _) @@ -571,7 +561,7 @@ fn iter_prefix_pre_impl( fn iter_prefix_post_impl( prefix: &storage::Key, -) -> Result)>, StorageError> { +) -> Result)>, Error> { let prefix = prefix.to_string(); let iter_id = unsafe { namada_vp_iter_prefix_post(prefix.as_ptr() as _, prefix.len() as _) @@ -579,7 +569,7 @@ fn iter_prefix_post_impl( Ok(KeyValIterator(iter_id, PhantomData)) } -fn get_chain_id() -> Result { +fn get_chain_id() -> Result { let result = Vec::with_capacity(CHAIN_ID_LENGTH); unsafe { 
namada_vp_get_chain_id(result.as_ptr() as _); @@ -592,13 +582,11 @@ fn get_chain_id() -> Result { ) } -fn get_block_height() -> Result { +fn get_block_height() -> Result { Ok(BlockHeight(unsafe { namada_vp_get_block_height() })) } -fn get_block_header( - height: BlockHeight, -) -> Result, StorageError> { +fn get_block_header(height: BlockHeight) -> Result, Error> { let read_result = unsafe { namada_vp_get_block_header(height.0) }; match read_from_buffer(read_result, namada_vp_result_buffer) { Some(value) => Ok(Some( @@ -609,25 +597,25 @@ fn get_block_header( } } -fn get_block_epoch() -> Result { +fn get_block_epoch() -> Result { Ok(Epoch(unsafe { namada_vp_get_block_epoch() })) } -fn get_tx_index() -> Result { +fn get_tx_index() -> Result { Ok(TxIndex(unsafe { namada_vp_get_tx_index() })) } -fn get_pred_epochs() -> Result { +fn get_pred_epochs() -> Result { let read_result = unsafe { namada_vp_get_pred_epochs() }; let bytes = read_from_buffer(read_result, namada_vp_result_buffer).ok_or( - StorageError::SimpleMessage( + Error::SimpleMessage( "Missing result from `namada_vp_get_pred_epochs` call", ), )?; Ok(namada_core::decode(bytes).expect("Cannot decode pred epochs")) } -fn get_native_token() -> Result { +fn get_native_token() -> Result { let result = Vec::with_capacity(address::ADDRESS_LEN); unsafe { namada_vp_get_native_token(result.as_ptr() as _); From 494b533c4fffd54eb8e10bc9e2dd3c81767febc9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Thu, 22 Aug 2024 15:28:48 +0100 Subject: [PATCH 30/73] tx_prelude: update crate doc --- crates/tx_prelude/src/lib.rs | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/crates/tx_prelude/src/lib.rs b/crates/tx_prelude/src/lib.rs index 07e8289e5f..d7767e9892 100644 --- a/crates/tx_prelude/src/lib.rs +++ b/crates/tx_prelude/src/lib.rs @@ -1,5 +1,4 @@ -//! This crate contains library code for transaction WASM. Most of the code is -//! 
re-exported from the `namada_vm_env` crate. +//! This crate contains library code for transaction WASM. #![doc(html_favicon_url = "https://dev.namada.net/master/favicon.png")] #![doc(html_logo_url = "https://dev.namada.net/master/rustdoc-logo.png")] @@ -42,8 +41,8 @@ pub use namada_governance::storage as gov_storage; pub use namada_macros::transaction; pub use namada_parameters::storage as parameters_storage; pub use namada_state::{ - collections, iter_prefix, iter_prefix_bytes, OptionExt, ResultExt, - Error as Error, StorageRead, Result as Result, StorageWrite, + collections, iter_prefix, iter_prefix_bytes, Error, OptionExt, Result, + ResultExt, StorageRead, StorageWrite, }; use namada_token::MaspTransaction; pub use namada_tx::{action, data as transaction, BatchedTx, Section, Tx}; From 18e71d639985c0d67f4851ae97a32823fca14fc8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Fri, 23 Aug 2024 09:55:12 +0100 Subject: [PATCH 31/73] state: propagate migrations feature --- crates/state/Cargo.toml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/crates/state/Cargo.toml b/crates/state/Cargo.toml index c56b6285bf..bc327a7aaf 100644 --- a/crates/state/Cargo.toml +++ b/crates/state/Cargo.toml @@ -17,13 +17,15 @@ default = [] # for integration tests and test utilities testing = [ - "proptest", "namada_core/testing", "namada_merkle_tree/testing", "namada_storage/testing", + "proptest", ] migrations = [ "namada_migrations", + "namada_core/migrations", + "namada_storage/migrations", "linkme", ] benches = [] From 67c586a94d768912ee782e387e6f5027490c4d4e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Fri, 23 Aug 2024 09:55:37 +0100 Subject: [PATCH 32/73] sdk: remove unused dep --- Cargo.lock | 1 - crates/sdk/Cargo.toml | 1 - 2 files changed, 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 968bdb0240..c47e2aee9a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5225,7 +5225,6 @@ 
dependencies = [ "namada_proof_of_stake", "namada_state", "namada_storage", - "namada_test_utils", "namada_token", "namada_tx", "namada_vm", diff --git a/crates/sdk/Cargo.toml b/crates/sdk/Cargo.toml index f45a646956..ff5209547c 100644 --- a/crates/sdk/Cargo.toml +++ b/crates/sdk/Cargo.toml @@ -163,7 +163,6 @@ namada_proof_of_stake = { path = "../proof_of_stake", default-features = false, ] } namada_state = { path = "../state", features = ["testing"] } namada_storage = { path = "../storage", features = ["testing"] } -namada_test_utils = { path = "../test_utils" } namada_token = { path = "../token", features = ["testing"] } namada_tx = { path = "../tx", features = ["testing"]} namada_vm = { path = "../vm" } From 2e0c7a52f088009d05b17cb32eae9f9e5f762347 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Fri, 23 Aug 2024 10:16:57 +0100 Subject: [PATCH 33/73] tests: rm unused dep --- Cargo.lock | 1 - crates/tests/Cargo.toml | 1 - 2 files changed, 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c47e2aee9a..c7effe1ecb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5394,7 +5394,6 @@ dependencies = [ "namada_vp", "namada_vp_prelude", "once_cell", - "pretty_assertions", "proptest", "proptest-state-machine", "prost 0.12.3", diff --git a/crates/tests/Cargo.toml b/crates/tests/Cargo.toml index a58ee5aa81..263d1bc826 100644 --- a/crates/tests/Cargo.toml +++ b/crates/tests/Cargo.toml @@ -75,7 +75,6 @@ eyre.workspace = true flate2.workspace = true fs_extra.workspace = true once_cell.workspace = true -pretty_assertions.workspace = true proptest-state-machine.workspace = true rand.workspace = true tar.workspace = true From 323da6a98432669cfa071a17bed4d5773eb02be0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Fri, 23 Aug 2024 10:31:51 +0100 Subject: [PATCH 34/73] apps_lib: rm unused deps --- Cargo.lock | 4 ---- crates/apps_lib/Cargo.toml | 9 ++------- crates/apps_lib/src/lib.rs | 4 ---- 3 files changed, 2 
insertions(+), 15 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c7effe1ecb..da7ceaf4b7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4663,7 +4663,6 @@ dependencies = [ name = "namada_apps_lib" version = "0.43.0" dependencies = [ - "assert_matches", "async-trait", "base64 0.13.1", "bit-set", @@ -4694,7 +4693,6 @@ dependencies = [ "namada_macros", "namada_migrations", "namada_sdk", - "namada_test_utils", "namada_vm", "pretty_assertions", "proptest", @@ -4710,11 +4708,9 @@ dependencies = [ "tempfile", "tendermint-config", "tendermint-rpc", - "test-log", "textwrap-macros", "thiserror", "tokio", - "tokio-test", "toml 0.5.11", "tracing", "tracing-appender", diff --git a/crates/apps_lib/Cargo.toml b/crates/apps_lib/Cargo.toml index fef6adbd53..c5b76cc2ea 100644 --- a/crates/apps_lib/Cargo.toml +++ b/crates/apps_lib/Cargo.toml @@ -18,8 +18,8 @@ mainnet = [ "namada_sdk/mainnet", ] # for integration tests and test utilities -testing = ["namada_test_utils", "lazy_static", "namada_sdk/testing"] -benches = ["namada_test_utils", "lazy_static", "namada_sdk/benches"] +testing = ["lazy_static", "namada_sdk/testing"] +benches = ["lazy_static", "namada_sdk/benches"] integration = [] migrations = [ "namada_migrations", @@ -35,7 +35,6 @@ namada_core = {path = "../core"} namada_macros = {path = "../macros"} namada_migrations = {path = "../migrations", optional = true} namada_sdk = {path = "../sdk", features = ["download-params", "multicore"]} -namada_test_utils = {path = "../test_utils", optional = true} namada_vm = {path = "../vm"} async-trait.workspace = true @@ -86,13 +85,9 @@ zeroize.workspace = true [dev-dependencies] namada_sdk = {path = "../sdk", features = ["testing"]} -namada_test_utils = {path = "../test_utils"} -assert_matches.workspace = true bit-set.workspace = true proptest.workspace = true -test-log.workspace = true -tokio-test.workspace = true lazy_static.workspace= true pretty_assertions.workspace = true diff --git a/crates/apps_lib/src/lib.rs 
b/crates/apps_lib/src/lib.rs index 29a18fd78b..12a7f7991e 100644 --- a/crates/apps_lib/src/lib.rs +++ b/crates/apps_lib/src/lib.rs @@ -21,10 +21,6 @@ pub mod logging; pub mod tendermint_node; pub mod wallet; pub mod wasm_loader; -// This is here only to include the std's docs in our docs. -// Taken from . -#[doc(inline)] -pub use std; pub use namada_sdk::*; From e39b356f5abfbd975af3876a35fdbce6af534611 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Fri, 23 Aug 2024 10:44:33 +0100 Subject: [PATCH 35/73] apps: rm unused deps --- Cargo.lock | 6 ------ crates/apps/Cargo.toml | 7 ------- 2 files changed, 13 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index da7ceaf4b7..37cb5f6d84 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4639,8 +4639,6 @@ dependencies = [ name = "namada_apps" version = "0.43.0" dependencies = [ - "assert_matches", - "bit-set", "clap_complete", "clap_complete_nushell", "color-eyre", @@ -4648,11 +4646,7 @@ dependencies = [ "git2", "namada_apps_lib", "namada_node", - "namada_test_utils", - "proptest", - "test-log", "tokio", - "tokio-test", "toml 0.5.11", "tracing", "tracing-subscriber", diff --git a/crates/apps/Cargo.toml b/crates/apps/Cargo.toml index 32c14690da..e62e841810 100644 --- a/crates/apps/Cargo.toml +++ b/crates/apps/Cargo.toml @@ -67,13 +67,6 @@ tracing.workspace = true winapi.workspace = true [dev-dependencies] -namada_test_utils = {path = "../test_utils"} - -assert_matches.workspace = true -bit-set.workspace = true -proptest.workspace = true -test-log.workspace = true -tokio-test.workspace = true [build-dependencies] git2.workspace = true From 9a9b7dab32d039969c87930734a1bebb6cdc8d38 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Fri, 23 Aug 2024 12:44:36 +0100 Subject: [PATCH 36/73] changelog: add #3670 --- .changelog/unreleased/improvements/3670-crate-refactors.md | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 
.changelog/unreleased/improvements/3670-crate-refactors.md diff --git a/.changelog/unreleased/improvements/3670-crate-refactors.md b/.changelog/unreleased/improvements/3670-crate-refactors.md new file mode 100644 index 0000000000..3d41c0a729 --- /dev/null +++ b/.changelog/unreleased/improvements/3670-crate-refactors.md @@ -0,0 +1,3 @@ +- Reorganized some types and modules and refactored crates re- + exports, error handling features and removed unused dependencies. + ([\#3670](https://github.com/anoma/namada/pull/3670)) \ No newline at end of file From e9f7cb700ed84dccb667a621a5ad6f972637ef8c Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Fri, 23 Aug 2024 18:24:51 +0200 Subject: [PATCH 37/73] Propagates error from `is_proposal_accepted` instead of defaulting --- crates/governance/src/vp/mod.rs | 4 +--- crates/governance/src/vp/pgf.rs | 4 +--- crates/ibc/src/vp/mod.rs | 4 +--- crates/trans_token/src/vp.rs | 4 +--- 4 files changed, 4 insertions(+), 12 deletions(-) diff --git a/crates/governance/src/vp/mod.rs b/crates/governance/src/vp/mod.rs index 31865cc52b..1cadb0fc98 100644 --- a/crates/governance/src/vp/mod.rs +++ b/crates/governance/src/vp/mod.rs @@ -90,9 +90,7 @@ where if is_proposal_accepted( &self.ctx.pre(), tx_data.tx.data(tx_data.cmt).unwrap_or_default().as_ref(), - ) - .unwrap_or_default() - { + )? { return Ok(()); } diff --git a/crates/governance/src/vp/pgf.rs b/crates/governance/src/vp/pgf.rs index 0b473ef96a..f0ec7019ff 100644 --- a/crates/governance/src/vp/pgf.rs +++ b/crates/governance/src/vp/pgf.rs @@ -65,9 +65,7 @@ where .data(batched_tx.cmt) .unwrap_or_default() .as_ref(), - ) - .unwrap_or_default() - { + )? { return Ok(()); } diff --git a/crates/ibc/src/vp/mod.rs b/crates/ibc/src/vp/mod.rs index 45d8a38cfc..381fcbe6aa 100644 --- a/crates/ibc/src/vp/mod.rs +++ b/crates/ibc/src/vp/mod.rs @@ -157,9 +157,7 @@ where .data(batched_tx.cmt) .unwrap_or_default() .as_ref(), - ) - .unwrap_or_default() - { + )? 
{ return Ok(()); } diff --git a/crates/trans_token/src/vp.rs b/crates/trans_token/src/vp.rs index af56a30823..a53d9bc5f7 100644 --- a/crates/trans_token/src/vp.rs +++ b/crates/trans_token/src/vp.rs @@ -77,9 +77,7 @@ where if Gov::is_proposal_accepted( &self.ctx.pre(), tx_data.tx.data(tx_data.cmt).unwrap_or_default().as_ref(), - ) - .unwrap_or_default() - { + )? { return Ok(()); } From 6db4ceb7eb605b0c58e7688554dba1e076787eb7 Mon Sep 17 00:00:00 2001 From: Marco Granelli Date: Fri, 23 Aug 2024 18:35:03 +0200 Subject: [PATCH 38/73] Changelog #3700 --- .changelog/unreleased/bug-fixes/3700-fix-error-mishandling.md | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/unreleased/bug-fixes/3700-fix-error-mishandling.md diff --git a/.changelog/unreleased/bug-fixes/3700-fix-error-mishandling.md b/.changelog/unreleased/bug-fixes/3700-fix-error-mishandling.md new file mode 100644 index 0000000000..e8bae11d07 --- /dev/null +++ b/.changelog/unreleased/bug-fixes/3700-fix-error-mishandling.md @@ -0,0 +1,3 @@ +- Now we propagate the error coming from + `is_proposal_accepted` instead of falling back on a default. + ([\#3700](https://github.com/anoma/namada/pull/3700)) \ No newline at end of file From 3421d01e66f692452520a0b8a132ded4d92e3c14 Mon Sep 17 00:00:00 2001 From: brentstone Date: Mon, 26 Aug 2024 21:04:32 -0700 Subject: [PATCH 39/73] change quorum to 40% for default proposals --- crates/core/src/dec.rs | 5 ++++ crates/governance/src/utils.rs | 42 +++++++++++++++++----------------- 2 files changed, 26 insertions(+), 21 deletions(-) diff --git a/crates/core/src/dec.rs b/crates/core/src/dec.rs index 35d7b80116..b14b5ce2ba 100644 --- a/crates/core/src/dec.rs +++ b/crates/core/src/dec.rs @@ -149,6 +149,11 @@ impl Dec { Self::two().checked_div(3).expect("Cannot fail") } + /// The representation of 2 / 5 + pub fn two_fifths() -> Self { + Dec::new(4, 1).expect("Cannot fail") + } + /// Create a new [`Dec`] using a mantissa and a scale. 
pub fn new(mantissa: i128, scale: u8) -> Option { if scale > POS_DECIMAL_PRECISION { diff --git a/crates/governance/src/utils.rs b/crates/governance/src/utils.rs index d93ec8b23d..65c39e87b4 100644 --- a/crates/governance/src/utils.rs +++ b/crates/governance/src/utils.rs @@ -71,7 +71,7 @@ impl Vote { pub enum TallyType { /// The `yay` votes are at least 2/3 of the non-abstain votes, and 2/3 of /// the total voting power has voted - TwoThirds, + TwoFifths, /// There are more `yay` votes than `nay` votes, and at least 1/3 of the /// total voting power has voted OneHalfOverOneThird, @@ -84,8 +84,8 @@ impl TallyType { /// The type of tally used for each proposal type pub fn from(proposal_type: ProposalType, is_steward: bool) -> Self { match (proposal_type, is_steward) { - (ProposalType::Default, _) => TallyType::TwoThirds, - (ProposalType::DefaultWithWasm(_), _) => TallyType::TwoThirds, + (ProposalType::Default, _) => TallyType::TwoFifths, + (ProposalType::DefaultWithWasm(_), _) => TallyType::TwoFifths, (ProposalType::PGFSteward(_), _) => TallyType::OneHalfOverOneThird, (ProposalType::PGFPayment(_), true) => { TallyType::LessOneHalfOverOneThirdNay @@ -142,19 +142,19 @@ impl TallyResult { total_voting_power: VotePower, ) -> Result { let passed = match tally_type { - TallyType::TwoThirds => { - let at_least_two_third_voted = Self::get_total_voted_power( + TallyType::TwoFifths => { + let at_least_two_fifths_voted = Self::get_total_voted_power( yay_voting_power, nay_voting_power, abstain_voting_power, )? 
>= total_voting_power - .mul_ceil(Dec::two_thirds())?; + .mul_ceil(Dec::two_fifths())?; // yay >= 2/3 * (yay + nay) ---> yay >= 2 * nay let at_least_two_third_voted_yay = yay_voting_power >= checked!(nay_voting_power + nay_voting_power)?; - at_least_two_third_voted && at_least_two_third_voted_yay + at_least_two_fifths_voted && at_least_two_third_voted_yay } TallyType::OneHalfOverOneThird => { let at_least_one_third_voted = Self::get_total_voted_power( @@ -246,7 +246,7 @@ impl ProposalResult { impl Display for ProposalResult { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let threshold = match self.tally_type { - TallyType::TwoThirds => { + TallyType::TwoFifths => { self.total_voting_power.mul_ceil(Dec::two_thirds()) } TallyType::LessOneHalfOverOneThirdNay => Ok(token::Amount::zero()), @@ -462,7 +462,7 @@ mod test { for tally_type in [ TallyType::OneHalfOverOneThird, TallyType::LessOneHalfOverOneThirdNay, - TallyType::TwoThirds, + TallyType::TwoFifths, ] { let proposal_result = compute_proposal_result( proposal_votes.clone(), @@ -500,7 +500,7 @@ mod test { for tally_type in [ TallyType::OneHalfOverOneThird, TallyType::LessOneHalfOverOneThirdNay, - TallyType::TwoThirds, + TallyType::TwoFifths, ] { let proposal_result = compute_proposal_result( proposal_votes.clone(), @@ -549,7 +549,7 @@ mod test { for tally_type in [ TallyType::OneHalfOverOneThird, TallyType::LessOneHalfOverOneThirdNay, - TallyType::TwoThirds, + TallyType::TwoFifths, ] { let proposal_result = compute_proposal_result( proposal_votes.clone(), @@ -598,7 +598,7 @@ mod test { for tally_type in [ TallyType::OneHalfOverOneThird, TallyType::LessOneHalfOverOneThirdNay, - TallyType::TwoThirds, + TallyType::TwoFifths, ] { let proposal_result = compute_proposal_result( proposal_votes.clone(), @@ -659,7 +659,7 @@ mod test { for tally_type in [ TallyType::OneHalfOverOneThird, TallyType::LessOneHalfOverOneThirdNay, - TallyType::TwoThirds, + TallyType::TwoFifths, ] { let proposal_result = 
compute_proposal_result( proposal_votes.clone(), @@ -727,7 +727,7 @@ mod test { for tally_type in [ TallyType::OneHalfOverOneThird, TallyType::LessOneHalfOverOneThirdNay, - TallyType::TwoThirds, + TallyType::TwoFifths, ] { let proposal_result = compute_proposal_result( proposal_votes.clone(), @@ -786,7 +786,7 @@ mod test { for tally_type in [ TallyType::OneHalfOverOneThird, TallyType::LessOneHalfOverOneThirdNay, - TallyType::TwoThirds, + TallyType::TwoFifths, ] { let proposal_result = compute_proposal_result( proposal_votes.clone(), @@ -843,7 +843,7 @@ mod test { for tally_type in [ TallyType::OneHalfOverOneThird, TallyType::LessOneHalfOverOneThirdNay, - TallyType::TwoThirds, + TallyType::TwoFifths, ] { let proposal_result = compute_proposal_result( proposal_votes.clone(), @@ -916,7 +916,7 @@ mod test { for tally_type in [ TallyType::OneHalfOverOneThird, TallyType::LessOneHalfOverOneThirdNay, - TallyType::TwoThirds, + TallyType::TwoFifths, ] { let proposal_result = compute_proposal_result( proposal_votes.clone(), @@ -989,7 +989,7 @@ mod test { let proposal_result = compute_proposal_result( proposal_votes.clone(), validator_voting_power.add(validator_voting_power_two), - TallyType::TwoThirds, + TallyType::TwoFifths, ) .unwrap(); @@ -1058,7 +1058,7 @@ mod test { let proposal_result = compute_proposal_result( proposal_votes.clone(), validator_voting_power.add(validator_voting_power_two), - TallyType::TwoThirds, + TallyType::TwoFifths, ) .unwrap(); @@ -1114,7 +1114,7 @@ mod test { let proposal_result = compute_proposal_result( proposal_votes.clone(), delegator_voting_power_two.add(delegator_voting_power), - TallyType::TwoThirds, + TallyType::TwoFifths, ) .unwrap(); @@ -1167,7 +1167,7 @@ mod test { let proposal_result = compute_proposal_result( proposal_votes.clone(), token::Amount::from(200), - TallyType::TwoThirds, + TallyType::TwoFifths, ) .unwrap(); From d52f49a42ec7e13b34176e3b1a99240950f77f88 Mon Sep 17 00:00:00 2001 From: brentstone Date: Mon, 26 Aug 2024 
22:26:20 -0700 Subject: [PATCH 40/73] changelog: add #3703 --- .../unreleased/improvements/3703-change-default-gov-quorum.md | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 .changelog/unreleased/improvements/3703-change-default-gov-quorum.md diff --git a/.changelog/unreleased/improvements/3703-change-default-gov-quorum.md b/.changelog/unreleased/improvements/3703-change-default-gov-quorum.md new file mode 100644 index 0000000000..b29f7bde13 --- /dev/null +++ b/.changelog/unreleased/improvements/3703-change-default-gov-quorum.md @@ -0,0 +1,2 @@ +- Change the quorum for voting on a default governance proposal from 2/3 to 40%. + ([\#3703](https://github.com/anoma/namada/pull/3703)) \ No newline at end of file From 7f154f49964f4f361e98eaa624a3bebc89d73ce1 Mon Sep 17 00:00:00 2001 From: Kofi Otuo Date: Thu, 11 Jan 2024 02:04:46 +0000 Subject: [PATCH 41/73] Address issue here -> https://github.com/anoma/namada/issues/2151 --- crates/apps_lib/src/client/utils.rs | 21 ++++++++++++--------- crates/apps_lib/src/wallet/mod.rs | 12 ++++++------ 2 files changed, 18 insertions(+), 15 deletions(-) diff --git a/crates/apps_lib/src/client/utils.rs b/crates/apps_lib/src/client/utils.rs index d4b66eae7d..ca8b3559ad 100644 --- a/crates/apps_lib/src/client/utils.rs +++ b/crates/apps_lib/src/client/utils.rs @@ -17,7 +17,7 @@ use namada_sdk::key::*; use namada_sdk::string_encoding::StringEncoded; use namada_sdk::token; use namada_sdk::uint::Uint; -use namada_sdk::wallet::{alias, Wallet}; +use namada_sdk::wallet::{alias, LoadStoreError, Wallet}; use namada_vm::validate_untrusted_wasm; use prost::bytes::Bytes; use serde_json::json; @@ -197,12 +197,12 @@ pub async fn join_network( // Try to load pre-genesis wallet, if any let pre_genesis_wallet_path = base_dir.join(PRE_GENESIS_DIR); let pre_genesis_wallet = - if let Some(wallet) = crate::wallet::load(&pre_genesis_wallet_path) { + if let Ok(wallet) = crate::wallet::load(&pre_genesis_wallet_path) { Some(wallet) } else { 
validator_alias_and_dir .as_ref() - .and_then(|(_, path)| crate::wallet::load(path)) + .and_then(|(_, path)| crate::wallet::load(path).ok()) }; // Derive wallet from genesis @@ -469,6 +469,7 @@ pub fn derive_genesis_addresses( ) { let maybe_pre_genesis_wallet = try_load_pre_genesis_wallet(&global_args.base_dir) + .ok() .map(|(wallet, _)| wallet); let contents = fs::read_to_string(&args.genesis_txs_path).unwrap_or_else(|err| { @@ -792,7 +793,7 @@ pub fn init_genesis_validator( /// if it cannot be found. pub fn try_load_pre_genesis_wallet( base_dir: &Path, -) -> Option<(Wallet, PathBuf)> { +) -> Result<(Wallet, PathBuf), LoadStoreError> { let pre_genesis_dir = base_dir.join(PRE_GENESIS_DIR); crate::wallet::load(&pre_genesis_dir).map(|wallet| { @@ -805,12 +806,14 @@ pub fn try_load_pre_genesis_wallet( pub fn load_pre_genesis_wallet_or_exit( base_dir: &Path, ) -> (Wallet, PathBuf) { - try_load_pre_genesis_wallet(base_dir).unwrap_or_else(|| { - eprintln!("No pre-genesis wallet found.",); - safe_exit(1) - }) + match try_load_pre_genesis_wallet(base_dir) { + Ok(wallet) => wallet, + Err(e) => { + eprintln!("Error loading the wallet:\n {}", e.to_string()); + safe_exit(1) + } + } } - async fn download_file(url: impl AsRef) -> reqwest::Result { let url = url.as_ref(); let response = reqwest::get(url).await?; diff --git a/crates/apps_lib/src/wallet/mod.rs b/crates/apps_lib/src/wallet/mod.rs index 56b66d5879..e39ffd47af 100644 --- a/crates/apps_lib/src/wallet/mod.rs +++ b/crates/apps_lib/src/wallet/mod.rs @@ -14,7 +14,7 @@ pub use namada_sdk::wallet::alias::Alias; use namada_sdk::wallet::fs::FsWalletStorage; use namada_sdk::wallet::store::Store; use namada_sdk::wallet::{ - ConfirmationResponse, FindKeyError, Wallet, WalletIo, + ConfirmationResponse, FindKeyError, LoadStoreError, Wallet, WalletIo, }; pub use namada_sdk::wallet::{ValidatorData, ValidatorKeys}; use rand_core::OsRng; @@ -259,12 +259,12 @@ pub fn save(wallet: &Wallet) -> std::io::Result<()> { } /// Load a wallet 
from the store file. -pub fn load(store_dir: &Path) -> Option> { +pub fn load( + store_dir: &Path, +) -> Result, LoadStoreError> { let mut wallet = CliWalletUtils::new(store_dir.to_path_buf()); - if wallet.load().is_err() { - return None; - } - Some(wallet) + wallet.load()?; + Ok(wallet) } /// Load a wallet from the store file or create a new wallet without any From 28d2de24c37b19a9a9351c8e9780ccb64059421a Mon Sep 17 00:00:00 2001 From: Kofi Otuo Date: Thu, 11 Jan 2024 02:26:35 +0000 Subject: [PATCH 42/73] add changelog in .changelog directory --- .changelog/unreleased/bug-fixes/2151-main.md | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 .changelog/unreleased/bug-fixes/2151-main.md diff --git a/.changelog/unreleased/bug-fixes/2151-main.md b/.changelog/unreleased/bug-fixes/2151-main.md new file mode 100644 index 0000000000..a44297dc1b --- /dev/null +++ b/.changelog/unreleased/bug-fixes/2151-main.md @@ -0,0 +1,2 @@ +- Improve Clarity of Wallet Store Decoding Error Message + ([\#2151](https://github.com/anoma/namada/issues/2151)) \ No newline at end of file From 1156aa048c068aacd349c03ed586dbe5b4e43566 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Tue, 27 Aug 2024 12:24:26 +0100 Subject: [PATCH 43/73] fixup! 
Address issue here -> https://github.com/anoma/namada/issues/2151 --- crates/apps_lib/src/client/utils.rs | 2 +- crates/apps_lib/src/wallet/store.rs | 13 +++++++++---- crates/sdk/src/wallet/mod.rs | 10 ++++++++++ crates/tests/src/integration/setup.rs | 7 ++++--- 4 files changed, 24 insertions(+), 8 deletions(-) diff --git a/crates/apps_lib/src/client/utils.rs b/crates/apps_lib/src/client/utils.rs index ca8b3559ad..a43533afcf 100644 --- a/crates/apps_lib/src/client/utils.rs +++ b/crates/apps_lib/src/client/utils.rs @@ -809,7 +809,7 @@ pub fn load_pre_genesis_wallet_or_exit( match try_load_pre_genesis_wallet(base_dir) { Ok(wallet) => wallet, Err(e) => { - eprintln!("Error loading the wallet:\n {}", e.to_string()); + eprintln!("Error loading the wallet: {e}"); safe_exit(1) } } diff --git a/crates/apps_lib/src/wallet/store.rs b/crates/apps_lib/src/wallet/store.rs index 15cce41b6f..d6f58a9a72 100644 --- a/crates/apps_lib/src/wallet/store.rs +++ b/crates/apps_lib/src/wallet/store.rs @@ -18,10 +18,15 @@ pub fn wallet_file(store_dir: impl AsRef) -> PathBuf { /// Load the store file or create a new one without any keys or addresses. pub fn load_or_new(store_dir: &Path) -> Result { - load(store_dir).or_else(|_| { - let wallet = CliWalletUtils::new(store_dir.to_path_buf()); - wallet.save()?; - Ok(wallet.into()) + load(store_dir).or_else(|err| { + // Only create a new file if not found, otherwise propagate the err + if let LoadStoreError::NotFound { .. 
} = &err { + let wallet = CliWalletUtils::new(store_dir.to_path_buf()); + wallet.save()?; + Ok(wallet.into()) + } else { + Err(err) + } }) } diff --git a/crates/sdk/src/wallet/mod.rs b/crates/sdk/src/wallet/mod.rs index e3d1c3f5a1..26ddc93cb7 100644 --- a/crates/sdk/src/wallet/mod.rs +++ b/crates/sdk/src/wallet/mod.rs @@ -97,8 +97,12 @@ pub trait WalletIo: Sized + Clone { } /// Errors of wallet loading and storing +#[allow(missing_docs)] #[derive(Error, Debug)] pub enum LoadStoreError { + /// Wallet store file not found + #[error("No wallet store file found at \"{path}\"")] + NotFound { path: String }, /// Wallet store decoding error #[error("Failed decoding the wallet store: {0}")] Decode(toml::de::Error), @@ -169,6 +173,12 @@ pub mod fs { wallet: &mut Wallet, ) -> Result<(), LoadStoreError> { let wallet_file = self.store_dir().join(FILE_NAME); + if !wallet_file.exists() { + return Err(LoadStoreError::NotFound { + path: wallet_file.to_string_lossy().to_string(), + }); + } + let mut options = fs::OpenOptions::new(); options.read(true).write(false); let lock = diff --git a/crates/tests/src/integration/setup.rs b/crates/tests/src/integration/setup.rs index 4d557d747a..524df788bf 100644 --- a/crates/tests/src/integration/setup.rs +++ b/crates/tests/src/integration/setup.rs @@ -163,15 +163,16 @@ fn finalize_wallet( ) }); - // Try to load pre-genesis wallet - let pre_genesis_wallet = namada_apps_lib::wallet::load(&pre_genesis_path); + // Load pre-genesis wallet + let pre_genesis_wallet = + namada_apps_lib::wallet::load(&pre_genesis_path).unwrap(); let chain_dir = global_args .base_dir .join(global_args.chain_id.as_ref().unwrap().as_str()); // Derive wallet from genesis let wallet = genesis.derive_wallet( &chain_dir, - pre_genesis_wallet, + Some(pre_genesis_wallet), validator_alias_and_pre_genesis_wallet, ); namada_apps_lib::wallet::save(&wallet).unwrap(); From 753657470fb7e113aa29de9c938f85b94e83a824 Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Tue, 27 Aug 2024 12:26:02 +0100 Subject: [PATCH 44/73] fixup! add changelog in .changelog directory --- .../bug-fixes/{2151-main.md => 3705-wallet-decoding-err.md} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename .changelog/unreleased/bug-fixes/{2151-main.md => 3705-wallet-decoding-err.md} (100%) diff --git a/.changelog/unreleased/bug-fixes/2151-main.md b/.changelog/unreleased/bug-fixes/3705-wallet-decoding-err.md similarity index 100% rename from .changelog/unreleased/bug-fixes/2151-main.md rename to .changelog/unreleased/bug-fixes/3705-wallet-decoding-err.md From 4051cfc57707446d944cb503df96b67fe570de3b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Tue, 27 Aug 2024 12:26:47 +0100 Subject: [PATCH 45/73] fixup! add changelog in .changelog directory --- .changelog/unreleased/bug-fixes/3705-wallet-decoding-err.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.changelog/unreleased/bug-fixes/3705-wallet-decoding-err.md b/.changelog/unreleased/bug-fixes/3705-wallet-decoding-err.md index a44297dc1b..bc223e4e0a 100644 --- a/.changelog/unreleased/bug-fixes/3705-wallet-decoding-err.md +++ b/.changelog/unreleased/bug-fixes/3705-wallet-decoding-err.md @@ -1,2 +1,2 @@ -- Improve Clarity of Wallet Store Decoding Error Message +- Handle errors when loading wallet file and only create a new one if not found. 
([\#2151](https://github.com/anoma/namada/issues/2151)) \ No newline at end of file From 130f149cbfaae8880c18e7c50fc21a5e0d8c1b56 Mon Sep 17 00:00:00 2001 From: Tomas Zemanovic Date: Tue, 27 Aug 2024 15:43:09 +0200 Subject: [PATCH 46/73] Apply suggestions from code review Co-authored-by: Tiago Carvalho --- crates/core/src/chain.rs | 5 +++-- crates/node/src/protocol.rs | 3 +-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/crates/core/src/chain.rs b/crates/core/src/chain.rs index 419089d14d..67ae6aa3c2 100644 --- a/crates/core/src/chain.rs +++ b/crates/core/src/chain.rs @@ -18,7 +18,7 @@ use crate::bytes::ByteBuf; use crate::hash::Hash; use crate::time::DateTimeUtc; -/// The length of the block's hash string +/// The length of the block hash pub const BLOCK_HASH_LENGTH: usize = 32; /// The length of the block height pub const BLOCK_HEIGHT_LENGTH: usize = 8; @@ -218,6 +218,7 @@ impl TryFrom for BlockHeight { .map_err(|e| format!("Unexpected height value {}, {}", value, e)) } } + impl BlockHeight { /// The first block height 1. pub const fn first() -> Self { @@ -422,7 +423,7 @@ impl Epoch { /// `Epoch(0)` if overflow occurred. 
#[must_use = "this returns the result of the operation, without modifying \ the original"] - pub fn sub_or_default(self, rhs: Epoch) -> Self { + pub fn saturating_sub(self, rhs: Epoch) -> Self { self.checked_sub(rhs).unwrap_or_default() } } diff --git a/crates/node/src/protocol.rs b/crates/node/src/protocol.rs index 4012d77506..be8c2e3d17 100644 --- a/crates/node/src/protocol.rs +++ b/crates/node/src/protocol.rs @@ -1339,8 +1339,7 @@ where ), InternalAddress::ReplayProtection => Err( // Replay protection entries should never be - // written to - // via transactions + // written to via transactions Error::AccessForbidden( (*internal_addr).clone(), ), From c71daae2ae1f343a81d1e91ccfaa1a826afa64b5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Tue, 27 Aug 2024 14:52:29 +0100 Subject: [PATCH 47/73] fixup! Apply suggestions from code review --- crates/core/src/chain.rs | 27 +++++++++++-------- crates/proof_of_stake/src/parameters.rs | 2 +- .../src/tests/test_validator.rs | 4 +-- crates/state/src/lib.rs | 13 +++++---- 4 files changed, 25 insertions(+), 21 deletions(-) diff --git a/crates/core/src/chain.rs b/crates/core/src/chain.rs index 67ae6aa3c2..16b0f2bd08 100644 --- a/crates/core/src/chain.rs +++ b/crates/core/src/chain.rs @@ -13,7 +13,6 @@ use serde::{Deserialize, Serialize}; use sha2::{Digest, Sha256}; use thiserror::Error; -use crate::borsh::BorshSerializeExt; use crate::bytes::ByteBuf; use crate::hash::Hash; use crate::time::DateTimeUtc; @@ -286,14 +285,6 @@ pub enum ParseBlockHashError { ParseBlockHash(String), } -impl TryFrom> for BlockHash { - type Error = ParseBlockHashError; - - fn try_from(value: Vec) -> Result { - value.as_slice().try_into() - } -} - impl core::fmt::Debug for BlockHash { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let hash = format!("{}", ByteBuf(&self.0)); @@ -535,8 +526,9 @@ pub struct BlockHeader { impl BlockHeader { /// The number of bytes when this header is encoded - pub fn 
encoded_len(&self) -> usize { - self.serialize_to_vec().len() + pub const fn encoded_len() -> usize { + // checked in `test_block_header_encoded_len` + 103 } } @@ -749,6 +741,7 @@ mod tests { use proptest::prelude::*; use super::*; + use crate::borsh::BorshSerializeExt; proptest! { /// Test any chain ID that is generated via `from_genesis` function is valid. @@ -933,4 +926,16 @@ mod tests { ); } } + + #[test] + fn test_block_header_encoded_len() { + #[allow(clippy::disallowed_methods)] + let header = BlockHeader { + hash: Hash::zero(), + time: DateTimeUtc::now(), + next_validators_hash: Hash::zero(), + }; + let len = header.serialize_to_vec().len(); + assert_eq!(len, BlockHeader::encoded_len()) + } } diff --git a/crates/proof_of_stake/src/parameters.rs b/crates/proof_of_stake/src/parameters.rs index 776cb6b159..6ed155a3ca 100644 --- a/crates/proof_of_stake/src/parameters.rs +++ b/crates/proof_of_stake/src/parameters.rs @@ -232,7 +232,7 @@ impl OwnedPosParams { infraction_epoch: Epoch, ) -> (Epoch, Epoch) { let start = infraction_epoch - .sub_or_default(Epoch(self.cubic_slashing_window_length)); + .saturating_sub(Epoch(self.cubic_slashing_window_length)); let end = infraction_epoch.unchecked_add(self.cubic_slashing_window_length); (start, end) diff --git a/crates/proof_of_stake/src/tests/test_validator.rs b/crates/proof_of_stake/src/tests/test_validator.rs index e9fc6d987f..9103f8acae 100644 --- a/crates/proof_of_stake/src/tests/test_validator.rs +++ b/crates/proof_of_stake/src/tests/test_validator.rs @@ -967,8 +967,8 @@ fn test_validator_sets() { for e in Epoch::iter_bounds_inclusive( start_epoch, last_epoch - .sub_or_default(Epoch(DEFAULT_NUM_PAST_EPOCHS)) - .sub_or_default(Epoch(1)), + .saturating_sub(Epoch(DEFAULT_NUM_PAST_EPOCHS)) + .saturating_sub(Epoch(1)), ) { assert!( !consensus_validator_set_handle() diff --git a/crates/state/src/lib.rs b/crates/state/src/lib.rs index 6ddce56da9..70de19f402 100644 --- a/crates/state/src/lib.rs +++ b/crates/state/src/lib.rs 
@@ -166,18 +166,17 @@ pub trait StateRead: StorageRead + Debug { match height { Some(h) if h == self.in_mem().get_block_height().0 => { let header = self.in_mem().header.clone(); - let gas = match header { - Some(ref header) => { - let len = header.encoded_len() as u64; - checked!(len * MEMORY_ACCESS_GAS_PER_BYTE)? - } - None => MEMORY_ACCESS_GAS_PER_BYTE, + let gas = if header.is_some() { + let len = BlockHeader::encoded_len() as u64; + checked!(len * MEMORY_ACCESS_GAS_PER_BYTE)? + } else { + MEMORY_ACCESS_GAS_PER_BYTE }; Ok((header, gas)) } Some(h) => match self.db().read_block_header(h)? { Some(header) => { - let len = header.encoded_len() as u64; + let len = BlockHeader::encoded_len() as u64; let gas = checked!(len * STORAGE_ACCESS_GAS_PER_BYTE)?; Ok((Some(header), gas)) } From 50b42a7be21aabf310fccecc382334fc4138e9c9 Mon Sep 17 00:00:00 2001 From: Tiago Carvalho Date: Tue, 27 Aug 2024 15:04:19 +0100 Subject: [PATCH 48/73] Make set non-generic --- crates/token/src/lib.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/token/src/lib.rs b/crates/token/src/lib.rs index 6ba04fbe7e..125ddb358d 100644 --- a/crates/token/src/lib.rs +++ b/crates/token/src/lib.rs @@ -184,9 +184,9 @@ impl Transfer { } /// Set the key to the given amount - fn set( - map: &mut BTreeMap, - key: K, + fn set( + map: &mut BTreeMap, + key: Account, val: DenominatedAmount, ) { if val.is_zero() { From 9cbb3aa2d8006ca8a06da8bc3f5cc7e1aa384ba6 Mon Sep 17 00:00:00 2001 From: Tiago Carvalho Date: Tue, 27 Aug 2024 15:04:35 +0100 Subject: [PATCH 49/73] Add test coverage on token transfer struct --- crates/token/src/lib.rs | 130 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 130 insertions(+) diff --git a/crates/token/src/lib.rs b/crates/token/src/lib.rs index 125ddb358d..52c5da22f5 100644 --- a/crates/token/src/lib.rs +++ b/crates/token/src/lib.rs @@ -317,3 +317,133 @@ pub mod testing { }) } } + +#[cfg(test)] +mod test_token_transfer_actions { + use 
namada_core::address::testing::{established_address_1, nam}; + + use super::*; + + #[test] + fn test_set_to_zero() { + let account = Account { + owner: established_address_1(), + token: nam(), + }; + + let mut transfer = Transfer::default(); + + let zero = Amount::zero().native_denominated(); + Transfer::set(&mut transfer.sources, account.clone(), zero); + assert_eq!(transfer, Transfer::default()); + + let one = Amount::from(1).native_denominated(); + Transfer::set(&mut transfer.sources, account.clone(), one); + assert_eq!( + transfer, + Transfer { + sources: BTreeMap::from([(account, one)]), + ..Transfer::default() + } + ); + } + + #[test] + fn test_debit_credit() { + // test debit + test_debit_credit_aux( + Transfer::debit, + Transfer::credit, + |sources| Transfer { + sources, + ..Transfer::default() + }, + |targets| Transfer { + targets, + ..Transfer::default() + }, + ); + + // test credit + test_debit_credit_aux( + Transfer::credit, + Transfer::debit, + |targets| Transfer { + targets, + ..Transfer::default() + }, + |sources| Transfer { + sources, + ..Transfer::default() + }, + ); + } + + fn test_debit_credit_aux( + op1: fn( + Transfer, + Address, + Address, + DenominatedAmount, + ) -> Option, + op2: fn( + Transfer, + Address, + Address, + DenominatedAmount, + ) -> Option, + transfer1: fn(BTreeMap) -> Transfer, + transfer2: fn(BTreeMap) -> Transfer, + ) { + let account = Account { + owner: established_address_1(), + token: nam(), + }; + + let amount_100 = Amount::native_whole(100).native_denominated(); + let amount_90 = Amount::native_whole(90).native_denominated(); + let amount_80 = Amount::native_whole(80).native_denominated(); + let amount_10 = Amount::native_whole(10).native_denominated(); + + let transfer = Transfer::default(); + + let transfer = op1( + transfer, + account.owner.clone(), + account.token.clone(), + amount_10, + ) + .unwrap(); + + assert_eq!( + transfer, + transfer1(BTreeMap::from([(account.clone(), amount_10)])), + ); + + let transfer = op2( 
+ transfer, + account.owner.clone(), + account.token.clone(), + amount_100, + ) + .unwrap(); + + assert_eq!( + transfer, + transfer2(BTreeMap::from([(account.clone(), amount_90)])), + ); + + let transfer = op1( + transfer, + account.owner.clone(), + account.token.clone(), + amount_10, + ) + .unwrap(); + + assert_eq!( + transfer, + transfer2(BTreeMap::from([(account.clone(), amount_80)])), + ); + } +} From d996e910dfb6ade15ff6f231db26a3720d17979e Mon Sep 17 00:00:00 2001 From: satan Date: Tue, 20 Aug 2024 11:28:46 +0200 Subject: [PATCH 50/73] Reimplementing apply snapshot logic --- crates/merkle_tree/src/lib.rs | 2 +- crates/node/src/lib.rs | 8 +- crates/node/src/shell/mod.rs | 13 + crates/node/src/shell/snapshots.rs | 204 +++++++++++++- crates/node/src/shell/testing/node.rs | 7 +- crates/node/src/storage/mod.rs | 4 +- crates/node/src/storage/rocksdb.rs | 281 ++++++++++++++++--- crates/state/src/wl_state.rs | 2 +- crates/tests/src/integration/ledger_tests.rs | 229 +++++++++++++++ 9 files changed, 705 insertions(+), 45 deletions(-) diff --git a/crates/merkle_tree/src/lib.rs b/crates/merkle_tree/src/lib.rs index 6ddb1305fd..a499c5d8c8 100644 --- a/crates/merkle_tree/src/lib.rs +++ b/crates/merkle_tree/src/lib.rs @@ -804,7 +804,7 @@ impl MerkleTree { } /// The root hash of the merkle tree as bytes -#[derive(PartialEq)] +#[derive(Debug, PartialEq)] pub struct MerkleRoot(pub [u8; 32]); impl From for MerkleRoot { diff --git a/crates/node/src/lib.rs b/crates/node/src/lib.rs index 0da59b88e8..0f95ff1154 100644 --- a/crates/node/src/lib.rs +++ b/crates/node/src/lib.rs @@ -184,14 +184,14 @@ impl Shell { Request::ListSnapshots => { self.list_snapshots().map(Response::ListSnapshots) } - Request::OfferSnapshot(_) => { - Ok(Response::OfferSnapshot(Default::default())) + Request::OfferSnapshot(req) => { + Ok(Response::OfferSnapshot(self.offer_snapshot(req))) } Request::LoadSnapshotChunk(req) => self .load_snapshot_chunk(req) .map(Response::LoadSnapshotChunk), - 
Request::ApplySnapshotChunk(_) => { - Ok(Response::ApplySnapshotChunk(Default::default())) + Request::ApplySnapshotChunk(req) => { + Ok(Response::ApplySnapshotChunk(self.apply_snapshot_chunk(req))) } } } diff --git a/crates/node/src/shell/mod.rs b/crates/node/src/shell/mod.rs index 9617e1cb66..6ab6d292b8 100644 --- a/crates/node/src/shell/mod.rs +++ b/crates/node/src/shell/mod.rs @@ -45,6 +45,7 @@ use namada_sdk::eth_bridge::{EthBridgeQueries, EthereumOracleConfig}; use namada_sdk::ethereum_events::EthereumEvent; use namada_sdk::events::log::EventLog; use namada_sdk::gas::{Gas, TxGasMeter}; +use namada_sdk::hash::Hash; use namada_sdk::key::*; use namada_sdk::migrations::ScheduledMigration; use namada_sdk::parameters::{get_gas_scale, validate_tx_bytes}; @@ -341,6 +342,14 @@ pub enum MempoolTxType { RecheckTransaction, } +#[derive(Debug)] +pub struct SnapshotSync { + pub next_chunk: u64, + pub height: BlockHeight, + pub expected: Vec, + pub strikes: u64, +} + #[derive(Debug)] pub struct Shell where @@ -373,6 +382,9 @@ where /// When set, indicates after how many blocks a new snapshot /// will be taken (counting from the first block) pub blocks_between_snapshots: Option, + /// Data for a node downloading and apply snapshots as part of + /// the fast sync protocol. + pub syncing: Option, } /// Storage key filter to store the diffs into the storage. 
Return `false` for @@ -608,6 +620,7 @@ where event_log: EventLog::default(), scheduled_migration, blocks_between_snapshots: config.shell.blocks_between_snapshots, + syncing: None, }; shell.update_eth_oracle(&Default::default()); shell diff --git a/crates/node/src/shell/snapshots.rs b/crates/node/src/shell/snapshots.rs index 4128bf20f5..7a40d8b2ef 100644 --- a/crates/node/src/shell/snapshots.rs +++ b/crates/node/src/shell/snapshots.rs @@ -1,8 +1,11 @@ +use borsh::BorshDeserialize; use borsh_ext::BorshSerializeExt; +use namada_sdk::arith::checked; use namada_sdk::hash::{Hash, Sha256Hasher}; -use namada_sdk::state::BlockHeight; +use namada_sdk::state::{BlockHeight, StorageRead, DB}; -use super::{Error, ShellResult}; +use super::{Error, ShellResult, SnapshotSync}; +use crate::facade::tendermint::abci::response::ApplySnapshotChunkResult; use crate::facade::tendermint::abci::types::Snapshot; use crate::facade::tendermint::v0_37::abci::{ request as tm_request, response as tm_response, @@ -11,6 +14,8 @@ use crate::shell::Shell; use crate::storage; use crate::storage::{DbSnapshot, SnapshotMetadata}; +pub const MAX_SENDER_STRIKES: u64 = 5; + impl Shell { /// List the snapshot files held locally. Furthermore, the number /// of chunks, as hash of each chunk, and a hash of the chunk @@ -20,18 +25,21 @@ impl Shell { if self.blocks_between_snapshots.is_none() { Ok(Default::default()) } else { + tracing::info!("Request for snapshots received."); let snapshots = DbSnapshot::files(&self.base_dir) .map_err(Error::Snapshot)? .into_iter() .map(|SnapshotMetadata { height, chunks, .. 
}| { - let hash = Hash::sha256(chunks.serialize_to_vec()).0; + let hashes = + chunks.iter().map(|c| c.hash).collect::>(); + let hash = Hash::sha256(hashes.serialize_to_vec()).0; Snapshot { height: u32::try_from(height.0).unwrap().into(), format: 0, #[allow(clippy::cast_possible_truncation)] chunks: chunks.len() as u32, hash: hash.into_iter().collect(), - metadata: Default::default(), + metadata: hashes.serialize_to_vec().into(), } }) .collect(); @@ -52,8 +60,196 @@ impl Shell { &self.base_dir, ) .map_err(Error::Snapshot)?; + tracing::info!( + "Loading snapshot at height {}, chunk number {}", + req.height, + req.chunk, + ); Ok(tm_response::LoadSnapshotChunk { chunk: chunk.into_iter().collect(), }) } + + /// Decide if a snapshot should be accepted to sync the node forward in time + pub fn offer_snapshot( + &mut self, + req: tm_request::OfferSnapshot, + ) -> tm_response::OfferSnapshot { + match self.syncing.as_ref() { + None => { + if self.state.get_block_height().unwrap_or_default().0 + < u64::from(req.snapshot.height) + { + let Ok(chunks) = + Vec::::try_from_slice(&req.snapshot.metadata) + else { + return tm_response::OfferSnapshot::Reject; + }; + self.syncing = Some(SnapshotSync { + next_chunk: 0, + height: u64::from(req.snapshot.height).into(), + expected: chunks, + strikes: 0, + }); + tracing::info!("Accepting snapshot offer"); + tm_response::OfferSnapshot::Accept + } else { + tracing::info!("Rejecting snapshot offer"); + tm_response::OfferSnapshot::Reject + } + } + Some(snapshot_sync) => { + if snapshot_sync.height.0 < u64::from(req.snapshot.height) { + let Ok(chunks) = + Vec::::try_from_slice(&req.snapshot.metadata) + else { + tracing::info!("Rejecting snapshot offer"); + return tm_response::OfferSnapshot::Reject; + }; + self.syncing = Some(SnapshotSync { + next_chunk: 0, + height: u64::from(req.snapshot.height).into(), + expected: chunks, + strikes: 0, + }); + tracing::info!("Accepting snapshot offer"); + tm_response::OfferSnapshot::Accept + } else { + 
tracing::info!("Rejecting snapshot offer"); + tm_response::OfferSnapshot::Reject + } + } + } + } + + /// Write a snapshot chunk to the database + pub fn apply_snapshot_chunk( + &mut self, + req: tm_request::ApplySnapshotChunk, + ) -> tm_response::ApplySnapshotChunk { + let Some(snapshot_sync) = self.syncing.as_mut() else { + tracing::warn!("Received a snapshot although none were requested"); + // if we are not currently syncing, abort this sync protocol + // the syncing status is set by `OfferSnapshot`. + return tm_response::ApplySnapshotChunk { + result: ApplySnapshotChunkResult::Abort, + refetch_chunks: vec![], + reject_senders: vec![], + }; + }; + + // make sure we have been given the correct chunk + if u64::from(req.index) != snapshot_sync.next_chunk { + tracing::error!( + "Received wrong chunk, expected {}, got {}", + snapshot_sync.next_chunk, + req.index, + ); + return tm_response::ApplySnapshotChunk { + result: ApplySnapshotChunkResult::Unknown, + refetch_chunks: vec![ + u32::try_from(snapshot_sync.next_chunk).unwrap(), + ], + reject_senders: vec![], + }; + } + + let Some(expected_hash) = + snapshot_sync.expected.get(req.index as usize) + else { + tracing::error!( + "Received more chunks than expected; rejecting snapshot" + ); + self.syncing = None; + // if we get more chunks than expected, there is something wrong + // with this snapshot and we should reject it. + return tm_response::ApplySnapshotChunk { + result: ApplySnapshotChunkResult::RejectSnapshot, + refetch_chunks: vec![], + reject_senders: vec![], + }; + }; + + // check that the chunk matches the expected hash, otherwise + // re-fetch it in case it was corrupted. If the chunk fails + // to validate too many times, we reject the snapshot and sender. 
+ let chunk_hash = Hash::sha256(&req.chunk); + if *expected_hash != chunk_hash { + tracing::error!( + "Hash of chunk did not match, expected {}, got {}", + expected_hash, + chunk_hash, + ); + snapshot_sync.strikes = + checked!(snapshot_sync.strikes + 1).unwrap(); + if snapshot_sync.strikes == MAX_SENDER_STRIKES { + snapshot_sync.strikes = 0; + self.syncing = None; + + tracing::info!( + "Max number of strikes reached on chunk, rejecting \ + snapshot" + ); + return tm_response::ApplySnapshotChunk { + result: ApplySnapshotChunkResult::RejectSnapshot, + refetch_chunks: vec![], + reject_senders: vec![req.sender], + }; + } else { + return tm_response::ApplySnapshotChunk { + result: ApplySnapshotChunkResult::Retry, + refetch_chunks: vec![req.index], + reject_senders: vec![], + }; + } + } else { + snapshot_sync.strikes = 0; + }; + // when we first start applying a snapshot, + // clear the existing db. + if req.index == 0 { + self.state.db_mut().clear(snapshot_sync.height).unwrap(); + } + // apply snapshot changes to the database + // retry if an error occurs + let mut batch = Default::default(); + for (cf, key, value) in DbSnapshot::parse_chunk(&req.chunk) { + if self + .state + .db() + .insert_entry(&mut batch, None, &cf, &key, value) + .is_err() + { + return tm_response::ApplySnapshotChunk { + result: ApplySnapshotChunkResult::Retry, + refetch_chunks: vec![], + reject_senders: vec![], + }; + } + } + if self.state.db().exec_batch(batch).is_err() { + return tm_response::ApplySnapshotChunk { + result: ApplySnapshotChunkResult::Retry, + refetch_chunks: vec![], + reject_senders: vec![], + }; + } + + // increment the chunk counter + snapshot_sync.next_chunk = + checked!(snapshot_sync.next_chunk + 1).unwrap(); + // check if all chunks have been applied + if snapshot_sync.next_chunk == snapshot_sync.expected.len() as u64 { + tracing::info!("Snapshot completely applied"); + self.syncing = None; + // rebuild the in-memory state + self.state.load_last_state(); + } + + 
tm_response::ApplySnapshotChunk { + result: ApplySnapshotChunkResult::Accept, + refetch_chunks: vec![], + reject_senders: vec![], + } + } } diff --git a/crates/node/src/shell/testing/node.rs b/crates/node/src/shell/testing/node.rs index 24e324441a..e42d579857 100644 --- a/crates/node/src/shell/testing/node.rs +++ b/crates/node/src/shell/testing/node.rs @@ -30,7 +30,7 @@ use namada_sdk::queries::{ Client, EncodedResponseQuery, RequestCtx, RequestQuery, Router, RPC, }; use namada_sdk::state::{ - LastBlock, Sha256Hasher, StorageRead, EPOCH_SWITCH_BLOCKS_DELAY, + LastBlock, Sha256Hasher, StorageRead, DB, EPOCH_SWITCH_BLOCKS_DELAY, }; use namada_sdk::tendermint::abci::response::Info; use namada_sdk::tendermint::abci::types::VoteInfo; @@ -345,6 +345,11 @@ impl MockNode { self.genesis_dir().join("wallet.toml") } + pub fn db_path(&self) -> PathBuf { + let locked = self.shell.lock().unwrap(); + locked.state.db().path().unwrap().to_path_buf() + } + pub fn block_height(&self) -> BlockHeight { self.shell .lock() diff --git a/crates/node/src/storage/mod.rs b/crates/node/src/storage/mod.rs index 8466bd874a..308af5aa3e 100644 --- a/crates/node/src/storage/mod.rs +++ b/crates/node/src/storage/mod.rs @@ -10,7 +10,9 @@ use arse_merkle_tree::traits::Hasher; use arse_merkle_tree::H256; use blake2b_rs::{Blake2b, Blake2bBuilder}; use namada_sdk::state::{FullAccessState, StorageHasher}; -pub use rocksdb::{open, DbSnapshot, RocksDBUpdateVisitor, SnapshotMetadata}; +pub use rocksdb::{ + open, Chunk, DbSnapshot, RocksDBUpdateVisitor, SnapshotMetadata, +}; #[derive(Default)] pub struct PersistentStorageHasher(Blake2bHasher); diff --git a/crates/node/src/storage/rocksdb.rs b/crates/node/src/storage/rocksdb.rs index b7395e07c1..e8da8e0c2a 100644 --- a/crates/node/src/storage/rocksdb.rs +++ b/crates/node/src/storage/rocksdb.rs @@ -737,6 +737,72 @@ impl RocksDB { .get_cf(rollback_cf, key) .map_err(|e| Error::DBError(e.into_string())) } + + /// Writes an entry directly to the db + pub fn 
insert_entry( + &self, + batch: &mut RocksDBWriteBatch, + height: Option, + cf: &DbColFam, + key: &Key, + new_value: impl AsRef<[u8]>, + ) -> Result<()> { + let state_cf = self.get_column_family(STATE_CF)?; + let last_height: BlockHeight = self + .read_value(state_cf, BLOCK_HEIGHT_KEY)? + .ok_or_else(|| { + Error::DBError("No block height found".to_string()) + })?; + let desired_height = height.unwrap_or(last_height); + + if desired_height != last_height { + todo!( + "Overwriting values at heights different than the last \ + committed height hast yet to be implemented" + ); + } + // NB: the following code only updates values + // written to at the last committed height + + let val = new_value.as_ref(); + + // Write the new key-val in the Db column family + let cf_name = self.get_column_family(cf.to_str())?; + self.add_value_bytes_to_batch( + cf_name, + key.to_string(), + val.to_vec(), + batch, + ); + Ok(()) + } + + /// Erase the entire db. Use with caution. + pub fn clear(&mut self, height: BlockHeight) -> Result<()> { + let state_cf = self.get_column_family(STATE_CF)?; + for (_, cf) in self.column_families() { + let read_opts = make_iter_read_opts(None); + let iter = + self.inner + .iterator_cf_opt(cf, read_opts, IteratorMode::Start); + + for (key, _, _) in PersistentPrefixIterator( + PrefixIterator::new(iter, String::default()), + // Empty string to prevent prefix stripping, the prefix is + // already in the enclosed iterator + ) { + self.inner + .delete_cf(cf, key.as_bytes()) + .map_err(|e| Error::DBError(e.to_string()))?; + } + } + let height = height.serialize_to_vec(); + self.inner + .put_cf(state_cf, BLOCK_HEIGHT_KEY, height) + .expect("Could not write to DB"); + + Ok(()) + } } /// Information about a particular snapshot @@ -841,11 +907,12 @@ impl<'a> DbSnapshot<'a> { // for a given block height if entry_ext == Some(meta) { let metadata = std::fs::read_to_string(entry_path)?; - let metadata_bytes = HEXLOWER - .decode(metadata.as_bytes()) - .map_err(|e| { - 
std::io::Error::new(ErrorKind::InvalidData, e) - })?; + let metadata_bytes = base64::decode( + metadata.as_bytes(), + ) + .map_err(|e| { + std::io::Error::new(ErrorKind::InvalidData, e) + })?; let chunks: Vec = BorshDeserialize::try_from_slice( &metadata_bytes[..], @@ -930,9 +997,40 @@ impl<'a> DbSnapshot<'a> { .take(checked!(chunk_end - chunk_start).unwrap()) { bytes.extend(line?.as_bytes()); + bytes.push(b'\n'); } Ok(bytes) } + + pub fn parse_chunk(chunk: &[u8]) -> ChunkIterator<'_> { + let reader = std::io::BufReader::new(chunk); + ChunkIterator { + lines: reader.lines(), + } + } +} + +pub struct ChunkIterator<'a> { + lines: std::io::Lines>, +} + +impl<'a> Iterator for ChunkIterator<'a> { + type Item = (DbColFam, Key, Vec); + + fn next(&mut self) -> Option { + let line = self.lines.next()?.ok()?; + let line = line.trim(); + let mut iter = line.split(':'); + let cf = iter.next()?; + let rest = iter.next()?; + let mut iter = rest.split('='); + let key = iter.next()?; + let value = iter.next()?; + let cf = DbColFam::from_str(cf).ok()?; + let key = Key::parse(key).ok()?; + let value = base64::decode(value.as_bytes()).ok()?; + Some((cf, key, value)) + } } /// A chunk of a snapshot. Includes the last line number in the file @@ -1669,33 +1767,13 @@ impl DB for RocksDB { key: &Key, new_value: impl AsRef<[u8]>, ) -> Result<()> { + self.insert_entry(batch, height, cf, key, new_value.as_ref())?; let state_cf = self.get_column_family(STATE_CF)?; let last_height: BlockHeight = self .read_value(state_cf, BLOCK_HEIGHT_KEY)? 
.ok_or_else(|| { Error::DBError("No block height found".to_string()) })?; - let desired_height = height.unwrap_or(last_height); - - if desired_height != last_height { - todo!( - "Overwriting values at heights different than the last \ - committed height hast yet to be implemented" - ); - } - // NB: the following code only updates values - // written to at the last committed height - - let val = new_value.as_ref(); - - // Write the new key-val in the Db column family - let cf_name = self.get_column_family(cf.to_str())?; - self.add_value_bytes_to_batch( - cf_name, - key.to_string(), - val.to_vec(), - batch, - ); // If the CF is subspace, additionally update the diffs if cf == &DbColFam::SUBSPACE { @@ -1708,7 +1786,7 @@ impl DB for RocksDB { self.add_value_bytes_to_batch( diffs_cf, diffs_key, - val.to_vec(), + new_value.as_ref().to_vec(), batch, ); } @@ -2692,6 +2770,109 @@ mod test { db.add_block_to_batch(block, batch, true) } + /// Test the clear function deletes all keys from the db + /// and that we can still write keys to it afterward. 
+ #[test] + fn test_clear_db() { + let temp = tempfile::tempdir().expect("Test failed"); + let mut db = open(&temp, false, None).expect("Test failed"); + let state_cf = db.get_column_family(STATE_CF).expect("Test failed"); + db.inner + .put_cf( + state_cf, + BLOCK_HEIGHT_KEY, + BlockHeight(1).serialize_to_vec(), + ) + .expect("Test failed"); + db.write_subspace_val( + 1.into(), + &Key::parse("bing/fucking/bong").expect("Test failed"), + [1u8; 1], + false, + ) + .expect("Test failed"); + db.write_subspace_val( + 1.into(), + &Key::parse("ding/fucking/dong").expect("Test failed"), + [1u8; 1], + false, + ) + .expect("Test failed"); + let mut db_entries = HashMap::new(); + for (_, cf) in db.column_families() { + let read_opts = make_iter_read_opts(None); + let iter = + db.inner.iterator_cf_opt(cf, read_opts, IteratorMode::Start); + + for (key, raw_val, _gas) in PersistentPrefixIterator( + PrefixIterator::new(iter, String::default()), + // Empty string to prevent prefix stripping, the prefix is + // already in the enclosed iterator + ) { + db_entries.insert(key, raw_val); + } + } + let mut expected = HashMap::from([ + ("height".to_string(), vec![1, 0, 0, 0, 0, 0, 0, 0]), + ("bing/fucking/bong".to_string(), vec![1u8]), + ("ding/fucking/dong".to_string(), vec![1u8]), + ("0000000000002/new/bing/fucking/bong".to_string(), vec![1u8]), + ("0000000000002/new/ding/fucking/dong".to_string(), vec![1u8]), + ]); + assert_eq!(db_entries, expected); + db.clear(2.into()).expect("Test failed"); + let mut db_entries = HashMap::new(); + for (_, cf) in db.column_families() { + let read_opts = make_iter_read_opts(None); + let iter = + db.inner.iterator_cf_opt(cf, read_opts, IteratorMode::Start); + + for (key, raw_val, _gas) in PersistentPrefixIterator( + PrefixIterator::new(iter, String::default()), + // Empty string to prevent prefix stripping, the prefix is + // already in the enclosed iterator + ) { + db_entries.insert(key, raw_val); + } + } + let empty = HashMap::from([( + 
"height".to_string(), + vec![2u8, 0, 0, 0, 0, 0, 0, 0], + )]); + assert_eq!(db_entries, empty,); + db.write_subspace_val( + 1.into(), + &Key::parse("bing/fucking/bong").expect("Test failed"), + [1u8; 1], + false, + ) + .expect("Test failed"); + db.write_subspace_val( + 1.into(), + &Key::parse("ding/fucking/dong").expect("Test failed"), + [1u8; 1], + false, + ) + .expect("Test failed"); + let mut db_entries = HashMap::new(); + for (_, cf) in db.column_families() { + let read_opts = make_iter_read_opts(None); + let iter = + db.inner.iterator_cf_opt(cf, read_opts, IteratorMode::Start); + + for (key, raw_val, _gas) in PersistentPrefixIterator( + PrefixIterator::new(iter, String::default()), + // Empty string to prevent prefix stripping, the prefix is + // already in the enclosed iterator + ) { + db_entries.insert(key, raw_val); + } + } + expected.insert("height".to_string(), vec![2, 0, 0, 0, 0, 0, 0, 0]); + + assert_eq!(db_entries, expected); + } + /// Test that we chunk a series of lines /// up correctly based on a max chunk size. 
#[test] @@ -2784,7 +2965,7 @@ mod test { let temp = tempfile::tempdir().expect("Test failed"); let base_dir = temp.path().to_path_buf(); let chunks = vec![Chunk::default()]; - let chunk_bytes = HEXLOWER.encode(&chunks.serialize_to_vec()); + let chunk_bytes = base64::encode(chunks.serialize_to_vec()); for i in 0..4 { let mut path = base_dir.clone(); path.push(format!("snapshot_{}.snap", i)); @@ -2990,10 +3171,10 @@ mod test { DbSnapshot::paths(1.into(), temp.path().to_path_buf()); std::fs::write( &snap_file, - "fffffggggghh\naaaa\nbbbbb\ncc\ndddddddd".as_bytes(), + "fffffggggghh\naaaa\nbbbbb\ncc\ndddddddd\n".as_bytes(), ) .expect("Test failed"); - std::fs::write(meta_file, HEXLOWER.encode(&chunks.serialize_to_vec())) + std::fs::write(meta_file, base64::encode(chunks.serialize_to_vec())) .expect("Test failed"); let chunks: Vec<_> = (0..3) .filter_map(|i| { @@ -3001,9 +3182,9 @@ mod test { }) .collect(); let expected = vec![ - "fffffggggghh".as_bytes().to_vec(), - "aaaabbbbb".as_bytes().to_vec(), - "ccdddddddd".as_bytes().to_vec(), + "fffffggggghh\n".as_bytes().to_vec(), + "aaaa\nbbbbb\n".as_bytes().to_vec(), + "cc\ndddddddd\n".as_bytes().to_vec(), ]; assert_eq!(chunks, expected); @@ -3012,4 +3193,38 @@ mod test { std::fs::remove_file(snap_file).unwrap(); assert!(DbSnapshot::load_chunk(0.into(), 0, temp.path()).is_err()); } + + #[test] + fn test_chunk_iterator() { + let chunk = "state:bing/fucking/bong=AQ==\nsubspace:I/AM/BATMAN=Ag==\n"; + let iterator = DbSnapshot::parse_chunk(chunk.as_bytes()); + let expected = vec![ + ( + DbColFam::STATE, + Key::parse("bing/fucking/bong").expect("Test failed"), + vec![1u8], + ), + ( + DbColFam::SUBSPACE, + Key::parse("I/AM/BATMAN").expect("Test failed"), + vec![2u8], + ), + ]; + let parsed: Vec<_> = iterator.collect(); + assert_eq!(parsed, expected); + let bad_chunks = [ + "bloop:bing/fucking/bong=AQ==\n", + "bing/fucking/bong=AQ==\n", + "state:bing/fucking/bong:AQ==\n", + "state=bing/fucking/bong=AQ==\n", + 
"state:bing/fucking/bong\n", + "state:#bing/fucking/bong=AQ==\n", + "state:bing/fucking/bong=0Z\n", + ]; + for chunk in bad_chunks { + let iterator = DbSnapshot::parse_chunk(chunk.as_bytes()); + let parsed: Vec<_> = iterator.collect(); + assert!(parsed.is_empty()); + } + } } diff --git a/crates/state/src/wl_state.rs b/crates/state/src/wl_state.rs index 57869ba788..a252f2bd81 100644 --- a/crates/state/src/wl_state.rs +++ b/crates/state/src/wl_state.rs @@ -486,7 +486,7 @@ where /// Load the full state at the last committed height, if any. Returns the /// Merkle root hash and the height of the committed block. - fn load_last_state(&mut self) { + pub fn load_last_state(&mut self) { if let Some(BlockStateRead { height, time, diff --git a/crates/tests/src/integration/ledger_tests.rs b/crates/tests/src/integration/ledger_tests.rs index 236f4f6aa2..78690c8c54 100644 --- a/crates/tests/src/integration/ledger_tests.rs +++ b/crates/tests/src/integration/ledger_tests.rs @@ -1,4 +1,5 @@ use std::collections::BTreeSet; +use std::num::NonZeroU64; use std::str::FromStr; use assert_matches::assert_matches; @@ -17,6 +18,8 @@ use namada_core::token::NATIVE_MAX_DECIMAL_PLACES; use namada_node::shell::testing::client::run; use namada_node::shell::testing::node::NodeResults; use namada_node::shell::testing::utils::{Bin, CapturedOutput}; +use namada_node::shell::SnapshotSync; +use namada_node::storage::DbSnapshot; use namada_sdk::account::AccountPublicKeysMap; use namada_sdk::collections::HashMap; use namada_sdk::migrations; @@ -39,6 +42,7 @@ use crate::integration::setup; use crate::strings::{ TX_APPLIED_SUCCESS, TX_INSUFFICIENT_BALANCE, TX_REJECTED, }; +use crate::tendermint::abci::response::ApplySnapshotChunkResult; use crate::tx::tx_host_env::gov_storage::proposal::{ PGFInternalTarget, PGFTarget, }; @@ -1747,6 +1751,231 @@ fn enforce_fee_payment() -> Result<()> { Ok(()) } +/// Test that we can successfully apply a snapshot +/// from one node to another. 
+#[test] +fn apply_snapshot() -> Result<()> { + use namada_node::facade::tendermint::v0_37::abci::{ + request as tm_request, response as tm_response, + }; + // This address doesn't matter for tests. But an argument is required. + let validator_one_rpc = "http://127.0.0.1:26567"; + + let (mut node, _services) = setup::setup()?; + { + let mut locked = node.shell.lock().unwrap(); + locked.blocks_between_snapshots = + Some(NonZeroU64::try_from(10_000u64).unwrap()); + } + for _ in 0..3 { + node.next_epoch(); + } + let tx_args = vec![ + "transparent-transfer", + "--source", + BERTHA, + "--target", + ALBERT, + "--token", + NAM, + "--amount", + "1234", + "--signing-keys", + BERTHA_KEY, + "--node", + &validator_one_rpc, + "--force", + ]; + + let captured = CapturedOutput::of(|| run(&node, Bin::Client, tx_args)); + assert_matches!(captured.result, Ok(_)); + assert!(captured.contains(TX_APPLIED_SUCCESS)); + + let args = vec![ + "balance", + "--owner", + ALBERT, + "--token", + NAM, + "--node", + &validator_one_rpc, + ]; + let captured = CapturedOutput::of(|| run(&node, Bin::Client, args)); + assert!(captured.contains("1981234")); + + let base_dir = node.test_dir.path(); + let db = namada_node::storage::open(node.db_path(), true, None) + .expect("Could not open DB"); + let snapshot = db.snapshot(); + + let last_height = node.block_height(); + let cfs = db.column_families(); + snapshot + .write_to_file(cfs, base_dir.to_path_buf(), last_height) + .expect("Test failed"); + DbSnapshot::cleanup(last_height, base_dir).expect("Test failed"); + + let (node2, _services) = setup::setup()?; + let (offer, resp) = { + let shell = node.shell.lock().unwrap(); + let offer = shell + .list_snapshots() + .expect("Test failed") + .snapshots + .pop() + .expect("Test failed"); + let mut shell = node2.shell.lock().unwrap(); + ( + offer.clone(), + shell.offer_snapshot(tm_request::OfferSnapshot { + snapshot: offer, + app_hash: Default::default(), + }), + ) + }; + + 
assert_eq!(tm_response::OfferSnapshot::Accept, resp); + { + let shell = node.shell.lock().unwrap(); + let mut shell2 = node2.shell.lock().unwrap(); + for c in 0..offer.chunks { + let chunk = shell + .load_snapshot_chunk(tm_request::LoadSnapshotChunk { + height: (last_height.0 as u32).into(), + format: 0, + chunk: c, + }) + .expect("Test failed"); + let resp = + shell2.apply_snapshot_chunk(tm_request::ApplySnapshotChunk { + index: c, + chunk: chunk.chunk, + sender: "".to_string(), + }); + assert_eq!( + resp, + tm_response::ApplySnapshotChunk { + result: ApplySnapshotChunkResult::Accept, + refetch_chunks: vec![], + reject_senders: vec![], + } + ); + } + } + let (app_hash1, app_hash2) = { + ( + node.shell.lock().unwrap().state.in_mem().merkle_root(), + node2.shell.lock().unwrap().state.in_mem().merkle_root(), + ) + }; + assert_eq!(app_hash1, app_hash2); + let args = vec![ + "balance", + "--owner", + ALBERT, + "--token", + NAM, + "--node", + &validator_one_rpc, + ]; + let captured = CapturedOutput::of(|| run(&node2, Bin::Client, args)); + assert!(captured.contains("1981234")); + + Ok(()) +} + +/// Test the various failure conditions of state sync +#[test] +fn snapshot_unhappy_flows() -> Result<()> { + use namada_node::facade::tendermint::v0_37::abci::{ + request as tm_request, response as tm_response, + }; + let (node, _services) = setup::setup()?; + + // test we abort if not syncing + let resp = { + let mut shell = node.shell.lock().unwrap(); + shell.apply_snapshot_chunk(tm_request::ApplySnapshotChunk { + index: 0, + chunk: Default::default(), + sender: "".to_string(), + }) + }; + assert_eq!( + resp, + tm_response::ApplySnapshotChunk { + result: ApplySnapshotChunkResult::Abort, + refetch_chunks: vec![], + reject_senders: vec![], + } + ); + + { + let mut locked = node.shell.lock().unwrap(); + locked.syncing = Some(SnapshotSync { + next_chunk: 0, + height: Default::default(), + expected: vec![Default::default()], + strikes: 0, + }); + } + + // test we reject and 
re-fetch if the wrong chunk is given + let resp = { + let mut shell = node.shell.lock().unwrap(); + shell.apply_snapshot_chunk(tm_request::ApplySnapshotChunk { + index: 1, + chunk: Default::default(), + sender: "".to_string(), + }) + }; + assert_eq!( + resp, + tm_response::ApplySnapshotChunk { + result: ApplySnapshotChunkResult::Unknown, + refetch_chunks: vec![0], + reject_senders: vec![], + } + ); + // test we refetch a chunk if the hash is wrong up to five times. + for _ in 0..4 { + let resp = { + let mut shell = node.shell.lock().unwrap(); + shell.apply_snapshot_chunk(tm_request::ApplySnapshotChunk { + index: 0, + chunk: Default::default(), + sender: "".to_string(), + }) + }; + assert_eq!( + resp, + tm_response::ApplySnapshotChunk { + result: ApplySnapshotChunkResult::Retry, + refetch_chunks: vec![0], + reject_senders: vec![], + } + ); + } + let resp = { + let mut shell = node.shell.lock().unwrap(); + shell.apply_snapshot_chunk(tm_request::ApplySnapshotChunk { + index: 0, + chunk: Default::default(), + sender: "satan".to_string(), + }) + }; + assert_eq!( + resp, + tm_response::ApplySnapshotChunk { + result: ApplySnapshotChunkResult::RejectSnapshot, + refetch_chunks: vec![], + reject_senders: vec!["satan".to_string()], + } + ); + + Ok(()) +} + /// Test that a scheduled migration actually makes changes /// to storage at the scheduled height. #[test] From ce8a918ff9dec8a31a7b9e6dabe493f6ffa8c466 Mon Sep 17 00:00:00 2001 From: satan Date: Thu, 22 Aug 2024 13:59:57 +0200 Subject: [PATCH 51/73] Removed io errors from crashing nodes making snapshots. 
Added a dummy validator to cometbft genesis to ensure state sync can begin --- crates/node/src/lib.rs | 8 ++--- crates/node/src/shell/snapshots.rs | 34 +++++++++++++------- crates/node/src/tendermint_node.rs | 17 +++++++++- crates/tests/src/integration/ledger_tests.rs | 15 +++------ 4 files changed, 47 insertions(+), 27 deletions(-) diff --git a/crates/node/src/lib.rs b/crates/node/src/lib.rs index 0f95ff1154..53d20a29b0 100644 --- a/crates/node/src/lib.rs +++ b/crates/node/src/lib.rs @@ -182,14 +182,14 @@ impl Shell { Ok(Response::CheckTx(self.mempool_validate(&tx.tx, r#type))) } Request::ListSnapshots => { - self.list_snapshots().map(Response::ListSnapshots) + Ok(Response::ListSnapshots(self.list_snapshots())) } Request::OfferSnapshot(req) => { Ok(Response::OfferSnapshot(self.offer_snapshot(req))) } - Request::LoadSnapshotChunk(req) => self - .load_snapshot_chunk(req) - .map(Response::LoadSnapshotChunk), + Request::LoadSnapshotChunk(req) => { + Ok(Response::LoadSnapshotChunk(self.load_snapshot_chunk(req))) + } Request::ApplySnapshotChunk(req) => { Ok(Response::ApplySnapshotChunk(self.apply_snapshot_chunk(req))) } diff --git a/crates/node/src/shell/snapshots.rs b/crates/node/src/shell/snapshots.rs index 7a40d8b2ef..c1fe457bf4 100644 --- a/crates/node/src/shell/snapshots.rs +++ b/crates/node/src/shell/snapshots.rs @@ -4,7 +4,7 @@ use namada_sdk::arith::checked; use namada_sdk::hash::{Hash, Sha256Hasher}; use namada_sdk::state::{BlockHeight, StorageRead, DB}; -use super::{Error, ShellResult, SnapshotSync}; +use super::SnapshotSync; use crate::facade::tendermint::abci::response::ApplySnapshotChunkResult; use crate::facade::tendermint::abci::types::Snapshot; use crate::facade::tendermint::v0_37::abci::{ @@ -21,13 +21,15 @@ impl Shell { /// of chunks, as hash of each chunk, and a hash of the chunk /// metadata are provided so that syncing nodes can verify can verify /// snapshots they receive. 
- pub fn list_snapshots(&self) -> ShellResult { + pub fn list_snapshots(&self) -> tm_response::ListSnapshots { if self.blocks_between_snapshots.is_none() { - Ok(Default::default()) + Default::default() } else { tracing::info!("Request for snapshots received."); - let snapshots = DbSnapshot::files(&self.base_dir) - .map_err(Error::Snapshot)? + let Ok(snapshots) = DbSnapshot::files(&self.base_dir) else { + return Default::default(); + }; + let snapshots = snapshots .into_iter() .map(|SnapshotMetadata { height, chunks, .. }| { let hashes = @@ -44,7 +46,7 @@ impl Shell { }) .collect(); - Ok(tm_response::ListSnapshots { snapshots }) + tm_response::ListSnapshots { snapshots } } } @@ -53,21 +55,29 @@ impl Shell { pub fn load_snapshot_chunk( &self, req: tm_request::LoadSnapshotChunk, - ) -> ShellResult { - let chunk = DbSnapshot::load_chunk( + ) -> tm_response::LoadSnapshotChunk { + let Ok(chunk) = DbSnapshot::load_chunk( BlockHeight(req.height.into()), u64::from(req.chunk), &self.base_dir, - ) - .map_err(Error::Snapshot)?; + ) else { + tracing::debug!( + "Received a request for a snapshot we do not possess" + ); + // N.B. if the snapshot is no longer present, + // this will not match the hash in the metadata and will + // be rejected by syncing nodes. We don't return an error + // so as not to crash this node. 
+ return Default::default(); + }; tracing::info!( "Loading snapshot at height {}, chunk number {}", req.height, req.chunk, ); - Ok(tm_response::LoadSnapshotChunk { + tm_response::LoadSnapshotChunk { chunk: chunk.into_iter().collect(), - }) + } } /// Decide if a snapshot should be accepted to sync the node forward in time diff --git a/crates/node/src/tendermint_node.rs b/crates/node/src/tendermint_node.rs index e8df339eed..32411a34cf 100644 --- a/crates/node/src/tendermint_node.rs +++ b/crates/node/src/tendermint_node.rs @@ -16,7 +16,8 @@ use tokio::process::{Child, Command}; use tokio::sync::oneshot::error::RecvError; use tokio::sync::oneshot::{Receiver, Sender}; -use crate::facade::tendermint::{block, Genesis, Moniker}; +use crate::facade::tendermint::validator::Info; +use crate::facade::tendermint::{block, Genesis, Moniker, PublicKey}; use crate::facade::tendermint_config::{ Error as TendermintError, TendermintConfig, }; @@ -362,6 +363,20 @@ async fn write_tm_genesis( .try_into() .expect("Failed to convert initial genesis height"); } + + // N.B. Because we give cometbft our genesis validators only after init + // chain, at this stage, cometbft believes this node is the only + // validator unless we insert a dummy. If cometbft thinks a node is the + // only validator, it won't start state sync. These validators are + // overwritten after init chain is called. + const DUMMY_VALIDATOR: [u8; 32] = [ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + ]; + genesis.validators.push(Info::new( + PublicKey::from_raw_ed25519(&DUMMY_VALIDATOR).unwrap(), + 10u32.into(), + )); const EVIDENCE_AND_PROTOBUF_OVERHEAD: u64 = 10 * 1024 * 1024; let size = block::Size { // maximum size of a serialized Tendermint block. 
diff --git a/crates/tests/src/integration/ledger_tests.rs b/crates/tests/src/integration/ledger_tests.rs index 78690c8c54..c2303d006f 100644 --- a/crates/tests/src/integration/ledger_tests.rs +++ b/crates/tests/src/integration/ledger_tests.rs @@ -1818,12 +1818,8 @@ fn apply_snapshot() -> Result<()> { let (node2, _services) = setup::setup()?; let (offer, resp) = { let shell = node.shell.lock().unwrap(); - let offer = shell - .list_snapshots() - .expect("Test failed") - .snapshots - .pop() - .expect("Test failed"); + let offer = + shell.list_snapshots().snapshots.pop().expect("Test failed"); let mut shell = node2.shell.lock().unwrap(); ( offer.clone(), @@ -1839,13 +1835,12 @@ fn apply_snapshot() -> Result<()> { let shell = node.shell.lock().unwrap(); let mut shell2 = node2.shell.lock().unwrap(); for c in 0..offer.chunks { - let chunk = shell - .load_snapshot_chunk(tm_request::LoadSnapshotChunk { + let chunk = + shell.load_snapshot_chunk(tm_request::LoadSnapshotChunk { height: (last_height.0 as u32).into(), format: 0, chunk: c, - }) - .expect("Test failed"); + }); let resp = shell2.apply_snapshot_chunk(tm_request::ApplySnapshotChunk { index: c, From 828345905a9d60dd976a1269ae97060673630646 Mon Sep 17 00:00:00 2001 From: satan Date: Thu, 22 Aug 2024 14:09:44 +0200 Subject: [PATCH 52/73] Added changelog --- .changelog/unreleased/improvements/3687-apply-snapshots.md | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 .changelog/unreleased/improvements/3687-apply-snapshots.md diff --git a/.changelog/unreleased/improvements/3687-apply-snapshots.md b/.changelog/unreleased/improvements/3687-apply-snapshots.md new file mode 100644 index 0000000000..24febb1f1e --- /dev/null +++ b/.changelog/unreleased/improvements/3687-apply-snapshots.md @@ -0,0 +1,7 @@ +- Addresses the remaining points of Issue [\#3307](https://github.com/anoma/namada/issues/3307) + + - Implements the `OfferSnapshot` ABCI call + - Implements the `ApplySnapshotChunk` ABCI call + - Adds integration 
tests + + ([\#3687](https://github.com/anoma/namada/pull/3687)) \ No newline at end of file From e9cdd8d4bf7d00955304f847ca46d4988d6b38dd Mon Sep 17 00:00:00 2001 From: satan Date: Thu, 22 Aug 2024 14:41:12 +0200 Subject: [PATCH 53/73] Allow configuring of snapshots to keep. Don't allow concurrent snapshot tasks --- crates/apps_lib/src/config/mod.rs | 3 ++ crates/node/src/shell/snapshots.rs | 2 +- crates/node/src/shims/abcipp_shim.rs | 53 ++++++++++++-------- crates/node/src/storage/rocksdb.rs | 15 +++--- crates/tests/src/integration/ledger_tests.rs | 2 +- 5 files changed, 47 insertions(+), 28 deletions(-) diff --git a/crates/apps_lib/src/config/mod.rs b/crates/apps_lib/src/config/mod.rs index 428da39e91..9f73b6c48a 100644 --- a/crates/apps_lib/src/config/mod.rs +++ b/crates/apps_lib/src/config/mod.rs @@ -127,6 +127,8 @@ pub struct Shell { /// When set, indicates after how many blocks a new snapshot /// will be taken (counting from the first block) pub blocks_between_snapshots: Option, + /// Number of snapshots to keep + pub snapshots_to_keep: Option, } impl Ledger { @@ -156,6 +158,7 @@ impl Ledger { action_at_height: None, tendermint_mode: mode, blocks_between_snapshots: None, + snapshots_to_keep: None, }, cometbft: tendermint_config, ethereum_bridge: ethereum_bridge::ledger::Config::default(), diff --git a/crates/node/src/shell/snapshots.rs b/crates/node/src/shell/snapshots.rs index c1fe457bf4..6f603ff509 100644 --- a/crates/node/src/shell/snapshots.rs +++ b/crates/node/src/shell/snapshots.rs @@ -19,7 +19,7 @@ pub const MAX_SENDER_STRIKES: u64 = 5; impl Shell { /// List the snapshot files held locally. Furthermore, the number /// of chunks, as hash of each chunk, and a hash of the chunk - /// metadata are provided so that syncing nodes can verify can verify + /// metadata are provided so that syncing nodes can verify /// snapshots they receive. 
pub fn list_snapshots(&self) -> tm_response::ListSnapshots { if self.blocks_between_snapshots.is_none() { diff --git a/crates/node/src/shims/abcipp_shim.rs b/crates/node/src/shims/abcipp_shim.rs index 14d3709f58..91fef96f02 100644 --- a/crates/node/src/shims/abcipp_shim.rs +++ b/crates/node/src/shims/abcipp_shim.rs @@ -43,6 +43,7 @@ pub struct AbcippShim { tokio::sync::oneshot::Sender>, )>, snapshot_task: Option>>, + snapshots_to_keep: u64, } impl AbcippShim { @@ -65,6 +66,7 @@ impl AbcippShim { let (shell_send, shell_recv) = std::sync::mpsc::channel(); let (server_shutdown, _) = broadcast::channel::<()>(1); let action_at_height = config.shell.action_at_height.clone(); + let snapshots_to_keep = config.shell.snapshots_to_keep.map(|n| n.get()).unwrap_or(1); ( Self { service: Shell::new( @@ -81,6 +83,7 @@ impl AbcippShim { delivered_txs: vec![], shell_recv, snapshot_task: None, + snapshots_to_keep, }, AbciService { shell_send, @@ -186,25 +189,37 @@ impl AbcippShim { } fn update_snapshot_task(&mut self, take_snapshot: TakeSnapshot) { - let snapshot_taken = self - .snapshot_task - .as_ref() - .map(|t| t.is_finished()) - .unwrap_or_default(); - if snapshot_taken { - let task = self.snapshot_task.take().unwrap(); - match task.join() { - Ok(Err(e)) => tracing::error!( - "Failed to create snapshot with error: {:?}", - e - ), - Err(e) => tracing::error!( - "Failed to join thread creating snapshot: {:?}", - e - ), - _ => {} + let snapshot_taken = + self.snapshot_task.as_ref().map(|t| t.is_finished()); + match snapshot_taken { + Some(true) => { + let task = self.snapshot_task.take().unwrap(); + match task.join() { + Ok(Err(e)) => tracing::error!( + "Failed to create snapshot with error: {:?}", + e + ), + Err(e) => tracing::error!( + "Failed to join thread creating snapshot: {:?}", + e + ), + _ => {} + } + } + Some(false) => { + // if a snapshot task is still running, + // we don't start a new one. 
This is not + // expected to happen if snapshots are spaced + // far enough apart. + tracing::warn!( + "Previous snapshot task was still running when a new \ + snapshot was scheduled" + ); + return; } + _ => {} } + let TakeSnapshot::Yes(db_path) = take_snapshot else { return; }; @@ -225,7 +240,7 @@ impl AbcippShim { .height; let cfs = db.column_families(); snapshot.write_to_file(cfs, base_dir.clone(), last_height)?; - DbSnapshot::cleanup(last_height, &base_dir) + DbSnapshot::cleanup(last_height, &base_dir, self.snapshots_to_keep) }); // it's important that the thread is @@ -235,8 +250,6 @@ impl AbcippShim { if snap_recv.blocking_recv().is_err() { tracing::error!("Failed to start snapshot task.") } else { - // N.B. If a task is still running, it will continue - // in the background but we will forget about it. self.snapshot_task.replace(snapshot_task); } } diff --git a/crates/node/src/storage/rocksdb.rs b/crates/node/src/storage/rocksdb.rs index e8da8e0c2a..b6c3f8f24f 100644 --- a/crates/node/src/storage/rocksdb.rs +++ b/crates/node/src/storage/rocksdb.rs @@ -821,9 +821,9 @@ pub struct SnapshotMetadata { pub struct DbSnapshot<'a>(pub rocksdb::Snapshot<'a>); impl<'a> DbSnapshot<'a> { - /// Write a snapshot of the database out to file. The last line - /// of the file contains metadata about how to break the file into - /// chunks. + /// Write a snapshot of the database out to file. Also + /// creates a file containing metadata about how to break + /// the file into chunks. pub fn write_to_file( &self, cfs: [(&'static str, &'a ColumnFamily); 6], @@ -858,16 +858,19 @@ impl<'a> DbSnapshot<'a> { Ok(()) } - /// Remove snapshots older than the latest + /// Keep `number_to_keep` latest snapshots. All others + /// are deleted. pub fn cleanup( latest_height: BlockHeight, base_dir: &Path, + number_to_keep: u64, ) -> std::io::Result<()> { for SnapshotMetadata { height, path_stem, .. } in Self::files(base_dir)? { - if height < latest_height { + // this is correct... 
don't worry about it + if height + number_to_keep < latest_height + 1 { let path = PathBuf::from(path_stem); _ = std::fs::remove_file(&path.with_extension("snap")); _ = std::fs::remove_file(path.with_extension("meta")); @@ -2983,7 +2986,7 @@ mod test { let mut path = base_dir.clone(); path.push("snapshot_0.bak"); _ = File::create(path).expect("Test failed"); - DbSnapshot::cleanup(2.into(), &base_dir).expect("Test failed"); + DbSnapshot::cleanup(2.into(), &base_dir, 1).expect("Test failed"); let mut expected = HashSet::from([ "snapshot_2.snap", "snapshot_2.meta", diff --git a/crates/tests/src/integration/ledger_tests.rs b/crates/tests/src/integration/ledger_tests.rs index c2303d006f..5962929601 100644 --- a/crates/tests/src/integration/ledger_tests.rs +++ b/crates/tests/src/integration/ledger_tests.rs @@ -1813,7 +1813,7 @@ fn apply_snapshot() -> Result<()> { snapshot .write_to_file(cfs, base_dir.to_path_buf(), last_height) .expect("Test failed"); - DbSnapshot::cleanup(last_height, base_dir).expect("Test failed"); + DbSnapshot::cleanup(last_height, base_dir, 1).expect("Test failed"); let (node2, _services) = setup::setup()?; let (offer, resp) = { From d01aab1544d970a64f1ccf08d64eba60ba494e64 Mon Sep 17 00:00:00 2001 From: satan Date: Thu, 22 Aug 2024 14:47:59 +0200 Subject: [PATCH 54/73] Formatting and linting --- crates/node/src/shims/abcipp_shim.rs | 6 ++++-- crates/node/src/storage/rocksdb.rs | 2 +- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/crates/node/src/shims/abcipp_shim.rs b/crates/node/src/shims/abcipp_shim.rs index 91fef96f02..64c51c6d1f 100644 --- a/crates/node/src/shims/abcipp_shim.rs +++ b/crates/node/src/shims/abcipp_shim.rs @@ -66,7 +66,8 @@ impl AbcippShim { let (shell_send, shell_recv) = std::sync::mpsc::channel(); let (server_shutdown, _) = broadcast::channel::<()>(1); let action_at_height = config.shell.action_at_height.clone(); - let snapshots_to_keep = config.shell.snapshots_to_keep.map(|n| n.get()).unwrap_or(1); + let 
snapshots_to_keep = + config.shell.snapshots_to_keep.map(|n| n.get()).unwrap_or(1); ( Self { service: Shell::new( @@ -226,6 +227,7 @@ impl AbcippShim { let base_dir = self.service.base_dir.clone(); let (snap_send, snap_recv) = tokio::sync::oneshot::channel(); + let snapshots_to_keep = self.snapshots_to_keep; let snapshot_task = std::thread::spawn(move || { let db = crate::storage::open(db_path, true, None) .expect("Could not open DB"); @@ -240,7 +242,7 @@ impl AbcippShim { .height; let cfs = db.column_families(); snapshot.write_to_file(cfs, base_dir.clone(), last_height)?; - DbSnapshot::cleanup(last_height, &base_dir, self.snapshots_to_keep) + DbSnapshot::cleanup(last_height, &base_dir, snapshots_to_keep) }); // it's important that the thread is diff --git a/crates/node/src/storage/rocksdb.rs b/crates/node/src/storage/rocksdb.rs index b6c3f8f24f..f975654a77 100644 --- a/crates/node/src/storage/rocksdb.rs +++ b/crates/node/src/storage/rocksdb.rs @@ -870,7 +870,7 @@ impl<'a> DbSnapshot<'a> { } in Self::files(base_dir)? { // this is correct... 
don't worry about it - if height + number_to_keep < latest_height + 1 { + if checked!(height + number_to_keep <= latest_height).unwrap() { let path = PathBuf::from(path_stem); _ = std::fs::remove_file(&path.with_extension("snap")); _ = std::fs::remove_file(path.with_extension("meta")); From 9ae5cfc3ff7efaf7aac4fd4545f1321595b85ce7 Mon Sep 17 00:00:00 2001 From: satan Date: Tue, 27 Aug 2024 10:35:15 +0200 Subject: [PATCH 55/73] Removed optional block height parameter for the insert entry method on rocksdb --- crates/node/src/storage/rocksdb.rs | 19 ++----------------- 1 file changed, 2 insertions(+), 17 deletions(-) diff --git a/crates/node/src/storage/rocksdb.rs b/crates/node/src/storage/rocksdb.rs index f975654a77..bfec9b2bee 100644 --- a/crates/node/src/storage/rocksdb.rs +++ b/crates/node/src/storage/rocksdb.rs @@ -738,32 +738,17 @@ impl RocksDB { .map_err(|e| Error::DBError(e.into_string())) } - /// Writes an entry directly to the db + /// Writes an entry directly to a db batch update + /// directly pub fn insert_entry( &self, batch: &mut RocksDBWriteBatch, - height: Option, cf: &DbColFam, key: &Key, new_value: impl AsRef<[u8]>, ) -> Result<()> { - let state_cf = self.get_column_family(STATE_CF)?; - let last_height: BlockHeight = self - .read_value(state_cf, BLOCK_HEIGHT_KEY)? 
- .ok_or_else(|| { - Error::DBError("No block height found".to_string()) - })?; - let desired_height = height.unwrap_or(last_height); - - if desired_height != last_height { - todo!( - "Overwriting values at heights different than the last \ - committed height hast yet to be implemented" - ); - } // NB: the following code only updates values // written to at the last committed height - let val = new_value.as_ref(); // Write the new key-val in the Db column family From 7950a764158da206e42a65a1bfeb7d0320879fc5 Mon Sep 17 00:00:00 2001 From: satan Date: Tue, 27 Aug 2024 10:54:28 +0200 Subject: [PATCH 56/73] Removed optional block height parameter for the insert entry method on rocksdb --- crates/node/src/shell/snapshots.rs | 2 +- crates/node/src/storage/rocksdb.rs | 5 ++--- crates/storage/src/db.rs | 1 - crates/storage/src/mockdb.rs | 1 - 4 files changed, 3 insertions(+), 6 deletions(-) diff --git a/crates/node/src/shell/snapshots.rs b/crates/node/src/shell/snapshots.rs index 6f603ff509..94fa943dfe 100644 --- a/crates/node/src/shell/snapshots.rs +++ b/crates/node/src/shell/snapshots.rs @@ -227,7 +227,7 @@ impl Shell { if self .state .db() - .insert_entry(&mut batch, None, &cf, &key, value) + .insert_entry(&mut batch, &cf, &key, value) .is_err() { return tm_response::ApplySnapshotChunk { diff --git a/crates/node/src/storage/rocksdb.rs b/crates/node/src/storage/rocksdb.rs index bfec9b2bee..dba948ea01 100644 --- a/crates/node/src/storage/rocksdb.rs +++ b/crates/node/src/storage/rocksdb.rs @@ -1750,12 +1750,11 @@ impl DB for RocksDB { fn overwrite_entry( &self, batch: &mut Self::WriteBatch, - height: Option, cf: &DbColFam, key: &Key, new_value: impl AsRef<[u8]>, ) -> Result<()> { - self.insert_entry(batch, height, cf, key, new_value.as_ref())?; + self.insert_entry(batch, cf, key, new_value.as_ref())?; let state_cf = self.get_column_family(STATE_CF)?; let last_height: BlockHeight = self .read_value(state_cf, BLOCK_HEIGHT_KEY)? 
@@ -1850,7 +1849,7 @@ impl<'db> DBUpdateVisitor for RocksDBUpdateVisitor<'db> { fn write(&mut self, key: &Key, cf: &DbColFam, value: impl AsRef<[u8]>) { self.db - .overwrite_entry(&mut self.batch, None, cf, key, value) + .overwrite_entry(&mut self.batch, cf, key, value) .expect("Failed to overwrite a key in storage") } diff --git a/crates/storage/src/db.rs b/crates/storage/src/db.rs index ce8c36e644..c5f4035099 100644 --- a/crates/storage/src/db.rs +++ b/crates/storage/src/db.rs @@ -285,7 +285,6 @@ pub trait DB: Debug { fn overwrite_entry( &self, batch: &mut Self::WriteBatch, - height: Option, cf: &DbColFam, key: &Key, new_value: impl AsRef<[u8]>, diff --git a/crates/storage/src/mockdb.rs b/crates/storage/src/mockdb.rs index 00d38d662b..a5a4969721 100644 --- a/crates/storage/src/mockdb.rs +++ b/crates/storage/src/mockdb.rs @@ -619,7 +619,6 @@ impl DB for MockDB { fn overwrite_entry( &self, _batch: &mut Self::WriteBatch, - _height: Option, _cf: &DbColFam, _key: &Key, _new_value: impl AsRef<[u8]>, From b3c3680a0740604a53666cac85936f37a83ef6b6 Mon Sep 17 00:00:00 2001 From: Tiago Carvalho Date: Fri, 23 Aug 2024 13:44:44 +0100 Subject: [PATCH 57/73] Add `zstd` and `tar` deps --- Cargo.lock | 24 ++++++++++++++++++++++-- Cargo.toml | 1 + crates/node/Cargo.toml | 2 ++ 3 files changed, 25 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 37cb5f6d84..b024136a0c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5101,6 +5101,7 @@ dependencies = [ "smooth-operator", "sparse-merkle-tree", "sysinfo", + "tar", "tempfile", "test-log", "thiserror", @@ -5112,6 +5113,7 @@ dependencies = [ "tracing", "tracing-subscriber", "warp", + "zstd", ] [[package]] @@ -9936,11 +9938,29 @@ dependencies = [ "syn 2.0.52", ] +[[package]] +name = "zstd" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcf2b778a664581e31e389454a7072dab1647606d44f7feea22cd5abb9c9f3f9" +dependencies = [ + "zstd-safe", +] + +[[package]] +name = 
"zstd-safe" +version = "7.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "54a3ab4db68cea366acc5c897c7b4d4d1b8994a9cd6e6f841f8964566a419059" +dependencies = [ + "zstd-sys", +] + [[package]] name = "zstd-sys" -version = "2.0.9+zstd.1.5.5" +version = "2.0.13+zstd.1.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e16efa8a874a0481a574084d34cc26fdb3b99627480f785888deb6386506656" +checksum = "38ff0f21cfee8f97d94cef41359e0c89aa6113028ab0291aa8ca0038995a95aa" dependencies = [ "cc", "pkg-config", diff --git a/Cargo.toml b/Cargo.toml index c7b76efee0..e483d554bc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -214,6 +214,7 @@ winapi = "0.3.9" xorf = { version = "0.11.0", features = ["serde"] } yansi = "0.5.1" zeroize = { version = "1.5.5", features = ["zeroize_derive"] } +zstd = "0.13.2" [patch.crates-io] # Patch to the fork containing the correct personalization and basepoints for masp diff --git a/crates/node/Cargo.toml b/crates/node/Cargo.toml index 0074bcbe1a..8c10c79ab1 100644 --- a/crates/node/Cargo.toml +++ b/crates/node/Cargo.toml @@ -91,6 +91,7 @@ serde_json = {workspace = true, features = ["raw_value"]} sha2.workspace = true smooth-operator.workspace = true sysinfo.workspace = true +tar.workspace = true tempfile.workspace = true thiserror.workspace = true tokio = {workspace = true, features = ["full"]} @@ -100,6 +101,7 @@ tower.workspace = true tracing-subscriber = { workspace = true, optional = true, features = ["std", "json", "ansi", "tracing-log"]} tracing.workspace = true warp = "0.3.2" +zstd.workspace = true [dev-dependencies] namada_apps_lib = {path = "../apps_lib", features = ["testing"]} From 8e07a8c2567725d28146628362d4882e321c7825 Mon Sep 17 00:00:00 2001 From: Tiago Carvalho Date: Fri, 23 Aug 2024 14:04:33 +0100 Subject: [PATCH 58/73] Optimize state sync snapshots --- crates/node/src/shell/mod.rs | 49 +- crates/node/src/shell/snapshots.rs | 130 ++--- crates/node/src/shims/abcipp_shim.rs | 
21 +- crates/node/src/shims/abcipp_shim_types.rs | 11 +- crates/node/src/storage/mod.rs | 4 +- crates/node/src/storage/rocksdb.rs | 584 ++++++++++++--------- crates/storage/src/db.rs | 7 + crates/storage/src/mockdb.rs | 5 + 8 files changed, 466 insertions(+), 345 deletions(-) diff --git a/crates/node/src/shell/mod.rs b/crates/node/src/shell/mod.rs index 6ab6d292b8..997b6763e4 100644 --- a/crates/node/src/shell/mod.rs +++ b/crates/node/src/shell/mod.rs @@ -348,6 +348,7 @@ pub struct SnapshotSync { pub height: BlockHeight, pub expected: Vec, pub strikes: u64, + pub snapshot: std::fs::File, } #[derive(Debug)] @@ -421,6 +422,48 @@ impl EthereumOracleChannels { } } +impl Shell { + /// Restore the database with data fetched from the State Sync protocol. + pub fn restore_database_from_state_sync(&mut self) { + let Some(syncing) = self.syncing.as_mut() else { + return; + }; + + let db_block_cache_size_bytes = { + let config = crate::config::Config::load( + &self.base_dir, + &self.chain_id, + None, + ); + + config.ledger.shell.block_cache_bytes.unwrap_or_else(|| { + use sysinfo::{RefreshKind, System, SystemExt}; + + let sys = System::new_with_specifics( + RefreshKind::new().with_memory(), + ); + let available_memory_bytes = sys.available_memory(); + + available_memory_bytes / 3 + }) + }; + + let db_cache = rocksdb::Cache::new_lru_cache( + usize::try_from(db_block_cache_size_bytes).expect( + "`db_block_cache_size_bytes` must not exceed `usize::MAX`", + ), + ); + + self.state + .db_mut() + .restore_from((&db_cache, &mut syncing.snapshot)) + .expect("Failed to restore state from snapshot"); + + // rebuild the in-memory state + self.state.load_last_state(); + } +} + impl Shell where D: DB + for<'iter> DBIter<'iter> + Sync + 'static, @@ -774,7 +817,11 @@ where _ => false, }; if take_snapshot { - self.state.db().path().into() + self.state + .db() + .path() + .map(|p| (p, self.state.in_mem().get_last_block_height())) + .into() } else { TakeSnapshot::No } diff --git 
a/crates/node/src/shell/snapshots.rs b/crates/node/src/shell/snapshots.rs index 94fa943dfe..a7c6efb3de 100644 --- a/crates/node/src/shell/snapshots.rs +++ b/crates/node/src/shell/snapshots.rs @@ -1,3 +1,5 @@ +use std::io::Write; + use borsh::BorshDeserialize; use borsh_ext::BorshSerializeExt; use namada_sdk::arith::checked; @@ -12,7 +14,7 @@ use crate::facade::tendermint::v0_37::abci::{ }; use crate::shell::Shell; use crate::storage; -use crate::storage::{DbSnapshot, SnapshotMetadata}; +use crate::storage::{DbSnapshot, DbSnapshotMeta}; pub const MAX_SENDER_STRIKES: u64 = 5; @@ -26,25 +28,37 @@ impl Shell { Default::default() } else { tracing::info!("Request for snapshots received."); - let Ok(snapshots) = DbSnapshot::files(&self.base_dir) else { + let Ok(snapshot_heights) = + DbSnapshot::heights_of_stored_snapshots(&self.base_dir) + else { + tracing::debug!("Could not read heights of stored snapshots"); return Default::default(); }; - let snapshots = snapshots - .into_iter() - .map(|SnapshotMetadata { height, chunks, .. 
}| { - let hashes = - chunks.iter().map(|c| c.hash).collect::>(); - let hash = Hash::sha256(hashes.serialize_to_vec()).0; - Snapshot { + let snapshots = DbSnapshot::load_snapshot_metadata( + &self.base_dir, + snapshot_heights, + ); + let Ok(snapshots) = snapshots + .map(|result| { + let DbSnapshotMeta { + height, + chunk_hashes, + root_hash, + } = result?; + std::io::Result::Ok(Snapshot { height: u32::try_from(height.0).unwrap().into(), - format: 0, + format: DbSnapshot::FORMAT_MAGIC, #[allow(clippy::cast_possible_truncation)] - chunks: chunks.len() as u32, - hash: hash.into_iter().collect(), - metadata: hashes.serialize_to_vec().into(), - } + chunks: chunk_hashes.len() as u32, + hash: root_hash.0.to_vec().into(), + metadata: chunk_hashes.serialize_to_vec().into(), + }) }) - .collect(); + .collect() + else { + tracing::debug!("Could not read stored snapshot meta"); + return Default::default(); + }; tm_response::ListSnapshots { snapshots } } @@ -56,19 +70,24 @@ impl Shell { &self, req: tm_request::LoadSnapshotChunk, ) -> tm_response::LoadSnapshotChunk { - let Ok(chunk) = DbSnapshot::load_chunk( + let chunk = match DbSnapshot::load_chunk( BlockHeight(req.height.into()), u64::from(req.chunk), &self.base_dir, - ) else { - tracing::debug!( - "Received a request for a snapshot we do not possess" - ); - // N.B. if the snapshot is no longer present, - // this will not match the hash in the metadata and will - // be rejected by syncing nodes. We don't return an error - // so as not to crash this node. - return Default::default(); + ) { + Ok(chunk) => chunk, + Err(err) => { + tracing::debug!( + ?req, + error = %err, + "Received a request for a snapshot we do not possess" + ); + // N.B. if the snapshot is no longer present, + // this will not match the hash in the metadata and will + // be rejected by syncing nodes. We don't return an error + // so as not to crash this node. 
+ return Default::default(); + } }; tracing::info!( "Loading snapshot at height {}, chunk number {}", @@ -76,7 +95,7 @@ impl Shell { req.chunk, ); tm_response::LoadSnapshotChunk { - chunk: chunk.into_iter().collect(), + chunk: chunk.into(), } } @@ -85,6 +104,13 @@ impl Shell { &mut self, req: tm_request::OfferSnapshot, ) -> tm_response::OfferSnapshot { + if req.snapshot.format != DbSnapshot::FORMAT_MAGIC { + tracing::debug!( + format = req.snapshot.format, + "Received snapshot with an incompatible format" + ); + return tm_response::OfferSnapshot::Reject; + } match self.syncing.as_ref() { None => { if self.state.get_block_height().unwrap_or_default().0 @@ -100,6 +126,8 @@ impl Shell { height: u64::from(req.snapshot.height).into(), expected: chunks, strikes: 0, + snapshot: tempfile::tempfile() + .expect("Failed to create snapshot temp file"), }); tracing::info!("Accepting snapshot offer"); tm_response::OfferSnapshot::Accept @@ -121,6 +149,8 @@ impl Shell { height: u64::from(req.snapshot.height).into(), expected: chunks, strikes: 0, + snapshot: tempfile::tempfile() + .expect("Failed to create snapshot temp file"), }); tracing::info!("Accepting snapshot offer"); tm_response::OfferSnapshot::Accept @@ -139,8 +169,8 @@ impl Shell { ) -> tm_response::ApplySnapshotChunk { let Some(snapshot_sync) = self.syncing.as_mut() else { tracing::warn!("Received a snapshot although none were requested"); - // if we are not currently syncing, abort this sync protocol - // the syncing status is set by `OfferSnapshot`. + // if we are not currently syncing, abort this sync protocol the + // syncing status is set by `OfferSnapshot`. return tm_response::ApplySnapshotChunk { result: ApplySnapshotChunkResult::Abort, refetch_chunks: vec![], @@ -215,45 +245,23 @@ impl Shell { } else { snapshot_sync.strikes = 0; }; - // when we first start applying a snapshot, - // clear the existing db. 
- if req.index == 0 { - self.state.db_mut().clear(snapshot_sync.height).unwrap(); - } - // apply snapshot changes to the database - // retry if an error occurs - let mut batch = Default::default(); - for (cf, key, value) in DbSnapshot::parse_chunk(&req.chunk) { - if self - .state - .db() - .insert_entry(&mut batch, &cf, &key, value) - .is_err() - { - return tm_response::ApplySnapshotChunk { - result: ApplySnapshotChunkResult::Retry, - refetch_chunks: vec![], - reject_senders: vec![], - }; - } - } - if self.state.db().exec_batch(batch).is_err() { - return tm_response::ApplySnapshotChunk { - result: ApplySnapshotChunkResult::Retry, - refetch_chunks: vec![], - reject_senders: vec![], - }; - } + + // write snapshot chunk + snapshot_sync + .snapshot + .write_all(&req.chunk) + .expect("Failed to save snapshot chunk"); // increment the chunk counter snapshot_sync.next_chunk = checked!(snapshot_sync.next_chunk + 1).unwrap(); - // check if all chunks have been applied + + // check if all chunks have been saved, and restore the + // database from the fetched tar archive if snapshot_sync.next_chunk == snapshot_sync.expected.len() as u64 { - tracing::info!("Snapshot completely applied"); + self.restore_database_from_state_sync(); self.syncing = None; - // rebuild the in-memory state - self.state.load_last_state(); + tracing::info!("Snapshot completely applied"); } tm_response::ApplySnapshotChunk { diff --git a/crates/node/src/shims/abcipp_shim.rs b/crates/node/src/shims/abcipp_shim.rs index 64c51c6d1f..6344ac10e8 100644 --- a/crates/node/src/shims/abcipp_shim.rs +++ b/crates/node/src/shims/abcipp_shim.rs @@ -4,10 +4,11 @@ use std::pin::Pin; use std::task::{Context, Poll}; use futures::future::FutureExt; +use namada_apps_lib::state::DbError; use namada_sdk::chain::BlockHeight; use namada_sdk::hash::Hash; use namada_sdk::migrations::ScheduledMigration; -use namada_sdk::state::{ProcessProposalCachedResult, DB}; +use namada_sdk::state::ProcessProposalCachedResult; use 
namada_sdk::tendermint::abci::response::ProcessProposal; use namada_sdk::time::{DateTimeUtc, Utc}; use namada_sdk::tx::data::hash_tx; @@ -28,7 +29,6 @@ use crate::facade::tendermint::v0_37::abci::{ }; use crate::facade::tower_abci::BoxError; use crate::shell::{EthereumOracleChannels, Shell}; -use crate::storage::DbSnapshot; /// The shim wraps the shell, which implements ABCI++. /// The shim makes a crude translation between the ABCI interface currently used @@ -221,28 +221,21 @@ impl AbcippShim { _ => {} } - let TakeSnapshot::Yes(db_path) = take_snapshot else { + let TakeSnapshot::Yes(db_path, height) = take_snapshot else { return; }; let base_dir = self.service.base_dir.clone(); let (snap_send, snap_recv) = tokio::sync::oneshot::channel(); - let snapshots_to_keep = self.snapshots_to_keep; + // let snapshots_to_keep = self.snapshots_to_keep; let snapshot_task = std::thread::spawn(move || { let db = crate::storage::open(db_path, true, None) .expect("Could not open DB"); - let snapshot = db.snapshot(); + let snapshot = db.checkpoint(base_dir, height).unwrap(); // signal to main thread that the snapshot has finished snap_send.send(()).unwrap(); - - let last_height = db - .read_last_block() - .expect("Could not read database") - .expect("Last block should exists") - .height; - let cfs = db.column_families(); - snapshot.write_to_file(cfs, base_dir.clone(), last_height)?; - DbSnapshot::cleanup(last_height, &base_dir, snapshots_to_keep) + snapshot.package().unwrap(); + Ok(()) }); // it's important that the thread is diff --git a/crates/node/src/shims/abcipp_shim_types.rs b/crates/node/src/shims/abcipp_shim_types.rs index e7cde20221..32d53e20aa 100644 --- a/crates/node/src/shims/abcipp_shim_types.rs +++ b/crates/node/src/shims/abcipp_shim_types.rs @@ -4,6 +4,7 @@ pub mod shim { use std::fmt::Debug; use std::path::PathBuf; + use namada_sdk::state::BlockHeight; use thiserror::Error; use super::{Request as Req, Response as Resp}; @@ -36,14 +37,16 @@ pub mod shim { /// at a 
certain point in time pub enum TakeSnapshot { No, - Yes(PathBuf), + Yes(PathBuf, BlockHeight), } - impl> From> for TakeSnapshot { - fn from(value: Option) -> Self { + impl> From> + for TakeSnapshot + { + fn from(value: Option<(T, BlockHeight)>) -> Self { match value { None => TakeSnapshot::No, - Some(p) => TakeSnapshot::Yes(p.as_ref().to_path_buf()), + Some(p) => TakeSnapshot::Yes(p.0.as_ref().to_path_buf(), p.1), } } } diff --git a/crates/node/src/storage/mod.rs b/crates/node/src/storage/mod.rs index 308af5aa3e..95b30088d5 100644 --- a/crates/node/src/storage/mod.rs +++ b/crates/node/src/storage/mod.rs @@ -10,9 +10,7 @@ use arse_merkle_tree::traits::Hasher; use arse_merkle_tree::H256; use blake2b_rs::{Blake2b, Blake2bBuilder}; use namada_sdk::state::{FullAccessState, StorageHasher}; -pub use rocksdb::{ - open, Chunk, DbSnapshot, RocksDBUpdateVisitor, SnapshotMetadata, -}; +pub use rocksdb::{open, DbSnapshot, DbSnapshotMeta, RocksDBUpdateVisitor}; #[derive(Default)] pub struct PersistentStorageHasher(Blake2bHasher); diff --git a/crates/node/src/storage/rocksdb.rs b/crates/node/src/storage/rocksdb.rs index dba948ea01..3503d427fe 100644 --- a/crates/node/src/storage/rocksdb.rs +++ b/crates/node/src/storage/rocksdb.rs @@ -46,7 +46,8 @@ use std::ffi::OsStr; use std::fs::File; -use std::io::{BufRead, BufReader, BufWriter, ErrorKind, Write}; +use std::io::{BufRead, BufReader, BufWriter, ErrorKind, Read, Seek, Write}; +use std::mem::ManuallyDrop; use std::path::{Path, PathBuf}; use std::str::FromStr; use std::sync::Mutex; @@ -114,15 +115,19 @@ const ADDRESS_GEN_KEY_SEGMENT: &str = "address_gen"; const OLD_DIFF_PREFIX: &str = "old"; const NEW_DIFF_PREFIX: &str = "new"; -const MAX_CHUNK_SIZE: usize = 10_000_000; + +// 10 MB +const MAX_STATE_SYNC_CHUNK_SIZE: usize = 10_000_000; /// RocksDB handle #[derive(Debug)] pub struct RocksDB { /// Handle to the db - inner: rocksdb::DB, + inner: ManuallyDrop, /// Indicates if read only read_only: bool, + /// Whether the handle is 
invalid + invalid_handle: bool, } /// DB Handle for batch writes. @@ -230,16 +235,22 @@ pub fn open( )); Ok(if read_only { RocksDB { - inner: rocksdb::DB::open_cf_descriptors_read_only( - &db_opts, path, cfs, false, - ) - .map_err(|e| Error::DBError(e.into_string()))?, + inner: ManuallyDrop::new( + rocksdb::DB::open_cf_descriptors_read_only( + &db_opts, path, cfs, false, + ) + .map_err(|e| Error::DBError(e.into_string()))?, + ), + invalid_handle: false, read_only: true, } } else { RocksDB { - inner: rocksdb::DB::open_cf_descriptors(&db_opts, path, cfs) - .map_err(|e| Error::DBError(e.into_string()))?, + inner: ManuallyDrop::new( + rocksdb::DB::open_cf_descriptors(&db_opts, path, cfs) + .map_err(|e| Error::DBError(e.into_string()))?, + ), + invalid_handle: false, read_only: false, } }) @@ -247,9 +258,13 @@ pub fn open( impl Drop for RocksDB { fn drop(&mut self) { + if self.invalid_handle { + return; + } if !self.read_only { self.flush(true).expect("flush failed"); } + unsafe { ManuallyDrop::drop(&mut self.inner) } } } @@ -500,8 +515,27 @@ impl RocksDB { buf.flush().expect("Unable to write to output file"); } - pub fn snapshot(&self) -> DbSnapshot<'_> { - DbSnapshot(self.inner.snapshot()) + /// Create a checkpoint of the state in RocksDB at block height + /// `block_height`. + pub fn checkpoint( + &self, + base_dir: PathBuf, + block_height: BlockHeight, + ) -> Result { + let checkpoint = rocksdb::checkpoint::Checkpoint::new(&self.inner) + .map_err(|e| Error::DBError(e.to_string()))?; + let snapshot_path = SnapshotPath(base_dir, block_height); + std::fs::create_dir_all(snapshot_path.base()).or_else(|e| { + if e.kind() == std::io::ErrorKind::AlreadyExists { + Ok(()) + } else { + Err(Error::DBError(e.to_string())) + } + })?; + checkpoint + .create_checkpoint(snapshot_path.temp_rocksdb()) + .map_err(|e| Error::DBError(e.to_string()))?; + Ok(DbSnapshot(snapshot_path)) } /// Rollback to previous block. 
Given the inner working of tendermint @@ -790,56 +824,187 @@ impl RocksDB { } } -/// Information about a particular snapshot -/// owned by a node -pub struct SnapshotMetadata { - /// The height at which the snapshot was taken +/// The path to a snapshot. +#[derive(Clone, Debug)] +pub struct SnapshotPath(PathBuf, BlockHeight); + +impl SnapshotPath { + /// Return the root path where snapshots are stored. + pub fn snapshot_root_path(mut base_dir: PathBuf) -> PathBuf { + base_dir.push("snapshots"); + base_dir + } + + /// Remove all data pertaining to the current snapshot. + pub fn remove(&self) -> std::io::Result<()> { + std::fs::remove_dir_all(self.base()) + } + + /// Return the base path associated with this [`SnapshotPath`]. + pub fn base(&self) -> PathBuf { + let mut buf = Self::snapshot_root_path(self.0.clone()); + let height = self.1.0; + buf.push(format!("block-{height:016}")); + buf + } + + /// Return the chunk hashes path associated with this [`SnapshotPath`]. + pub fn chunk_hashes(&self) -> PathBuf { + let mut buf = self.base(); + buf.push("chunks-hashed"); + buf + } + + /// Return the root of the chunk hashes tree path associated with this + /// [`SnapshotPath`]. + pub fn chunks_root_hash(&self) -> PathBuf { + let mut buf = self.base(); + buf.push("chunks-root-hash"); + buf + } + + /// Return the temporary rocksdb path associated with this [`SnapshotPath`]. + pub fn temp_rocksdb(&self) -> PathBuf { + let mut buf = self.base(); + buf.push("db"); + buf + } + + /// Return the temporary tarball path associated with this [`SnapshotPath`]. + /// + /// The value of `compression_extension` should reflect the compression + /// algorithm used (e.g. `gz` for Gzip). + pub fn temp_tarball(&self, compression_extension: &str) -> PathBuf { + let mut buf = self.base(); + buf.push(format!("db.tar.{compression_extension}")); + buf + } + + /// Return the path of the chunk `chk` associated with this + /// [`SnapshotPath`]. 
+ pub fn chunk_with_id(&self, chk: usize) -> PathBuf { + let mut buf = self.base(); + buf.push(format!("chunk-{chk:032}")); + buf + } +} + +/// Metadata pertaining to some database snapshot. +#[derive(Debug)] +pub struct DbSnapshotMeta { + /// The height of the snapshot. pub height: BlockHeight, - /// The name of the paths to the file and metadata - /// holding the snapshot minus extensions - pub path_stem: String, - /// Data about the chunks that the snapshot is - /// partitioned into - pub chunks: Vec, + /// List of the hashes of all chunks. + pub chunk_hashes: Vec, + /// Hash of all the chunk hashes, forming a shallow tree. + pub root_hash: Hash, } -pub struct DbSnapshot<'a>(pub rocksdb::Snapshot<'a>); +pub struct DbSnapshot(pub SnapshotPath); -impl<'a> DbSnapshot<'a> { - /// Write a snapshot of the database out to file. Also - /// creates a file containing metadata about how to break - /// the file into chunks. - pub fn write_to_file( - &self, - cfs: [(&'static str, &'a ColumnFamily); 6], - base_dir: PathBuf, - height: BlockHeight, +impl DbSnapshot { + /// The magic number referring to the format of the snapshot. + pub const FORMAT_MAGIC: u32 = 0; + + /// Package and chunk the contents of the db snapshot. 
+ // NB: passing an owned `self` guarantees we don't attempt to call + // this method again, which removes the temporary checkpoint dir + // created by rocksdb + pub fn package(self) -> std::io::Result<()> { + self.build_tarball()?; + self.chunk_snapshot()?; + Ok(()) + } + + pub fn unpack( + archive_file: &mut std::fs::File, + dest: impl AsRef, ) -> std::io::Result<()> { - let [snap_file, metadata_file] = Self::paths(height, base_dir); - let file = File::create(snap_file)?; - let mut buf = BufWriter::new(file); - let mut chunker = Chunker::new(MAX_CHUNK_SIZE); - for (cf_name, cf) in cfs { - let read_opts = make_iter_read_opts(None); - let iter = - self.0.iterator_cf_opt(cf, read_opts, IteratorMode::Start); + use zstd::stream::read::Decoder; - for (key, raw_val, _gas) in PersistentPrefixIterator( - PrefixIterator::new(iter, String::default()), - // Empty string to prevent prefix stripping, the prefix is - // already in the enclosed iterator - ) { - let val = base64::encode(raw_val); - let bytes = format!("{cf_name}:{key}={val}\n"); - chunker.add_line(&bytes); - buf.write_all(bytes.as_bytes())?; + let file_buf_reader = std::io::BufReader::new(archive_file); + let zstd_decoder = Decoder::new(file_buf_reader)?; + + let mut archive = tar::Archive::new(zstd_decoder); + archive.unpack(dest)?; + + Ok(()) + } + + fn build_tarball(&self) -> std::io::Result<()> { + use zstd::stream::write::Encoder; + + let snapshot_temp_db_path = self.0.temp_rocksdb(); + + let mut tar_builder = { + let file_handle = File::create(self.0.temp_tarball("zst"))?; + let zstd_encoder = Encoder::new(file_handle, 0)?.auto_finish(); + tar::Builder::new(zstd_encoder) + }; + + // build tarball with rocksdb checkpoint contents + tar_builder.append_dir_all("db", &snapshot_temp_db_path)?; + tar_builder.finish()?; + _ = tar_builder; + + // remove aux checkpoint dir + std::fs::remove_dir_all(&snapshot_temp_db_path) + } + + fn chunk_snapshot(&self) -> std::io::Result<()> { + let tarball_path = 
self.0.temp_tarball("zst"); + + let mut buf = vec![0; MAX_STATE_SYNC_CHUNK_SIZE]; + let mut file = File::open(&tarball_path)?; + + let mut eof = false; + let mut chunk_hashes = vec![]; + + // TODO: we can use tokio here to read chunks + // in parallel + // + // 1. determine tar archive size + // 2. spawn len / MAX_STATE_SYNC_CHUNK_SIZE tasks + // 3. spawn one more task if necessary to read chunk smaller than + // MAX_STATE_SYNC_CHUNK_SIZE + // 4. assemble read data (need to store hash of the chunk) + + for chunk_id in 0.. { + let mut read = 0; + + // read up to `MAX_STATE_SYNC_CHUNK_SIZE` bytes + while read != MAX_STATE_SYNC_CHUNK_SIZE { + match file.read(&mut buf[read..]) { + Ok(0) => { + eof = true; + break; + } + Ok(n) => checked!(read += n).unwrap(), + Err(e) if e.kind() == std::io::ErrorKind::Interrupted => {} + Err(e) => return Err(e), + } + } + + let chunk = &buf[..read]; + + std::fs::write(self.0.chunk_with_id(chunk_id), chunk)?; + chunk_hashes.push(Hash::sha256(chunk)); + + if eof { + break; } - buf.flush()?; } - buf.flush()?; - let chunks = chunker.finalize(); - let metadata = base64::encode(chunks.serialize_to_vec()); - std::fs::write(metadata_file, metadata.as_bytes())?; + + let chunk_hashes = chunk_hashes.serialize_to_vec(); + let hash_of_all_chunks = Hash::sha256(&chunk_hashes); + let snapshot_hash = Hash::sha256( + (Self::FORMAT_MAGIC, hash_of_all_chunks).serialize_to_vec(), + ); + + std::fs::remove_file(tarball_path)?; + std::fs::write(self.0.chunk_hashes(), chunk_hashes)?; + std::fs::write(self.0.chunks_root_hash(), snapshot_hash)?; + Ok(()) } @@ -850,92 +1015,78 @@ impl<'a> DbSnapshot<'a> { base_dir: &Path, number_to_keep: u64, ) -> std::io::Result<()> { - for SnapshotMetadata { - height, path_stem, .. - } in Self::files(base_dir)? - { + let latest_height = latest_height.0; + for height in Self::heights_of_stored_snapshots(base_dir)? { // this is correct... 
don't worry about it if checked!(height + number_to_keep <= latest_height).unwrap() { - let path = PathBuf::from(path_stem); - _ = std::fs::remove_file(&path.with_extension("snap")); - _ = std::fs::remove_file(path.with_extension("meta")); + let snap = SnapshotPath(base_dir.into(), BlockHeight(height)); + snap.remove()?; } } Ok(()) } - /// List all snapshot files along with the block height at which - /// they were created and their chunks. - pub fn files(base_dir: &Path) -> std::io::Result> { - let snap = OsStr::new("snap"); - let meta = OsStr::new("meta"); - let mut files = - HashMap::, Option>)>::new(); - for entry in std::fs::read_dir(base_dir)? { + /// Load the metadata of the given snapshot heights. + pub fn load_snapshot_metadata( + base_dir: &Path, + snapshot_heights: impl IntoIterator, + ) -> impl Iterator> { + let mut iter = snapshot_heights.into_iter(); + let base_dir = base_dir.to_owned(); + + std::iter::from_fn(move || { + let height = BlockHeight(iter.next()?); + + let load = || { + let snap = SnapshotPath(base_dir.clone(), height); + + let chunk_hashes = BorshDeserialize::try_from_slice( + &std::fs::read(snap.chunk_hashes())?, + )?; + let root_hash = BorshDeserialize::try_from_slice( + &std::fs::read(snap.chunks_root_hash())?, + )?; + + Ok(DbSnapshotMeta { + height, + chunk_hashes, + root_hash, + }) + }; + + Some(load()) + }) + } + + /// List all block heights whose state snapshots exist. + pub fn heights_of_stored_snapshots( + base_dir: &Path, + ) -> std::io::Result> { + let snapshot_root = SnapshotPath::snapshot_root_path(base_dir.into()); + let mut heights = vec![]; + + for entry in std::fs::read_dir(snapshot_root)? 
{ let entry = entry?; let entry_path = entry.path(); - let entry_ext = entry_path.extension(); - if entry_path.is_file() - && (Some(snap) == entry_ext || Some(meta) == entry_ext) - { - if let Some(name) = entry.path().file_name() { - // Extract the block height from the file name - // (assuming the file name is of the correct format) - let Some(height) = name - .to_string_lossy() - .strip_prefix("snapshot_") - .and_then(|n| { - n.strip_suffix(".meta").or(n.strip_suffix(".snap")) - }) - .and_then(|h| BlockHeight::from_str(h).ok()) - else { - continue; - }; - // check if we have found the metadata file or snapshot file - // for a given block height - if entry_ext == Some(meta) { - let metadata = std::fs::read_to_string(entry_path)?; - let metadata_bytes = base64::decode( - metadata.as_bytes(), - ) - .map_err(|e| { - std::io::Error::new(ErrorKind::InvalidData, e) - })?; - let chunks: Vec = - BorshDeserialize::try_from_slice( - &metadata_bytes[..], - )?; - files.entry(height).or_default().1 = Some(chunks); - } else { - files.entry(height).or_default().0 = Some( - base_dir - .join(format!("snapshot_{}", height)) - .to_string_lossy() - .into(), - ); - } + + if entry_path.is_dir() { + let Some(file_name) = + entry_path.file_name().and_then(|f| f.to_str()) + else { + continue; }; + let Some(("block", height_str)) = file_name.split_once('-') + else { + continue; + }; + let Some(height): Option = height_str.parse().ok() else { + continue; + }; + heights.push(height); } } - let mut res = Vec::with_capacity(files.len()); - for (height, (path, chunks)) in files { - // only include snapshots which have both a .snap and .meta file. - if let Some((path_stem, chunks)) = path.zip(chunks) { - res.push(SnapshotMetadata { - height, - path_stem, - chunks, - }); - } - } - Ok(res) - } - /// Create a path to save a snapshot at a specific block height. 
- pub fn paths(height: BlockHeight, base_dir: PathBuf) -> [PathBuf; 2] { - let snap_file = base_dir.join(format!("snapshot_{}.snap", height)); - let metadata_file = base_dir.join(format!("snapshot_{}.meta", height)); - [snap_file, metadata_file] + Ok(heights) } /// Load the specified chunk of a snapshot at the given block height @@ -944,149 +1095,23 @@ impl<'a> DbSnapshot<'a> { chunk: u64, base_dir: &Path, ) -> std::io::Result> { - let files = Self::files(base_dir)?; - let Some(metadata) = files.into_iter().find(|m| m.height == height) - else { - return Err(std::io::Error::new( - ErrorKind::NotFound, - format!( - "Could not find the metadata file for the snapshot at \ - height {}", - height, - ), - )); - }; - let chunk_start = if chunk == 0 { - 0usize - } else { - let prev = checked!(usize::try_from(chunk).unwrap() - 1).unwrap(); - usize::try_from(metadata.chunks[prev].boundary).unwrap() - }; - let chunk_end = metadata - .chunks - .get(usize::try_from(chunk).unwrap()) - .ok_or_else(|| { - std::io::Error::new( - ErrorKind::InvalidInput, - format!("Chunk {} not found", chunk), - ) - })? 
- .boundary; - let chunk_end = usize::try_from(chunk_end).unwrap(); - - let file = File::open( - PathBuf::from(metadata.path_stem).with_extension("snap"), - )?; - let reader = BufReader::new(file); - let mut bytes: Vec = vec![]; - for line in reader - .lines() - .skip(chunk_start) - .take(checked!(chunk_end - chunk_start).unwrap()) - { - bytes.extend(line?.as_bytes()); - bytes.push(b'\n'); - } - Ok(bytes) - } - - pub fn parse_chunk(chunk: &[u8]) -> ChunkIterator<'_> { - let reader = std::io::BufReader::new(chunk); - ChunkIterator { - lines: reader.lines(), - } - } -} - -pub struct ChunkIterator<'a> { - lines: std::io::Lines>, -} - -impl<'a> Iterator for ChunkIterator<'a> { - type Item = (DbColFam, Key, Vec); - - fn next(&mut self) -> Option { - let line = self.lines.next()?.ok()?; - let line = line.trim(); - let mut iter = line.split(':'); - let cf = iter.next()?; - let rest = iter.next()?; - let mut iter = rest.split('='); - let key = iter.next()?; - let value = iter.next()?; - let cf = DbColFam::from_str(cf).ok()?; - let key = Key::parse(key).ok()?; - let value = base64::decode(value.as_bytes()).ok()?; - Some((cf, key, value)) - } -} - -/// A chunk of a snapshot. Includes the last line number in the file -/// for this chunk and a hash of the chunk contents. -#[derive( - Debug, Clone, Default, PartialEq, Eq, BorshSerialize, BorshDeserialize, Hash, -)] -pub struct Chunk { - /// The line number ending the chunk - pub boundary: u64, - /// Sha256 hash of the chunk - pub hash: Hash, -} - -/// Builds a set of chunks from a stream of lines to be -/// written to a file. 
-#[derive(Debug, Clone)] -struct Chunker { - chunks: Vec, - max_size: usize, - current_boundary: u64, - current_size: usize, - hasher: Sha256, -} -impl Chunker { - fn new(max_size: usize) -> Self { - Self { - chunks: vec![], - max_size, - current_boundary: 0, - current_size: 0, - hasher: Sha256::default(), - } + let snap = SnapshotPath(base_dir.into(), height); + #[allow(clippy::cast_possible_truncation)] + std::fs::read(snap.chunk_with_id(chunk as _)) } - fn add_line(&mut self, line: &str) { - if checked!(self.current_size + line.as_bytes().len()).unwrap() - > self.max_size - && self.current_boundary != 0 - { - let mut hasher = Sha256::default(); - std::mem::swap(&mut hasher, &mut self.hasher); - let hash: [u8; 32] = hasher.finalize().into(); - self.chunks.push(Chunk { - boundary: self.current_boundary, - hash: Hash(hash), - }); - self.current_size = 0; - } - - checked!(self.current_size += line.as_bytes().len()).unwrap(); - self.hasher.update(line.as_bytes()); - checked!(self.current_boundary += 1).unwrap(); - } - - fn finalize(mut self) -> Vec { - let hash: [u8; 32] = self.hasher.finalize().into(); - self.chunks.push(Chunk { - boundary: self.current_boundary, - hash: Hash(hash), - }); - self.chunks - } + // pub fn parse_chunk(chunk: &[u8]) -> ChunkIterator<'_> { + // let reader = std::io::BufReader::new(chunk); + // ChunkIterator { + // lines: reader.lines(), + // } + //} } impl DB for RocksDB { type Cache = rocksdb::Cache; type Migrator = DbUpdateType; + type RestoreSource<'a> = (&'a rocksdb::Cache, &'a mut std::fs::File); type WriteBatch = RocksDBWriteBatch; fn open( @@ -1096,6 +1121,41 @@ impl DB for RocksDB { open(db_path, false, cache).expect("cannot open the DB") } + fn restore_from( + &mut self, + (cache, snapshot): Self::RestoreSource<'_>, + ) -> Result<()> { + snapshot.rewind().map_err(|e| { + Error::DBError(format!("Failed to rewind snapshot file: {e}",)) + })?; + + let db_dir = self.inner.path().to_owned(); + + let unpack_dir = 
db_dir.parent().ok_or_else(|| { + Error::DBError(format!( + "Failed to query parent directory of db: {}", + db_dir.to_string_lossy() + )) + })?; + + // NB: close the current database handle. + // DON'T TRY THIS AT HOME KIDS. we are + // trained monkeys. + unsafe { + self.invalid_handle = true; + ManuallyDrop::drop(&mut self.inner); + } + + std::fs::remove_dir_all(&db_dir) + .expect("Failed to nuke database directory"); + DbSnapshot::unpack(snapshot, unpack_dir) + .expect("Failed to unpack new db"); + + *self = Self::open(db_dir, Some(cache)); + + Ok(()) + } + fn path(&self) -> Option<&Path> { Some(self.inner.path()) } diff --git a/crates/storage/src/db.rs b/crates/storage/src/db.rs index c5f4035099..869252aee7 100644 --- a/crates/storage/src/db.rs +++ b/crates/storage/src/db.rs @@ -126,12 +126,19 @@ pub trait DB: Debug { /// change to DB. type Migrator: DbMigration + DeserializeOwned; + /// Source data to restore a database. + type RestoreSource<'a>; + /// Open the database from provided path fn open( db_path: impl AsRef, cache: Option<&Self::Cache>, ) -> Self; + /// Overwrite the contents of the current database + /// with the data read from `source`. 
+ fn restore_from(&mut self, source: Self::RestoreSource<'_>) -> Result<()>; + /// Get the path to the db in the filesystem, /// if it exists (the DB may be in-memory only) fn path(&self) -> Option<&std::path::Path> { diff --git a/crates/storage/src/mockdb.rs b/crates/storage/src/mockdb.rs index a5a4969721..21859f5b7b 100644 --- a/crates/storage/src/mockdb.rs +++ b/crates/storage/src/mockdb.rs @@ -91,12 +91,17 @@ impl DB for MockDB { /// There is no cache for MockDB type Cache = (); type Migrator = (); + type RestoreSource<'a> = (); type WriteBatch = MockDBWriteBatch; fn open(_db_path: impl AsRef, _cache: Option<&Self::Cache>) -> Self { Self::default() } + fn restore_from(&mut self, _source: ()) -> Result<()> { + Ok(()) + } + fn flush(&self, _wait: bool) -> Result<()> { Ok(()) } From 362b503d3c1d7df578fc2ff7a3b004fbfbe8fa37 Mon Sep 17 00:00:00 2001 From: satan Date: Mon, 26 Aug 2024 14:53:06 +0200 Subject: [PATCH 59/73] Fixed snapshot integration tests --- crates/node/src/shell/snapshots.rs | 2 +- crates/node/src/shims/abcipp_shim.rs | 15 ++++++++++----- crates/node/src/storage/rocksdb.rs | 7 +++---- crates/tests/src/integration/ledger_tests.rs | 9 ++++----- 4 files changed, 18 insertions(+), 15 deletions(-) diff --git a/crates/node/src/shell/snapshots.rs b/crates/node/src/shell/snapshots.rs index a7c6efb3de..921165a495 100644 --- a/crates/node/src/shell/snapshots.rs +++ b/crates/node/src/shell/snapshots.rs @@ -4,7 +4,7 @@ use borsh::BorshDeserialize; use borsh_ext::BorshSerializeExt; use namada_sdk::arith::checked; use namada_sdk::hash::{Hash, Sha256Hasher}; -use namada_sdk::state::{BlockHeight, StorageRead, DB}; +use namada_sdk::state::{BlockHeight, StorageRead}; use super::SnapshotSync; use crate::facade::tendermint::abci::response::ApplySnapshotChunkResult; diff --git a/crates/node/src/shims/abcipp_shim.rs b/crates/node/src/shims/abcipp_shim.rs index 6344ac10e8..6b37723b6a 100644 --- a/crates/node/src/shims/abcipp_shim.rs +++ 
b/crates/node/src/shims/abcipp_shim.rs @@ -29,6 +29,7 @@ use crate::facade::tendermint::v0_37::abci::{ }; use crate::facade::tower_abci::BoxError; use crate::shell::{EthereumOracleChannels, Shell}; +use crate::storage::DbSnapshot; /// The shim wraps the shell, which implements ABCI++. /// The shim makes a crude translation between the ABCI interface currently used @@ -42,7 +43,7 @@ pub struct AbcippShim { Req, tokio::sync::oneshot::Sender>, )>, - snapshot_task: Option>>, + snapshot_task: Option>>, snapshots_to_keep: u64, } @@ -227,15 +228,19 @@ impl AbcippShim { let base_dir = self.service.base_dir.clone(); let (snap_send, snap_recv) = tokio::sync::oneshot::channel(); - // let snapshots_to_keep = self.snapshots_to_keep; + + let snapshots_to_keep = self.snapshots_to_keep; let snapshot_task = std::thread::spawn(move || { let db = crate::storage::open(db_path, true, None) .expect("Could not open DB"); - let snapshot = db.checkpoint(base_dir, height).unwrap(); + let snapshot = db.checkpoint(base_dir.clone(), height)?; // signal to main thread that the snapshot has finished snap_send.send(()).unwrap(); - snapshot.package().unwrap(); - Ok(()) + DbSnapshot::cleanup(height, &base_dir, snapshots_to_keep) + .map_err(|e| DbError::DBError(e.to_string()))?; + snapshot + .package() + .map_err(|e| DbError::DBError(e.to_string())) }); // it's important that the thread is diff --git a/crates/node/src/storage/rocksdb.rs b/crates/node/src/storage/rocksdb.rs index 3503d427fe..406c0f3d82 100644 --- a/crates/node/src/storage/rocksdb.rs +++ b/crates/node/src/storage/rocksdb.rs @@ -44,9 +44,8 @@ //! - `current/{hash}`: a hash included in the current block //! 
- `{hash}`: a hash included in previous blocks -use std::ffi::OsStr; use std::fs::File; -use std::io::{BufRead, BufReader, BufWriter, ErrorKind, Read, Seek, Write}; +use std::io::{BufWriter, Read, Seek, Write}; use std::mem::ManuallyDrop; use std::path::{Path, PathBuf}; use std::str::FromStr; @@ -58,7 +57,7 @@ use data_encoding::HEXLOWER; use itertools::Either; use namada_replay_protection as replay_protection; use namada_sdk::arith::checked; -use namada_sdk::collections::{HashMap, HashSet}; +use namada_sdk::collections::HashSet; use namada_sdk::eth_bridge::storage::bridge_pool; use namada_sdk::eth_bridge::storage::proof::BridgePoolRootProof; use namada_sdk::hash::Hash; @@ -83,7 +82,6 @@ use rocksdb::{ DBCompressionType, Direction, FlushOptions, IteratorMode, Options, ReadOptions, WriteBatch, }; -use sha2::{Digest, Sha256}; use crate::config::utils::num_of_threads; use crate::storage; @@ -1016,6 +1014,7 @@ impl DbSnapshot { number_to_keep: u64, ) -> std::io::Result<()> { let latest_height = latest_height.0; + for height in Self::heights_of_stored_snapshots(base_dir)? { // this is correct... 
don't worry about it if checked!(height + number_to_keep <= latest_height).unwrap() { diff --git a/crates/tests/src/integration/ledger_tests.rs b/crates/tests/src/integration/ledger_tests.rs index 5962929601..b5ca2372a6 100644 --- a/crates/tests/src/integration/ledger_tests.rs +++ b/crates/tests/src/integration/ledger_tests.rs @@ -1806,13 +1806,11 @@ fn apply_snapshot() -> Result<()> { let base_dir = node.test_dir.path(); let db = namada_node::storage::open(node.db_path(), true, None) .expect("Could not open DB"); - let snapshot = db.snapshot(); - let last_height = node.block_height(); - let cfs = db.column_families(); - snapshot - .write_to_file(cfs, base_dir.to_path_buf(), last_height) + let snapshot = db + .checkpoint(base_dir.to_path_buf(), last_height) .expect("Test failed"); + snapshot.package().expect("Test failed"); DbSnapshot::cleanup(last_height, base_dir, 1).expect("Test failed"); let (node2, _services) = setup::setup()?; @@ -1912,6 +1910,7 @@ fn snapshot_unhappy_flows() -> Result<()> { height: Default::default(), expected: vec![Default::default()], strikes: 0, + snapshot: tempfile::tempfile().unwrap(), }); } From 6a70c2e066998af4e7be75635f6cdde53b7d5753 Mon Sep 17 00:00:00 2001 From: satan Date: Mon, 26 Aug 2024 16:20:38 +0200 Subject: [PATCH 60/73] Added unit tests to snapshot logic --- crates/node/src/storage/rocksdb.rs | 468 +++++------------------------ 1 file changed, 70 insertions(+), 398 deletions(-) diff --git a/crates/node/src/storage/rocksdb.rs b/crates/node/src/storage/rocksdb.rs index 406c0f3d82..c9ed3a840d 100644 --- a/crates/node/src/storage/rocksdb.rs +++ b/crates/node/src/storage/rocksdb.rs @@ -793,33 +793,6 @@ impl RocksDB { ); Ok(()) } - - /// Erase the entire db. Use with caution. 
- pub fn clear(&mut self, height: BlockHeight) -> Result<()> { - let state_cf = self.get_column_family(STATE_CF)?; - for (_, cf) in self.column_families() { - let read_opts = make_iter_read_opts(None); - let iter = - self.inner - .iterator_cf_opt(cf, read_opts, IteratorMode::Start); - - for (key, _, _) in PersistentPrefixIterator( - PrefixIterator::new(iter, String::default()), - // Empty string to prevent prefix stripping, the prefix is - // already in the enclosed iterator - ) { - self.inner - .delete_cf(cf, key.as_bytes()) - .map_err(|e| Error::DBError(e.to_string()))?; - } - } - let height = height.serialize_to_vec(); - self.inner - .put_cf(state_cf, BLOCK_HEIGHT_KEY, height) - .expect("Could not write to DB"); - - Ok(()) - } } /// The path to a snapshot. @@ -910,7 +883,7 @@ impl DbSnapshot { // created by rocksdb pub fn package(self) -> std::io::Result<()> { self.build_tarball()?; - self.chunk_snapshot()?; + self.chunk_snapshot(MAX_STATE_SYNC_CHUNK_SIZE)?; Ok(()) } @@ -949,10 +922,10 @@ impl DbSnapshot { std::fs::remove_dir_all(&snapshot_temp_db_path) } - fn chunk_snapshot(&self) -> std::io::Result<()> { + fn chunk_snapshot(&self, max_chunk: usize) -> std::io::Result<()> { let tarball_path = self.0.temp_tarball("zst"); - let mut buf = vec![0; MAX_STATE_SYNC_CHUNK_SIZE]; + let mut buf = vec![0; max_chunk]; let mut file = File::open(&tarball_path)?; let mut eof = false; @@ -971,7 +944,7 @@ impl DbSnapshot { let mut read = 0; // read up to `MAX_STATE_SYNC_CHUNK_SIZE` bytes - while read != MAX_STATE_SYNC_CHUNK_SIZE { + while read != max_chunk { match file.read(&mut buf[read..]) { Ok(0) => { eof = true; @@ -1098,13 +1071,6 @@ impl DbSnapshot { #[allow(clippy::cast_possible_truncation)] std::fs::read(snap.chunk_with_id(chunk as _)) } - - // pub fn parse_chunk(chunk: &[u8]) -> ChunkIterator<'_> { - // let reader = std::io::BufReader::new(chunk); - // ChunkIterator { - // lines: reader.lines(), - // } - //} } impl DB for RocksDB { @@ -2254,16 +2220,14 @@ mod imp { 
#[allow(clippy::arithmetic_side_effects)] #[cfg(test)] mod test { + use namada_apps_lib::collections::HashMap; use namada_sdk::address::EstablishedAddressGen; - use namada_sdk::collections::HashMap; - use namada_sdk::hash::Hash; use namada_sdk::state::{MerkleTree, Sha256Hasher}; use namada_sdk::storage::conversion_state::ConversionState; use namada_sdk::storage::types::CommitOnlyData; use namada_sdk::storage::{BlockResults, Epochs, EthEventsQueue}; use namada_sdk::time::DateTimeUtc; use tempfile::tempdir; - use test_log::test; use super::*; @@ -2671,7 +2635,7 @@ mod test { // Write second block let mut batch = RocksDB::batch(); - let height_1 = height_0 + 10; + let height_1 = height_0 + 10u64; db.batch_write_subspace_val( &mut batch, height_1, @@ -2816,192 +2780,48 @@ mod test { db.add_block_to_batch(block, batch, true) } - /// Test the clear function deletes all keys from the db - /// and that we can still write keys to it afterward. + /// Test that we chunk a file into + /// pieces respecting the max chunk size. 
#[test] - fn test_clear_db() { + fn test_chunking() { let temp = tempfile::tempdir().expect("Test failed"); - let mut db = open(&temp, false, None).expect("Test failed"); - let state_cf = db.get_column_family(STATE_CF).expect("Test failed"); - db.inner - .put_cf( - state_cf, - BLOCK_HEIGHT_KEY, - BlockHeight(1).serialize_to_vec(), - ) - .expect("Test failed"); - db.write_subspace_val( - 1.into(), - &Key::parse("bing/fucking/bong").expect("Test failed"), - [1u8; 1], - false, - ) - .expect("Test failed"); - db.write_subspace_val( - 1.into(), - &Key::parse("ding/fucking/dong").expect("Test failed"), - [1u8; 1], - false, - ) - .expect("Test failed"); - let mut db_entries = HashMap::new(); - for (_, cf) in db.column_families() { - let read_opts = make_iter_read_opts(None); - let iter = - db.inner.iterator_cf_opt(cf, read_opts, IteratorMode::Start); - - for (key, raw_val, _gas) in PersistentPrefixIterator( - PrefixIterator::new(iter, String::default()), - // Empty string to prevent prefix stripping, the prefix is - // already in the enclosed iterator - ) { - db_entries.insert(key, raw_val); - } - } - let mut expected = HashMap::from([ - ("height".to_string(), vec![1, 0, 0, 0, 0, 0, 0, 0]), - ("bing/fucking/bong".to_string(), vec![1u8]), - ("ding/fucking/dong".to_string(), vec![1u8]), - ("0000000000002/new/bing/fucking/bong".to_string(), vec![1u8]), - ("0000000000002/new/ding/fucking/dong".to_string(), vec![1u8]), - ]); - assert_eq!(db_entries, expected); - db.clear(2.into()).expect("Test failed"); - let mut db_entries = HashMap::new(); - for (_, cf) in db.column_families() { - let read_opts = make_iter_read_opts(None); - let iter = - db.inner.iterator_cf_opt(cf, read_opts, IteratorMode::Start); + let base_dir = temp.path().to_path_buf(); - for (key, raw_val, _gas) in PersistentPrefixIterator( - PrefixIterator::new(iter, String::default()), - // Empty string to prevent prefix stripping, the prefix is - // already in the enclosed iterator - ) { - db_entries.insert(key, 
raw_val); - } - } - let empty = HashMap::from([( - "height".to_string(), - vec![2u8, 0, 0, 0, 0, 0, 0, 0], - )]); - assert_eq!(db_entries, empty,); - db.write_subspace_val( - 1.into(), - &Key::parse("bing/fucking/bong").expect("Test failed"), - [1u8; 1], - false, - ) - .expect("Test failed"); - db.write_subspace_val( - 1.into(), - &Key::parse("ding/fucking/dong").expect("Test failed"), - [1u8; 1], - false, - ) - .expect("Test failed"); - let mut db_entries = HashMap::new(); - for (_, cf) in db.column_families() { - let read_opts = make_iter_read_opts(None); - let iter = - db.inner.iterator_cf_opt(cf, read_opts, IteratorMode::Start); + let snap_path = SnapshotPath(base_dir, BlockHeight::first()); + let snapshot_base = snap_path.base().clone(); + std::fs::create_dir_all(&snapshot_base).expect("Test failed"); + let snapshot = DbSnapshot(snap_path); - for (key, raw_val, _gas) in PersistentPrefixIterator( - PrefixIterator::new(iter, String::default()), - // Empty string to prevent prefix stripping, the prefix is - // already in the enclosed iterator - ) { - db_entries.insert(key, raw_val); + let tar = snapshot.0.temp_tarball("zst"); + std::fs::write(tar, vec![16; 21]).expect("Test failed"); + snapshot.chunk_snapshot(10).unwrap(); + let mut file_number = 0; + for entry in std::fs::read_dir(snapshot_base).expect("Test failed") { + let entry = entry.expect("Test failed"); + file_number += 1; + if entry.path().is_file() { + let name = entry.file_name(); + let name = name.to_string_lossy(); + let Some(("chunk", chunk_nbr)) = name.split_once('-') else { + continue; + }; + let chunk_nbr = u64::from_str(chunk_nbr).expect("Test failed"); + match chunk_nbr { + 0 | 1 => assert_eq!( + entry.metadata().expect("Test failed").len(), + 10 + ), + 2 => assert_eq!( + entry.metadata().expect("Test failed").len(), + 1 + ), + _ => panic!("Snapshot too chunky"), + } + } else { + panic!("Found unexpected dir in snapshots") } } - expected.insert("height".to_string(), vec![2, 0, 0, 0, 0, 0, 0, 
0]); - - assert_eq!(db_entries, expected); - } - - /// Test that we chunk a series of lines - /// up correctly based on a max chunk size. - #[test] - fn test_chunker() { - let mut chunker = Chunker::new(10); - let lines = vec![ - "fffffggggghh", - "aaaa", - "bbbbb", - "fffffggggghh", - "cc", - "dddddddd", - "eeeeeeeeee", - "ab", - ]; - for l in lines { - chunker.add_line(l); - } - let chunks = chunker.finalize(); - let expected = vec![ - Chunk { - boundary: 1, - hash: Hash::sha256("fffffggggghh"), - }, - Chunk { - boundary: 3, - hash: Hash::sha256("aaaabbbbb".as_bytes()), - }, - Chunk { - boundary: 4, - hash: Hash::sha256("fffffggggghh"), - }, - Chunk { - boundary: 6, - hash: Hash::sha256("ccdddddddd".as_bytes()), - }, - Chunk { - boundary: 7, - hash: Hash::sha256("eeeeeeeeee".as_bytes()), - }, - Chunk { - boundary: 8, - hash: Hash::sha256("ab".as_bytes()), - }, - ]; - assert_eq!(expected, chunks); - let mut chunker = Chunker::new(10); - let lines = vec![ - "aaaa", - "bbbbb", - "fffffggggghh", - "cc", - "dddddddd", - "eeeeeeeeee", - "ab", - ]; - for l in lines { - chunker.add_line(l); - } - let chunks = chunker.finalize(); - let expected = vec![ - Chunk { - boundary: 2, - hash: Hash::sha256("aaaabbbbb".as_bytes()), - }, - Chunk { - boundary: 3, - hash: Hash::sha256("fffffggggghh"), - }, - Chunk { - boundary: 5, - hash: Hash::sha256("ccdddddddd".as_bytes()), - }, - Chunk { - boundary: 6, - hash: Hash::sha256("eeeeeeeeee".as_bytes()), - }, - Chunk { - boundary: 7, - hash: Hash::sha256("ab".as_bytes()), - }, - ]; - assert_eq!(expected, chunks); + assert_eq!(file_number, 5) } /// Test that we correctly delete snapshots @@ -3010,43 +2830,29 @@ mod test { fn test_snapshot_cleanup() { let temp = tempfile::tempdir().expect("Test failed"); let base_dir = temp.path().to_path_buf(); - let chunks = vec![Chunk::default()]; - let chunk_bytes = base64::encode(chunks.serialize_to_vec()); - for i in 0..4 { - let mut path = base_dir.clone(); - path.push(format!("snapshot_{}.snap", 
i)); - _ = File::create(path).expect("Test failed"); - let mut path = base_dir.clone(); - path.push(format!("snapshot_{}.meta", i)); - std::fs::write(&path, chunk_bytes.as_bytes()).expect("Test failed"); + for height in 1..4 { + let snap_path = SnapshotPath(base_dir.clone(), height.into()); + let snapshot_base = snap_path.base().clone(); + std::fs::create_dir_all(&snapshot_base).expect("Test failed"); } - let mut path = base_dir.clone(); - path.push("snapshot_0_backup.snap"); - _ = File::create(path).expect("Test failed"); - let mut path = base_dir.clone(); - path.push("snapshot_0_backup.meta"); - _ = File::create(path).expect("Test failed"); - let mut path = base_dir.clone(); - path.push("snapshot_0.bak"); - _ = File::create(path).expect("Test failed"); - DbSnapshot::cleanup(2.into(), &base_dir, 1).expect("Test failed"); - let mut expected = HashSet::from([ - "snapshot_2.snap", - "snapshot_2.meta", - "snapshot_3.snap", - "snapshot_3.meta", - "snapshot_0_backup.snap", - "snapshot_0_backup.meta", - "snapshot_0.bak", - ]); - for entry in std::fs::read_dir(base_dir).expect("Test failed") { - let entry = entry.expect("Test failed"); - assert!(entry.path().is_file()); - let path = entry.path(); - let path = path.file_name().expect("Test failed"); - assert!(expected.swap_remove(path.to_str().unwrap())); - } - assert!(expected.is_empty()); + + std::fs::write(base_dir.join("big.chungus"), "howdy".as_bytes()) + .expect("Test failed"); + DbSnapshot::cleanup(3.into(), &base_dir, 2).unwrap(); + // Big Chungus lives!!! 
+ assert_eq!( + std::fs::read_to_string(base_dir.join("big.chungus")) + .expect("Test failed"), + "howdy" + ); + + let heights = { + let mut h = DbSnapshot::heights_of_stored_snapshots(&base_dir) + .expect("Test failed"); + h.sort(); + h + }; + assert_eq!(heights, vec![2, 3]); } /// Test that taking a snapshot actually @@ -3068,7 +2874,11 @@ mod test { drop(db); let db = open(&temp, true, None).expect("Test failed"); // freeze the database at this point in time - let snapshot = db.snapshot(); + let snapshot = db + .checkpoint(temp.path().to_path_buf(), BlockHeight::first()) + .expect("Test failed"); + let db = + open(snapshot.0.temp_rocksdb(), true, None).expect("Test failed"); // write a new entry to the db let mut db2 = open(&temp, false, None).expect("Test failed"); @@ -3088,9 +2898,7 @@ mod test { for (_, cf) in db.column_families() { let read_opts = make_iter_read_opts(None); let iter = - snapshot - .0 - .iterator_cf_opt(cf, read_opts, IteratorMode::Start); + db.inner.iterator_cf_opt(cf, read_opts, IteratorMode::Start); for (key, raw_val, _gas) in PersistentPrefixIterator( PrefixIterator::new(iter, String::default()), @@ -3137,140 +2945,4 @@ mod test { ]); assert_eq!(expected_db, db_entries); } - - /// Test that [`DbSnapshot`] writes a snapshot - /// to disk correctly. 
- #[test] - fn test_db_snapshot() { - let temp = tempfile::tempdir().expect("Test failed"); - let mut db = open(&temp, false, None).expect("Test failed"); - db.write_subspace_val( - 1.into(), - &Key::parse("bing/fucking/bong").expect("Test failed"), - [1u8; 1], - false, - ) - .expect("Test failed"); - // we need to persist the changes and restart in read-only mode - // as rocksdb doesn't allow multiple read/write instances - drop(db); - let db = open(&temp, true, None).expect("Test failed"); - // freeze the database at this point in time - let snapshot = db.snapshot(); - let path = temp.path().to_path_buf(); - - snapshot - .write_to_file(db.column_families(), path.clone(), 0.into()) - .expect("Test failed"); - let snapshot = - std::fs::read_to_string(path.clone().join("snapshot_0.snap")) - .expect("Test failed"); - let chunks = vec![Chunk { - boundary: 2, - hash: Hash::sha256( - "subspace:bing/fucking/bong=AQ==\nrollback:0000000000002/new/\ - bing/fucking/bong=AQ==\n" - .as_bytes(), - ), - }]; - let chunk_val = base64::encode(chunks.serialize_to_vec()); - let expected = [ - "subspace:bing/fucking/bong=AQ==".to_string(), - "rollback:0000000000002/new/bing/fucking/bong=AQ==".to_string(), - "".to_string(), - ]; - - let lines: Vec<&str> = snapshot.split('\n').collect(); - assert_eq!(lines, expected); - let metadata = std::fs::read_to_string(path.join("snapshot_0.meta")) - .expect("Test failed"); - assert_eq!(metadata, chunk_val); - } - - /// Test that we load chunks correctly - /// from the snapshot file - #[test] - fn test_load_chunks() { - let temp = tempfile::tempdir().expect("Test failed"); - let mut chunker = Chunker::new(10); - let lines = vec!["fffffggggghh", "aaaa", "bbbbb", "cc", "dddddddd"]; - for l in lines { - chunker.add_line(l); - } - let chunks = chunker.finalize(); - let expected = vec![ - Chunk { - boundary: 1, - hash: Hash::sha256("fffffggggghh"), - }, - Chunk { - boundary: 3, - hash: Hash::sha256("aaaabbbbb".as_bytes()), - }, - Chunk { - boundary: 5, 
- hash: Hash::sha256("ccdddddddd".as_bytes()), - }, - ]; - assert_eq!(chunks, expected); - let [snap_file, meta_file] = - DbSnapshot::paths(1.into(), temp.path().to_path_buf()); - std::fs::write( - &snap_file, - "fffffggggghh\naaaa\nbbbbb\ncc\ndddddddd\n".as_bytes(), - ) - .expect("Test failed"); - std::fs::write(meta_file, base64::encode(chunks.serialize_to_vec())) - .expect("Test failed"); - let chunks: Vec<_> = (0..3) - .filter_map(|i| { - DbSnapshot::load_chunk(1.into(), i, temp.path()).ok() - }) - .collect(); - let expected = vec![ - "fffffggggghh\n".as_bytes().to_vec(), - "aaaa\nbbbbb\n".as_bytes().to_vec(), - "cc\ndddddddd\n".as_bytes().to_vec(), - ]; - assert_eq!(chunks, expected); - - assert!(DbSnapshot::load_chunk(0.into(), 0, temp.path()).is_err()); - assert!(DbSnapshot::load_chunk(0.into(), 4, temp.path()).is_err()); - std::fs::remove_file(snap_file).unwrap(); - assert!(DbSnapshot::load_chunk(0.into(), 0, temp.path()).is_err()); - } - - #[test] - fn test_chunk_iterator() { - let chunk = "state:bing/fucking/bong=AQ==\nsubspace:I/AM/BATMAN=Ag==\n"; - let iterator = DbSnapshot::parse_chunk(chunk.as_bytes()); - let expected = vec![ - ( - DbColFam::STATE, - Key::parse("bing/fucking/bong").expect("Test failed"), - vec![1u8], - ), - ( - DbColFam::SUBSPACE, - Key::parse("I/AM/BATMAN").expect("Test failed"), - vec![2u8], - ), - ]; - let parsed: Vec<_> = iterator.collect(); - assert_eq!(parsed, expected); - let bad_chunks = [ - "bloop:bing/fucking/bong=AQ==\n", - "bing/fucking/bong=AQ==\n", - "state:bing/fucking/bong:AQ==\n", - "state=bing/fucking/bong=AQ==\n", - "state:bing/fucking/bong\n", - "state:#bing/fucking/bong=AQ==\n", - "state:bing/fucking/bong=0Z\n", - ]; - for chunk in bad_chunks { - let iterator = DbSnapshot::parse_chunk(chunk.as_bytes()); - let parsed: Vec<_> = iterator.collect(); - assert!(parsed.is_empty()); - } - } } From 20cd06efa1f32e23a4da88fd98e72662aafd53ab Mon Sep 17 00:00:00 2001 From: Tiago Carvalho Date: Tue, 27 Aug 2024 10:09:09 +0100 
Subject: [PATCH 61/73] Invalidate `restore_from` method on `MockDB` --- crates/storage/src/mockdb.rs | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/crates/storage/src/mockdb.rs b/crates/storage/src/mockdb.rs index 21859f5b7b..1693206422 100644 --- a/crates/storage/src/mockdb.rs +++ b/crates/storage/src/mockdb.rs @@ -87,19 +87,25 @@ impl MockDB { } } +/// Source to restore a [`MockDB`] from. +/// +/// Since this enum has no variants, you can't +/// actually restore a [`MockDB`] instance. +pub enum MockDBRestoreSource {} + impl DB for MockDB { /// There is no cache for MockDB type Cache = (); type Migrator = (); - type RestoreSource<'a> = (); + type RestoreSource<'a> = MockDBRestoreSource; type WriteBatch = MockDBWriteBatch; fn open(_db_path: impl AsRef, _cache: Option<&Self::Cache>) -> Self { Self::default() } - fn restore_from(&mut self, _source: ()) -> Result<()> { - Ok(()) + fn restore_from(&mut self, source: MockDBRestoreSource) -> Result<()> { + match source {} } fn flush(&self, _wait: bool) -> Result<()> { From 5dd52ed8581799b1613da29100db1a956dd3f984 Mon Sep 17 00:00:00 2001 From: satan Date: Tue, 27 Aug 2024 12:45:12 +0200 Subject: [PATCH 62/73] Added unit test for restoring db from snapshot in shell --- crates/node/src/shell/mod.rs | 82 ++++++++++++++++++++++++++++++ crates/node/src/storage/mod.rs | 2 + crates/node/src/storage/rocksdb.rs | 5 +- 3 files changed, 87 insertions(+), 2 deletions(-) diff --git a/crates/node/src/shell/mod.rs b/crates/node/src/shell/mod.rs index 997b6763e4..2aaa271b51 100644 --- a/crates/node/src/shell/mod.rs +++ b/crates/node/src/shell/mod.rs @@ -2048,7 +2048,10 @@ pub mod test_utils { #[cfg(test)] mod shell_tests { + use std::fs::File; + use eth_bridge::storage::eth_bridge_queries::is_bridge_comptime_enabled; + use namada_apps_lib::state::StorageWrite; use namada_sdk::address; use namada_sdk::chain::Epoch; use namada_sdk::token::read_denom; @@ -2058,10 +2061,13 @@ mod shell_tests { use 
namada_vote_ext::{ bridge_pool_roots, ethereum_events, ethereum_tx_data_variants, }; + use tempfile::tempdir; use {namada_replay_protection as replay_protection, wallet}; use super::*; + use crate::shell::test_utils::top_level_directory; use crate::shell::token::DenominatedAmount; + use crate::storage::{DbSnapshot, PersistentDB, SnapshotPath}; const GAS_LIMIT_MULTIPLIER: u64 = 100_000; @@ -2917,4 +2923,80 @@ mod shell_tests { ); assert_eq!(result.code, ResultCode::TooLarge.into()); } + + /// Test the that the shell can restore it's state + /// from a snapshot if it is not syncing + #[test] + fn test_restore_database_from_snapshot() { + let (sender, _receiver) = tokio::sync::mpsc::unbounded_channel(); + + let base_dir = tempdir().unwrap().as_ref().canonicalize().unwrap(); + let vp_wasm_compilation_cache = 50 * 1024 * 1024; // 50 kiB + let tx_wasm_compilation_cache = 50 * 1024 * 1024; // 50 kiB + let config = config::Ledger::new( + base_dir.clone(), + Default::default(), + TendermintMode::Validator, + ); + let mut shell = Shell::::new( + config.clone(), + top_level_directory().join("wasm"), + sender, + None, + None, + None, + vp_wasm_compilation_cache, + tx_wasm_compilation_cache, + ); + shell.state.in_mem_mut().block.height = BlockHeight::first(); + + shell.state.commit_block().expect("Test failed"); + shell.state.db_mut().flush(true).expect("Test failed"); + let original_root = shell.state.in_mem().merkle_root(); + let snapshot = make_snapshot(config.db_dir(), base_dir); + shell + .state + .write( + &Key::parse("bing/fucking/bong").expect("Test failed"), + [1u8; 64], + ) + .expect("Test failed"); + shell.state.commit_block().expect("Test failed"); + let new_root = shell.state.in_mem().merkle_root(); + assert_ne!(new_root, original_root); + + shell.restore_database_from_state_sync(); + assert_eq!(shell.state.in_mem().merkle_root(), new_root,); + shell.syncing = Some(SnapshotSync { + next_chunk: 0, + height: BlockHeight::first(), + expected: vec![], + strikes: 0, + 
snapshot, + }); + shell.restore_database_from_state_sync(); + assert_eq!(shell.state.in_mem().merkle_root(), original_root,); + } + + /// Helper function for the `test_restore_database_from_snapshot` test + fn make_snapshot(db_dir: PathBuf, base_dir: PathBuf) -> File { + let snapshot = + DbSnapshot(SnapshotPath(base_dir.clone(), BlockHeight::first())); + std::fs::create_dir_all(base_dir.join("snapshots")) + .expect("Test failed"); + std::fs::create_dir_all(snapshot.0.base()).expect("Test failed"); + std::fs::create_dir_all(snapshot.0.temp_rocksdb()) + .expect("Test failed"); + for entry in std::fs::read_dir(db_dir).expect("Test failed") { + let entry = entry.expect("Test failed"); + let dest_file = snapshot + .0 + .base() + .join("db") + .join(entry.file_name().to_string_lossy().to_string()); + std::fs::copy(entry.path(), dest_file).expect("Test failed"); + } + snapshot.clone().build_tarball().expect("Test failed"); + File::open(snapshot.0.temp_tarball("zst")).expect("Test failed") + } } diff --git a/crates/node/src/storage/mod.rs b/crates/node/src/storage/mod.rs index 95b30088d5..94e0e08aee 100644 --- a/crates/node/src/storage/mod.rs +++ b/crates/node/src/storage/mod.rs @@ -10,6 +10,8 @@ use arse_merkle_tree::traits::Hasher; use arse_merkle_tree::H256; use blake2b_rs::{Blake2b, Blake2bBuilder}; use namada_sdk::state::{FullAccessState, StorageHasher}; +#[cfg(test)] +pub use rocksdb::SnapshotPath; pub use rocksdb::{open, DbSnapshot, DbSnapshotMeta, RocksDBUpdateVisitor}; #[derive(Default)] diff --git a/crates/node/src/storage/rocksdb.rs b/crates/node/src/storage/rocksdb.rs index c9ed3a840d..05d24050f5 100644 --- a/crates/node/src/storage/rocksdb.rs +++ b/crates/node/src/storage/rocksdb.rs @@ -797,7 +797,7 @@ impl RocksDB { /// The path to a snapshot. #[derive(Clone, Debug)] -pub struct SnapshotPath(PathBuf, BlockHeight); +pub struct SnapshotPath(pub PathBuf, pub BlockHeight); impl SnapshotPath { /// Return the root path where snapshots are stored. 
@@ -871,6 +871,7 @@ pub struct DbSnapshotMeta { pub root_hash: Hash, } +#[derive(Clone)] pub struct DbSnapshot(pub SnapshotPath); impl DbSnapshot { @@ -902,7 +903,7 @@ impl DbSnapshot { Ok(()) } - fn build_tarball(&self) -> std::io::Result<()> { + pub(crate) fn build_tarball(&self) -> std::io::Result<()> { use zstd::stream::write::Encoder; let snapshot_temp_db_path = self.0.temp_rocksdb(); From a2e067fa04c097b4b43d3cd63d236b8daab0f82a Mon Sep 17 00:00:00 2001 From: Tiago Carvalho Date: Mon, 26 Aug 2024 15:39:00 +0100 Subject: [PATCH 63/73] Changelog --- .../unreleased/improvements/3701-snapshot-improvements.md | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 .changelog/unreleased/improvements/3701-snapshot-improvements.md diff --git a/.changelog/unreleased/improvements/3701-snapshot-improvements.md b/.changelog/unreleased/improvements/3701-snapshot-improvements.md new file mode 100644 index 0000000000..f2e8c1c4b5 --- /dev/null +++ b/.changelog/unreleased/improvements/3701-snapshot-improvements.md @@ -0,0 +1,4 @@ +- Optimize the format of snapshots taken for state syncing purposes. + Snapshots are taken over the entire RocksDB database, packaged into + a `zstd` compressed `tar` archive, and split into 10 MB chunks. 
+ ([\#3701](https://github.com/anoma/namada/pull/3701)) \ No newline at end of file From 5ec928993a7122a3dc3bdfa454767f9a7b36f3ee Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Fri, 23 Aug 2024 13:03:07 +0100 Subject: [PATCH 64/73] refactor tendermint to only re-export v0.37 --- crates/apps/src/bin/namada-client/main.rs | 2 +- crates/apps_lib/src/cli.rs | 4 +-- crates/apps_lib/src/cli/api.rs | 4 +-- crates/apps_lib/src/client/rpc.rs | 2 +- crates/apps_lib/src/client/tx.rs | 2 +- crates/apps_lib/src/client/utils.rs | 2 +- crates/apps_lib/src/config/genesis/chain.rs | 8 +++--- crates/apps_lib/src/config/mod.rs | 6 ++-- crates/apps_lib/src/config/utils.rs | 2 +- crates/apps_lib/src/lib.rs | 8 ++---- crates/apps_lib/src/tendermint_node.rs | 4 +-- crates/core/src/lib.rs | 20 +++++++++++-- crates/events/src/extend.rs | 4 +-- crates/events/src/lib.rs | 6 ++-- crates/ibc/src/event.rs | 2 +- crates/node/src/bench_utils.rs | 7 ++--- crates/node/src/broadcaster.rs | 4 +-- crates/node/src/lib.rs | 17 ++++++----- crates/node/src/shell/block_alloc.rs | 4 +-- crates/node/src/shell/finalize_block.rs | 15 ++++------ crates/node/src/shell/init_chain.rs | 28 ++++++++----------- crates/node/src/shell/mod.rs | 16 +++++------ crates/node/src/shell/prepare_proposal.rs | 17 ++++++----- crates/node/src/shell/process_proposal.rs | 2 +- crates/node/src/shell/snapshots.rs | 9 +++--- crates/node/src/shell/testing/client.rs | 2 +- crates/node/src/shell/testing/node.rs | 15 +++++----- .../shell/vote_extensions/bridge_pool_vext.rs | 19 +++++++------ .../src/shell/vote_extensions/eth_events.rs | 19 +++++++------ .../shell/vote_extensions/val_set_update.rs | 19 +++++++------ crates/node/src/shims/abcipp_shim.rs | 8 ++---- crates/node/src/shims/abcipp_shim_types.rs | 20 ++++++------- crates/node/src/tendermint_node.rs | 8 ++---- crates/node/src/utils.rs | 2 +- crates/sdk/src/queries/types.rs | 2 +- crates/tests/src/e2e/ibc_tests.rs | 6 ++-- 
crates/tests/src/e2e/ledger_tests.rs | 2 +- crates/tests/src/integration/ledger_tests.rs | 6 ++-- crates/tests/src/integration/setup.rs | 18 ++++++------ fuzz/fuzz_targets/txs_prepare_proposal.rs | 2 +- 40 files changed, 171 insertions(+), 172 deletions(-) diff --git a/crates/apps/src/bin/namada-client/main.rs b/crates/apps/src/bin/namada-client/main.rs index 666f7d39b5..32fd2d717b 100644 --- a/crates/apps/src/bin/namada-client/main.rs +++ b/crates/apps/src/bin/namada-client/main.rs @@ -1,6 +1,6 @@ use color_eyre::eyre::Result; use namada_apps_lib::cli::api::{CliApi, CliIo}; -use namada_apps_lib::facade::tendermint_rpc::HttpClient; +use namada_apps_lib::tendermint_rpc::HttpClient; use namada_apps_lib::{cli, logging}; use tracing_subscriber::filter::LevelFilter; diff --git a/crates/apps_lib/src/cli.rs b/crates/apps_lib/src/cli.rs index 51eacc4cc3..c1b4c68534 100644 --- a/crates/apps_lib/src/cli.rs +++ b/crates/apps_lib/src/cli.rs @@ -3190,8 +3190,8 @@ pub mod args { use crate::client::utils::PRE_GENESIS_DIR; use crate::config::genesis::AddrOrPk; use crate::config::{self, Action, ActionAtHeight}; - use crate::facade::tendermint::Timeout; - use crate::facade::tendermint_rpc::Url; + use crate::tendermint::Timeout; + use crate::tendermint_rpc::Url; use crate::wrap; pub const ADDRESS: Arg = arg("address"); diff --git a/crates/apps_lib/src/cli/api.rs b/crates/apps_lib/src/cli/api.rs index a955af6644..efe19e0a62 100644 --- a/crates/apps_lib/src/cli/api.rs +++ b/crates/apps_lib/src/cli/api.rs @@ -3,8 +3,8 @@ use namada_sdk::io::Io; use namada_sdk::queries::Client; use namada_sdk::rpc::wait_until_node_is_synched; -use crate::facade::tendermint_rpc::client::CompatMode; -use crate::facade::tendermint_rpc::{HttpClient, Url as TendermintUrl}; +use crate::tendermint_rpc::client::CompatMode; +use crate::tendermint_rpc::{HttpClient, Url as TendermintUrl}; /// Trait for clients that can be used with the CLI. 
#[async_trait::async_trait(?Send)] diff --git a/crates/apps_lib/src/client/rpc.rs b/crates/apps_lib/src/client/rpc.rs index 9bb20e99c8..ec48adc9ba 100644 --- a/crates/apps_lib/src/client/rpc.rs +++ b/crates/apps_lib/src/client/rpc.rs @@ -49,7 +49,7 @@ use namada_sdk::{ }; use crate::cli::{self, args}; -use crate::facade::tendermint::merkle::proof::ProofOps; +use crate::tendermint::merkle::proof::ProofOps; /// Query the status of a given transaction. /// diff --git a/crates/apps_lib/src/client/tx.rs b/crates/apps_lib/src/client/tx.rs index 46e13e71f0..211f347942 100644 --- a/crates/apps_lib/src/client/tx.rs +++ b/crates/apps_lib/src/client/tx.rs @@ -28,8 +28,8 @@ use crate::cli::{args, safe_exit}; use crate::client::tx::signing::{default_sign, SigningTxData}; use crate::client::tx::tx::ProcessTxResponse; use crate::config::TendermintMode; -use crate::facade::tendermint_rpc::endpoint::broadcast::tx_sync::Response; use crate::tendermint_node; +use crate::tendermint_rpc::endpoint::broadcast::tx_sync::Response; use crate::wallet::{ gen_validator_keys, read_and_confirm_encryption_password, WalletTransport, }; diff --git a/crates/apps_lib/src/client/utils.rs b/crates/apps_lib/src/client/utils.rs index a43533afcf..53826e30f0 100644 --- a/crates/apps_lib/src/client/utils.rs +++ b/crates/apps_lib/src/client/utils.rs @@ -33,7 +33,7 @@ use crate::config::genesis::transactions::{ use crate::config::genesis::{AddrOrPk, GenesisAddress}; use crate::config::global::GlobalConfig; use crate::config::{self, genesis, get_default_namada_folder, TendermintMode}; -use crate::facade::tendermint::node::Id as TendermintNodeId; +use crate::tendermint::node::Id as TendermintNodeId; use crate::wallet::{pre_genesis, CliWalletUtils}; use crate::{tendermint_node, wasm_loader}; diff --git a/crates/apps_lib/src/config/genesis/chain.rs b/crates/apps_lib/src/config/genesis/chain.rs index df50cb07b5..ac4ad408c4 100644 --- a/crates/apps_lib/src/config/genesis/chain.rs +++ 
b/crates/apps_lib/src/config/genesis/chain.rs @@ -29,8 +29,8 @@ use super::{templates, transactions}; use crate::config::genesis::templates::Validated; use crate::config::utils::{set_ip, set_port}; use crate::config::{Config, TendermintMode}; -use crate::facade::tendermint::node::Id as TendermintNodeId; -use crate::facade::tendermint_config::net::Address as TendermintAddress; +use crate::tendermint::node::Id as TendermintNodeId; +use crate::tendermint_config::net::Address as TendermintAddress; use crate::tendermint_node::id_from_pk; use crate::wallet::{Alias, CliWalletUtils}; use crate::wasm_loader; @@ -497,7 +497,7 @@ pub fn finalize( templates: templates::All, chain_id_prefix: ChainIdPrefix, genesis_time: DateTimeUtc, - consensus_timeout_commit: crate::facade::tendermint::Timeout, + consensus_timeout_commit: crate::tendermint::Timeout, ) -> Finalized { let genesis_time: Rfc3339String = genesis_time.into(); let consensus_timeout_commit: DurationNanos = @@ -883,7 +883,7 @@ mod test { let genesis_time = DateTimeUtc::from_str(GENESIS_TIME).unwrap(); let consensus_timeout_commit = - crate::facade::tendermint::Timeout::from_str("1s").unwrap(); + crate::tendermint::Timeout::from_str("1s").unwrap(); let finalized_0 = finalize( templates.clone(), diff --git a/crates/apps_lib/src/config/mod.rs b/crates/apps_lib/src/config/mod.rs index 9f73b6c48a..b60b0f494f 100644 --- a/crates/apps_lib/src/config/mod.rs +++ b/crates/apps_lib/src/config/mod.rs @@ -18,9 +18,7 @@ use serde::{Deserialize, Serialize}; use thiserror::Error; use crate::cli; -use crate::facade::tendermint_config::{ - TendermintConfig, TxIndexConfig, TxIndexer, -}; +use crate::tendermint_config::{TendermintConfig, TxIndexConfig, TxIndexer}; /// Base directory contains global config and chain directories. 
pub const DEFAULT_BASE_DIR: &str = ".namada"; @@ -857,7 +855,7 @@ namespace = "cometbft" #[cfg(test)] mod tests { use super::DEFAULT_COMETBFT_CONFIG; - use crate::facade::tendermint_config::TendermintConfig; + use crate::tendermint_config::TendermintConfig; #[test] fn test_default_cometbft_config() { diff --git a/crates/apps_lib/src/config/utils.rs b/crates/apps_lib/src/config/utils.rs index 41658a000c..dbfebf3b3f 100644 --- a/crates/apps_lib/src/config/utils.rs +++ b/crates/apps_lib/src/config/utils.rs @@ -7,7 +7,7 @@ use std::{cmp, env}; use itertools::Either; use crate::cli; -use crate::facade::tendermint_config::net::Address as TendermintAddress; +use crate::tendermint_config::net::Address as TendermintAddress; /// Find how many threads to use from an environment variable if it's set and /// valid (>= 1). If the environment variable is invalid, exits the process with diff --git a/crates/apps_lib/src/lib.rs b/crates/apps_lib/src/lib.rs index 12a7f7991e..3ae8f0a36f 100644 --- a/crates/apps_lib/src/lib.rs +++ b/crates/apps_lib/src/lib.rs @@ -22,10 +22,6 @@ pub mod tendermint_node; pub mod wallet; pub mod wasm_loader; +pub use namada_core::{tendermint, tendermint_proto}; pub use namada_sdk::*; - -pub mod facade { - // TODO(namada#3248): only re-export v037 `tendermint-rs` - pub use namada_core::{tendermint, tendermint_proto}; - pub use {tendermint_config, tendermint_rpc}; -} +pub use {tendermint_config, tendermint_rpc}; diff --git a/crates/apps_lib/src/tendermint_node.rs b/crates/apps_lib/src/tendermint_node.rs index 1fc100e027..3c2197915a 100644 --- a/crates/apps_lib/src/tendermint_node.rs +++ b/crates/apps_lib/src/tendermint_node.rs @@ -6,8 +6,8 @@ use serde_json::json; use sha2::{Digest, Sha256}; use thiserror::Error; -use crate::facade::tendermint::node::Id as TendermintNodeId; -use crate::facade::tendermint_config::Error as TendermintError; +use crate::tendermint::node::Id as TendermintNodeId; +use crate::tendermint_config::Error as TendermintError; /// Env. 
var to output Tendermint log to stdout pub const ENV_VAR_TM_STDOUT: &str = "NAMADA_CMT_STDOUT"; diff --git a/crates/core/src/lib.rs b/crates/core/src/lib.rs index 377d30737d..bf94f26e4f 100644 --- a/crates/core/src/lib.rs +++ b/crates/core/src/lib.rs @@ -23,8 +23,24 @@ pub mod bytes; pub mod control_flow; pub mod hints; -// TODO(namada#3248): only re-export v037 `tendermint-rs` -pub use {masp_primitives, tendermint, tendermint_proto}; +pub use masp_primitives; +/// Re-export of tendermint v0.37 +pub mod tendermint { + /// Re-export of tendermint v0.37 ABCI + pub mod abci { + pub use tendermint::abci::response::ApplySnapshotChunkResult; + pub use tendermint::abci::{ + types, Code, Event, EventAttribute, MethodKind, + }; + pub use tendermint::v0_37::abci::*; + } + pub use tendermint::*; +} +/// Re-export of tendermint-proto v0.37 +pub mod tendermint_proto { + pub use tendermint_proto::google; // 💩 + pub use tendermint_proto::v0_37::*; +} /// Borsh binary encoding (re-exported) from official crate with custom ext. 
pub mod borsh { pub use borsh::*; diff --git a/crates/events/src/extend.rs b/crates/events/src/extend.rs index ae68e66470..8f3fdcbbb7 100644 --- a/crates/events/src/extend.rs +++ b/crates/events/src/extend.rs @@ -142,7 +142,7 @@ impl AttributesMap for Vec { } impl AttributesMap - for Vec + for Vec { #[inline] fn insert_attribute(&mut self, key: K, value: V) @@ -150,7 +150,7 @@ impl AttributesMap K: Into, V: Into, { - self.push(namada_core::tendermint_proto::v0_37::abci::EventAttribute { + self.push(namada_core::tendermint_proto::abci::EventAttribute { key: key.into(), value: value.into(), index: true, diff --git a/crates/events/src/lib.rs b/crates/events/src/lib.rs index 71e0df8ff5..45e20c43f0 100644 --- a/crates/events/src/lib.rs +++ b/crates/events/src/lib.rs @@ -466,7 +466,7 @@ impl Event { } } -impl From for namada_core::tendermint_proto::v0_37::abci::Event { +impl From for namada_core::tendermint_proto::abci::Event { fn from(event: Event) -> Self { Self { r#type: { @@ -485,14 +485,14 @@ impl From for namada_core::tendermint_proto::v0_37::abci::Event { .attributes .into_iter() .map(|(key, value)| { - namada_core::tendermint_proto::v0_37::abci::EventAttribute { + namada_core::tendermint_proto::abci::EventAttribute { key, value, index: true, } }) .chain(std::iter::once_with(|| { - namada_core::tendermint_proto::v0_37::abci::EventAttribute { + namada_core::tendermint_proto::abci::EventAttribute { key: "event-level".to_string(), value: event.level.to_string(), index: true, diff --git a/crates/ibc/src/event.rs b/crates/ibc/src/event.rs index 5d340aee25..51c81fe2b7 100644 --- a/crates/ibc/src/event.rs +++ b/crates/ibc/src/event.rs @@ -431,7 +431,7 @@ pub fn packet_from_event_attributes( #[cfg(test)] mod tests { use namada_core::hash::Hash; - use namada_core::tendermint_proto::v0_37::abci::Event as AbciEventV037; + use namada_core::tendermint_proto::abci::Event as AbciEventV037; use namada_events::extend::{ ComposeEvent as _, Domain, Height, Log, 
RawReadFromEventAttributes as _, TxHash, diff --git a/crates/node/src/bench_utils.rs b/crates/node/src/bench_utils.rs index d6b6b8a256..9dc6da077f 100644 --- a/crates/node/src/bench_utils.rs +++ b/crates/node/src/bench_utils.rs @@ -112,11 +112,10 @@ use tempfile::TempDir; use crate::config::global::GlobalConfig; use crate::config::TendermintMode; -use crate::facade::tendermint::v0_37::abci::request::InitChain; -use crate::facade::tendermint_proto::google::protobuf::Timestamp; -use crate::facade::tendermint_rpc; use crate::shell::Shell; -use crate::{config, dry_run_tx}; +use crate::tendermint::abci::request::InitChain; +use crate::tendermint_proto::google::protobuf::Timestamp; +use crate::{config, dry_run_tx, tendermint_rpc}; pub const WASM_DIR: &str = "../../wasm"; diff --git a/crates/node/src/broadcaster.rs b/crates/node/src/broadcaster.rs index 6f0fc652c5..a5d854737d 100644 --- a/crates/node/src/broadcaster.rs +++ b/crates/node/src/broadcaster.rs @@ -5,8 +5,8 @@ use namada_sdk::control_flow::time; use namada_sdk::time::{DateTimeUtc, Utc}; use tokio::sync::mpsc::UnboundedReceiver; -use crate::facade::tendermint_rpc::client::CompatMode; -use crate::facade::tendermint_rpc::{Client, HttpClient}; +use crate::tendermint_rpc::client::CompatMode; +use crate::tendermint_rpc::{Client, HttpClient}; const DEFAULT_BROADCAST_TIMEOUT: u64 = 180; const BROADCASTER_TIMEOUT_ENV_VAR: &str = "NAMADA_BROADCASTER_TIMEOUT_SECS"; diff --git a/crates/node/src/lib.rs b/crates/node/src/lib.rs index 53d20a29b0..bbf5c09cd4 100644 --- a/crates/node/src/lib.rs +++ b/crates/node/src/lib.rs @@ -40,6 +40,9 @@ use namada_apps_lib::config::utils::{ convert_tm_addr_to_socket_addr, num_of_threads, }; use namada_apps_lib::{config, wasm_loader}; +pub use namada_apps_lib::{ + tendermint, tendermint_config, tendermint_proto, tendermint_rpc, +}; use namada_sdk::chain::BlockHeight; use namada_sdk::eth_bridge::ethers::providers::{Http, Provider}; use namada_sdk::migrations::ScheduledMigration; @@ -60,18 
+63,14 @@ use self::shims::abcipp_shim::AbciService; use crate::broadcaster::Broadcaster; use crate::config::{ethereum_bridge, TendermintMode}; use crate::ethereum_oracle as oracle; -use crate::facade::tendermint::v0_37::abci::response; -use crate::facade::tower_abci::{split, Server}; use crate::shell::{Error, MempoolTxType, Shell}; use crate::shims::abcipp_shim::AbcippShim; use crate::shims::abcipp_shim_types::shim::{Request, Response}; - -pub mod facade { - pub use namada_apps_lib::facade::*; - pub mod tower_abci { - pub use tower_abci::v037::*; - pub use tower_abci::BoxError; - } +use crate::tendermint::abci::response; +use crate::tower_abci::{split, Server}; +pub mod tower_abci { + pub use tower_abci::v037::*; + pub use tower_abci::BoxError; } /// Env. var to set a number of Tokio RT worker threads diff --git a/crates/node/src/shell/block_alloc.rs b/crates/node/src/shell/block_alloc.rs index ceabd38565..88404bfd49 100644 --- a/crates/node/src/shell/block_alloc.rs +++ b/crates/node/src/shell/block_alloc.rs @@ -44,9 +44,9 @@ use std::marker::PhantomData; use namada_sdk::parameters; use namada_sdk::state::{self, WlState}; -#[allow(unused_imports)] -use crate::facade::tendermint_proto::abci::RequestPrepareProposal; use crate::shell::block_alloc::states::WithNormalTxs; +#[allow(unused_imports)] +use crate::tendermint_proto::abci::RequestPrepareProposal; /// Block allocation failure status responses. 
#[derive(Debug, Copy, Clone, Eq, PartialEq)] diff --git a/crates/node/src/shell/finalize_block.rs b/crates/node/src/shell/finalize_block.rs index 9bf7e65809..57c52ffac6 100644 --- a/crates/node/src/shell/finalize_block.rs +++ b/crates/node/src/shell/finalize_block.rs @@ -31,10 +31,10 @@ use namada_vote_ext::ethereum_tx_data_variants; use tendermint::abci::types::Misbehavior; use super::*; -use crate::facade::tendermint::abci::types::VoteInfo; -use crate::facade::tendermint_proto; use crate::protocol::{DispatchArgs, DispatchError}; use crate::shell::stats::InternalStats; +use crate::tendermint::abci::types::VoteInfo; +use crate::tendermint_proto; impl Shell where @@ -248,14 +248,11 @@ where // Apply validator set update response.validator_updates = self .get_abci_validator_updates(false, |pk, power| { - let pub_key = tendermint_proto::v0_37::crypto::PublicKey { + let pub_key = tendermint_proto::crypto::PublicKey { sum: Some(key_to_tendermint(&pk).unwrap()), }; let pub_key = Some(pub_key); - tendermint_proto::v0_37::abci::ValidatorUpdate { - pub_key, - power, - } + tendermint_proto::abci::ValidatorUpdate { pub_key, power } }) .expect("Must be able to update validator set"); } @@ -1093,7 +1090,7 @@ fn pos_votes_from_abci( validator, sig_info, }| { - let crate::facade::tendermint::abci::types::Validator { + let crate::tendermint::abci::types::Validator { address, power, } = validator; @@ -1265,12 +1262,12 @@ mod test_finalize_block { use test_log::test; use super::*; - use crate::facade::tendermint::abci::types::Validator; use crate::oracle::control::Command; use crate::shell::test_utils::*; use crate::shims::abcipp_shim_types::shim::request::{ FinalizeBlock, ProcessedTx, }; + use crate::tendermint::abci::types::Validator; const WRAPPER_GAS_LIMIT: u64 = 1_500_000; const STORAGE_VALUE: &str = "test_value"; diff --git a/crates/node/src/shell/init_chain.rs b/crates/node/src/shell/init_chain.rs index c962e7fa39..7f26a5c106 100644 --- a/crates/node/src/shell/init_chain.rs 
+++ b/crates/node/src/shell/init_chain.rs @@ -27,7 +27,7 @@ use crate::config::genesis::templates::{TokenBalances, TokenConfig}; use crate::config::genesis::transactions::{ BondTx, EstablishedAccountTx, Signed as SignedTx, ValidatorAccountTx, }; -use crate::facade::tendermint_proto::google::protobuf; +use crate::tendermint_proto::google::protobuf; use crate::wasm_loader; /// Errors that represent panics in normal flow but get demoted to errors @@ -100,13 +100,10 @@ where let rsp = response::InitChain { validators: self .get_abci_validator_updates(true, |pk, power| { - let pub_key: crate::facade::tendermint::PublicKey = - pk.into(); + let pub_key: crate::tendermint::PublicKey = pk.into(); let power = - crate::facade::tendermint::vote::Power::try_from( - power, - ) - .unwrap(); + crate::tendermint::vote::Power::try_from(power) + .unwrap(); validator::Update { pub_key, power } }) .expect("Must be able to set genesis validator set"), @@ -194,10 +191,9 @@ where // Set the initial validator set response.validators = self .get_abci_validator_updates(true, |pk, power| { - let pub_key: crate::facade::tendermint::PublicKey = pk.into(); + let pub_key: crate::tendermint::PublicKey = pk.into(); let power = - crate::facade::tendermint::vote::Power::try_from(power) - .unwrap(); + crate::tendermint::vote::Power::try_from(power).unwrap(); validator::Update { pub_key, power } }) .expect("Must be able to set genesis validator set"); @@ -781,13 +777,11 @@ where chain_id: String, genesis: config::genesis::chain::Finalized, ) { - use crate::facade::tendermint::block::Size; - use crate::facade::tendermint::consensus::params::ValidatorParams; - use crate::facade::tendermint::consensus::Params; - use crate::facade::tendermint::evidence::{ - Duration, Params as Evidence, - }; - use crate::facade::tendermint::time::Time; + use crate::tendermint::block::Size; + use crate::tendermint::consensus::params::ValidatorParams; + use crate::tendermint::consensus::Params; + use 
crate::tendermint::evidence::{Duration, Params as Evidence}; + use crate::tendermint::time::Time; // craft a request to initialize the chain let init = request::InitChain { diff --git a/crates/node/src/shell/mod.rs b/crates/node/src/shell/mod.rs index 2aaa271b51..457129ee56 100644 --- a/crates/node/src/shell/mod.rs +++ b/crates/node/src/shell/mod.rs @@ -75,13 +75,13 @@ use tokio::sync::mpsc::{Receiver, UnboundedSender}; use super::ethereum_oracle::{self as oracle, last_processed_block}; use crate::config::{self, genesis, TendermintMode, ValidatorLocalConfig}; -use crate::facade::tendermint::v0_37::abci::{request, response}; -use crate::facade::tendermint::{self, validator}; -use crate::facade::tendermint_proto::v0_37::crypto::public_key; use crate::protocol::ShellParams; use crate::shims::abcipp_shim_types::shim; use crate::shims::abcipp_shim_types::shim::response::TxResult; use crate::shims::abcipp_shim_types::shim::TakeSnapshot; +use crate::tendermint::abci::{request, response}; +use crate::tendermint::{self, validator}; +use crate::tendermint_proto::crypto::public_key; use crate::{protocol, storage, tendermint_node}; fn key_to_tendermint( @@ -1529,15 +1529,15 @@ pub mod test_utils { use super::*; use crate::config::ethereum_bridge::ledger::ORACLE_CHANNEL_BUFFER_SIZE; - use crate::facade::tendermint::abci::types::Misbehavior; - use crate::facade::tendermint_proto::google::protobuf::Timestamp; - use crate::facade::tendermint_proto::v0_37::abci::{ - RequestPrepareProposal, RequestProcessProposal, - }; use crate::shims::abcipp_shim_types; use crate::shims::abcipp_shim_types::shim::request::{ FinalizeBlock, ProcessedTx, }; + use crate::tendermint::abci::types::Misbehavior; + use crate::tendermint_proto::abci::{ + RequestPrepareProposal, RequestProcessProposal, + }; + use crate::tendermint_proto::google::protobuf::Timestamp; #[derive(Error, Debug)] pub enum TestError { diff --git a/crates/node/src/shell/prepare_proposal.rs b/crates/node/src/shell/prepare_proposal.rs 
index 27529cfe07..f5271c1115 100644 --- a/crates/node/src/shell/prepare_proposal.rs +++ b/crates/node/src/shell/prepare_proposal.rs @@ -21,11 +21,11 @@ use super::block_alloc::states::{ }; use super::block_alloc::{AllocFailure, BlockAllocator, BlockResources}; use crate::config::ValidatorLocalConfig; -use crate::facade::tendermint_proto::google::protobuf::Timestamp; -use crate::facade::tendermint_proto::v0_37::abci::RequestPrepareProposal; use crate::protocol::{self, ShellParams}; use crate::shell::ShellMode; use crate::shims::abcipp_shim_types::shim::{response, TxBytes}; +use crate::tendermint_proto::abci::RequestPrepareProposal; +use crate::tendermint_proto::google::protobuf::Timestamp; impl Shell where @@ -696,23 +696,22 @@ mod test_prepare_proposal { address: pkh1, power: (u128::try_from(val1.bonded_stake).expect("Test failed") as u64).try_into().unwrap(), }, - sig_info: crate::facade::tendermint::abci::types::BlockSignatureInfo::LegacySigned, + sig_info: crate::tendermint::abci::types::BlockSignatureInfo::LegacySigned, }, VoteInfo { validator: Validator { address: pkh2, power: (u128::try_from(val2.bonded_stake).expect("Test failed") as u64).try_into().unwrap(), }, - sig_info: crate::facade::tendermint::abci::types::BlockSignatureInfo::LegacySigned, + sig_info: crate::tendermint::abci::types::BlockSignatureInfo::LegacySigned, }, ]; let req = FinalizeBlock { proposer_address: pkh1.to_vec(), - decided_last_commit: - crate::facade::tendermint::abci::types::CommitInfo { - round: 0u8.into(), - votes, - }, + decided_last_commit: crate::tendermint::abci::types::CommitInfo { + round: 0u8.into(), + votes, + }, ..Default::default() }; shell.start_new_epoch(Some(req)); diff --git a/crates/node/src/shell/process_proposal.rs b/crates/node/src/shell/process_proposal.rs index e7f5ee9db7..1485b06f15 100644 --- a/crates/node/src/shell/process_proposal.rs +++ b/crates/node/src/shell/process_proposal.rs @@ -9,10 +9,10 @@ use namada_vote_ext::ethereum_tx_data_variants; use 
super::block_alloc::{BlockGas, BlockSpace}; use super::*; -use crate::facade::tendermint_proto::v0_37::abci::RequestProcessProposal; use crate::shell::block_alloc::{AllocFailure, TxBin}; use crate::shims::abcipp_shim_types::shim::response::ProcessProposal; use crate::shims::abcipp_shim_types::shim::TxBytes; +use crate::tendermint_proto::abci::RequestProcessProposal; /// Validation metadata, to keep track of used resources or /// transaction numbers, in a block proposal. diff --git a/crates/node/src/shell/snapshots.rs b/crates/node/src/shell/snapshots.rs index 921165a495..40572677f6 100644 --- a/crates/node/src/shell/snapshots.rs +++ b/crates/node/src/shell/snapshots.rs @@ -7,14 +7,13 @@ use namada_sdk::hash::{Hash, Sha256Hasher}; use namada_sdk::state::{BlockHeight, StorageRead}; use super::SnapshotSync; -use crate::facade::tendermint::abci::response::ApplySnapshotChunkResult; -use crate::facade::tendermint::abci::types::Snapshot; -use crate::facade::tendermint::v0_37::abci::{ - request as tm_request, response as tm_response, -}; use crate::shell::Shell; use crate::storage; use crate::storage::{DbSnapshot, DbSnapshotMeta}; +use crate::tendermint::abci::types::Snapshot; +use crate::tendermint::abci::{ + request as tm_request, response as tm_response, ApplySnapshotChunkResult, +}; pub const MAX_SENDER_STRIKES: u64 = 5; diff --git a/crates/node/src/shell/testing/client.rs b/crates/node/src/shell/testing/client.rs index 2a209c6f35..21ff520bff 100644 --- a/crates/node/src/shell/testing/client.rs +++ b/crates/node/src/shell/testing/client.rs @@ -94,7 +94,7 @@ pub fn run( #[async_trait::async_trait(?Send)] impl CliClient for MockNode { - fn from_tendermint_address(_: &crate::facade::tendermint_rpc::Url) -> Self { + fn from_tendermint_address(_: &crate::tendermint_rpc::Url) -> Self { unreachable!("MockNode should always be instantiated at test start.") } diff --git a/crates/node/src/shell/testing/node.rs b/crates/node/src/shell/testing/node.rs index e42d579857..a6e67d3987 
100644 --- a/crates/node/src/shell/testing/node.rs +++ b/crates/node/src/shell/testing/node.rs @@ -48,20 +48,19 @@ use crate::ethereum_oracle::test_tools::mock_web3_client::{ use crate::ethereum_oracle::{ control, last_processed_block, try_process_eth_events, }; -use crate::facade::tendermint_proto::v0_37::abci::{ - RequestPrepareProposal, RequestProcessProposal, -}; -use crate::facade::tendermint_rpc::endpoint::block; -use crate::facade::tendermint_rpc::error::Error as RpcError; -use crate::facade::tendermint_rpc::SimpleRequest; -use crate::facade::{tendermint, tendermint_rpc}; use crate::shell::testing::utils::TestDir; use crate::shell::{EthereumOracleChannels, Shell}; use crate::shims::abcipp_shim_types::shim::request::{ FinalizeBlock, ProcessedTx, }; use crate::shims::abcipp_shim_types::shim::response::TxResult; -use crate::{dry_run_tx, storage}; +use crate::tendermint_proto::abci::{ + RequestPrepareProposal, RequestProcessProposal, +}; +use crate::tendermint_rpc::endpoint::block; +use crate::tendermint_rpc::error::Error as RpcError; +use crate::tendermint_rpc::SimpleRequest; +use crate::{dry_run_tx, storage, tendermint, tendermint_rpc}; /// Mock Ethereum oracle used for testing purposes. 
struct MockEthOracle { diff --git a/crates/node/src/shell/vote_extensions/bridge_pool_vext.rs b/crates/node/src/shell/vote_extensions/bridge_pool_vext.rs index b46dbd8a69..29104bebe3 100644 --- a/crates/node/src/shell/vote_extensions/bridge_pool_vext.rs +++ b/crates/node/src/shell/vote_extensions/bridge_pool_vext.rs @@ -140,19 +140,22 @@ mod test_bp_vote_extensions { Epoch::default(), ); let votes = vec![VoteInfo { - validator: crate::facade::tendermint::abci::types::Validator { + validator: crate::tendermint::abci::types::Validator { address: pkh1, - power: (u128::try_from(val1.bonded_stake).expect("Test failed") as u64).try_into().unwrap(), + power: (u128::try_from(val1.bonded_stake).expect("Test failed") + as u64) + .try_into() + .unwrap(), }, - sig_info: crate::facade::tendermint::abci::types::BlockSignatureInfo::LegacySigned, + sig_info: + crate::tendermint::abci::types::BlockSignatureInfo::LegacySigned, }]; let req = FinalizeBlock { proposer_address: pkh1.to_vec(), - decided_last_commit: - crate::facade::tendermint::abci::types::CommitInfo { - round: 0u8.into(), - votes, - }, + decided_last_commit: crate::tendermint::abci::types::CommitInfo { + round: 0u8.into(), + votes, + }, ..Default::default() }; assert_eq!(shell.start_new_epoch(Some(req)).0, 1); diff --git a/crates/node/src/shell/vote_extensions/eth_events.rs b/crates/node/src/shell/vote_extensions/eth_events.rs index 9dfbe59b48..c522c2bbb1 100644 --- a/crates/node/src/shell/vote_extensions/eth_events.rs +++ b/crates/node/src/shell/vote_extensions/eth_events.rs @@ -465,19 +465,22 @@ mod test_vote_extensions { Epoch::default(), ); let votes = vec![VoteInfo { - validator: crate::facade::tendermint::abci::types::Validator { + validator: crate::tendermint::abci::types::Validator { address: pkh1, - power: (u128::try_from(val1.bonded_stake).expect("Test failed") as u64).try_into().unwrap(), + power: (u128::try_from(val1.bonded_stake).expect("Test failed") + as u64) + .try_into() + .unwrap(), }, - sig_info: 
crate::facade::tendermint::abci::types::BlockSignatureInfo::LegacySigned, + sig_info: + crate::tendermint::abci::types::BlockSignatureInfo::LegacySigned, }]; let req = FinalizeBlock { proposer_address: pkh1.to_vec(), - decided_last_commit: - crate::facade::tendermint::abci::types::CommitInfo { - round: 0u8.into(), - votes, - }, + decided_last_commit: crate::tendermint::abci::types::CommitInfo { + round: 0u8.into(), + votes, + }, ..Default::default() }; assert_eq!(shell.start_new_epoch(Some(req)).0, 1); diff --git a/crates/node/src/shell/vote_extensions/val_set_update.rs b/crates/node/src/shell/vote_extensions/val_set_update.rs index 16e519c628..12a01830a5 100644 --- a/crates/node/src/shell/vote_extensions/val_set_update.rs +++ b/crates/node/src/shell/vote_extensions/val_set_update.rs @@ -315,19 +315,22 @@ mod test_vote_extensions { Epoch::default(), ); let votes = vec![VoteInfo { - validator: crate::facade::tendermint::abci::types::Validator { + validator: crate::tendermint::abci::types::Validator { address: pkh1, - power: (u128::try_from(val1.bonded_stake).expect("Test failed") as u64).try_into().unwrap(), + power: (u128::try_from(val1.bonded_stake).expect("Test failed") + as u64) + .try_into() + .unwrap(), }, - sig_info: crate::facade::tendermint::abci::types::BlockSignatureInfo::LegacySigned, + sig_info: + crate::tendermint::abci::types::BlockSignatureInfo::LegacySigned, }]; let req = FinalizeBlock { proposer_address: pkh1.to_vec(), - decided_last_commit: - crate::facade::tendermint::abci::types::CommitInfo { - round: 0u8.into(), - votes, - }, + decided_last_commit: crate::tendermint::abci::types::CommitInfo { + round: 0u8.into(), + votes, + }, ..Default::default() }; assert_eq!(shell.start_new_epoch(Some(req)).0, 1); diff --git a/crates/node/src/shims/abcipp_shim.rs b/crates/node/src/shims/abcipp_shim.rs index 6b37723b6a..5cccceb8bd 100644 --- a/crates/node/src/shims/abcipp_shim.rs +++ b/crates/node/src/shims/abcipp_shim.rs @@ -24,12 +24,10 @@ use 
super::abcipp_shim_types::shim::{ }; use crate::config; use crate::config::{Action, ActionAtHeight}; -use crate::facade::tendermint::v0_37::abci::{ - request, Request as Req, Response as Resp, -}; -use crate::facade::tower_abci::BoxError; use crate::shell::{EthereumOracleChannels, Shell}; use crate::storage::DbSnapshot; +use crate::tendermint::abci::{request, Request as Req, Response as Resp}; +use crate::tower_abci::BoxError; /// The shim wraps the shell, which implements ABCI++. /// The shim makes a crude translation between the ABCI interface currently used @@ -152,7 +150,7 @@ impl AbcippShim { .map_err(Error::from) .and_then(|res| match res { Response::FinalizeBlock(resp) => { - Ok(Resp::EndBlock(crate::facade::tendermint_proto::v0_37::abci::ResponseEndBlock::from(resp).try_into().unwrap())) + Ok(Resp::EndBlock(crate::tendermint_proto::abci::ResponseEndBlock::from(resp).try_into().unwrap())) } _ => Err(Error::ConvertResp(res)), }) diff --git a/crates/node/src/shims/abcipp_shim_types.rs b/crates/node/src/shims/abcipp_shim_types.rs index 32d53e20aa..620c78c989 100644 --- a/crates/node/src/shims/abcipp_shim_types.rs +++ b/crates/node/src/shims/abcipp_shim_types.rs @@ -1,4 +1,4 @@ -use crate::facade::tendermint::v0_37::abci::{Request, Response}; +use crate::tendermint::abci::{Request, Response}; pub mod shim { use std::fmt::Debug; @@ -8,10 +8,10 @@ pub mod shim { use thiserror::Error; use super::{Request as Req, Response as Resp}; - use crate::facade::tendermint::v0_37::abci::{ + use crate::shell; + use crate::tendermint::abci::{ request as tm_request, response as tm_response, }; - use crate::shell; pub type TxBytes = prost::bytes::Bytes; @@ -180,8 +180,8 @@ pub mod shim { use namada_sdk::tendermint::time::Time; use namada_sdk::time::DateTimeUtc; - use crate::facade::tendermint::abci::types::Misbehavior; - use crate::facade::tendermint::v0_37::abci::request as tm_request; + use crate::tendermint::abci::request as tm_request; + use 
crate::tendermint::abci::types::Misbehavior; pub struct VerifyHeader; @@ -316,13 +316,13 @@ pub mod shim { pub mod response { use namada_sdk::events::Event; - pub use crate::facade::tendermint::v0_37::abci::response::{ + pub use crate::tendermint::abci::response::{ PrepareProposal, ProcessProposal, }; - use crate::facade::tendermint_proto::v0_37::abci::{ + use crate::tendermint_proto::abci::{ Event as TmEvent, ValidatorUpdate, }; - use crate::facade::tendermint_proto::v0_37::types::ConsensusParams; + use crate::tendermint_proto::types::ConsensusParams; #[derive(Debug, Default)] pub struct VerifyHeader; @@ -343,9 +343,7 @@ pub mod shim { pub consensus_param_updates: Option, } - impl From - for crate::facade::tendermint_proto::v0_37::abci::ResponseEndBlock - { + impl From for crate::tendermint_proto::abci::ResponseEndBlock { fn from(resp: FinalizeBlock) -> Self { Self { events: resp diff --git a/crates/node/src/tendermint_node.rs b/crates/node/src/tendermint_node.rs index 32411a34cf..67336c0ad3 100644 --- a/crates/node/src/tendermint_node.rs +++ b/crates/node/src/tendermint_node.rs @@ -16,11 +16,9 @@ use tokio::process::{Child, Command}; use tokio::sync::oneshot::error::RecvError; use tokio::sync::oneshot::{Receiver, Sender}; -use crate::facade::tendermint::validator::Info; -use crate::facade::tendermint::{block, Genesis, Moniker, PublicKey}; -use crate::facade::tendermint_config::{ - Error as TendermintError, TendermintConfig, -}; +use crate::tendermint::validator::Info; +use crate::tendermint::{block, Genesis, Moniker, PublicKey}; +use crate::tendermint_config::{Error as TendermintError, TendermintConfig}; /// Env. 
var to output Tendermint log to stdout pub const ENV_VAR_TM_STDOUT: &str = "NAMADA_CMT_STDOUT"; diff --git a/crates/node/src/utils.rs b/crates/node/src/utils.rs index 913e2c718d..b2c02f40b2 100644 --- a/crates/node/src/utils.rs +++ b/crates/node/src/utils.rs @@ -10,7 +10,7 @@ use namada_sdk::address::{Address, ImplicitAddress}; use namada_sdk::key::common; use namada_sdk::wallet::FindKeyError; -use crate::facade::tendermint::Timeout; +use crate::tendermint::Timeout; pub fn test_genesis(args: TestGenesis, global_args: args::Global) { let TestGenesis { diff --git a/crates/sdk/src/queries/types.rs b/crates/sdk/src/queries/types.rs index 35970ab1fa..f5aeb2c298 100644 --- a/crates/sdk/src/queries/types.rs +++ b/crates/sdk/src/queries/types.rs @@ -5,8 +5,8 @@ use namada_state::{DBIter, StorageHasher, WlState, DB}; use thiserror::Error; use crate::events::log::EventLog; +pub use crate::tendermint::abci::request::Query as RequestQuery; use crate::tendermint::merkle::proof::ProofOps; -pub use crate::tendermint::v0_37::abci::request::Query as RequestQuery; /// A request context provides read-only access to storage and WASM compilation /// caches to request handlers. 
#[derive(Debug, Clone)] diff --git a/crates/tests/src/e2e/ibc_tests.rs b/crates/tests/src/e2e/ibc_tests.rs index 992f911586..694f1d52bb 100644 --- a/crates/tests/src/e2e/ibc_tests.rs +++ b/crates/tests/src/e2e/ibc_tests.rs @@ -24,9 +24,9 @@ use namada_apps_lib::client::utils::id_from_pk; use namada_apps_lib::config::genesis::{chain, templates}; use namada_apps_lib::config::utils::set_port; use namada_apps_lib::config::{ethereum_bridge, TendermintMode}; -use namada_apps_lib::facade::tendermint::block::Header as TmHeader; -use namada_apps_lib::facade::tendermint::merkle::proof::ProofOps as TmProof; -use namada_apps_lib::facade::tendermint_rpc::{Client, HttpClient, Url}; +use namada_apps_lib::tendermint::block::Header as TmHeader; +use namada_apps_lib::tendermint::merkle::proof::ProofOps as TmProof; +use namada_apps_lib::tendermint_rpc::{Client, HttpClient, Url}; use namada_core::string_encoding::StringEncoded; use namada_sdk::address::{Address, InternalAddress, MASP}; use namada_sdk::chain::{BlockHeight, Epoch}; diff --git a/crates/tests/src/e2e/ledger_tests.rs b/crates/tests/src/e2e/ledger_tests.rs index 07dd87e101..822400db5a 100644 --- a/crates/tests/src/e2e/ledger_tests.rs +++ b/crates/tests/src/e2e/ledger_tests.rs @@ -23,7 +23,7 @@ use namada_apps_lib::cli::context::ENV_VAR_CHAIN_ID; use namada_apps_lib::client::utils::PRE_GENESIS_DIR; use namada_apps_lib::config::utils::convert_tm_addr_to_socket_addr; use namada_apps_lib::config::{self, ethereum_bridge}; -use namada_apps_lib::facade::tendermint_config::net::Address as TendermintAddress; +use namada_apps_lib::tendermint_config::net::Address as TendermintAddress; use namada_apps_lib::wallet; use namada_core::chain::ChainId; use namada_core::token::NATIVE_MAX_DECIMAL_PLACES; diff --git a/crates/tests/src/integration/ledger_tests.rs b/crates/tests/src/integration/ledger_tests.rs index b5ca2372a6..fdd8d7233c 100644 --- a/crates/tests/src/integration/ledger_tests.rs +++ b/crates/tests/src/integration/ledger_tests.rs 
@@ -42,7 +42,7 @@ use crate::integration::setup; use crate::strings::{ TX_APPLIED_SUCCESS, TX_INSUFFICIENT_BALANCE, TX_REJECTED, }; -use crate::tendermint::abci::response::ApplySnapshotChunkResult; +use crate::tendermint::abci::ApplySnapshotChunkResult; use crate::tx::tx_host_env::gov_storage::proposal::{ PGFInternalTarget, PGFTarget, }; @@ -1755,7 +1755,7 @@ fn enforce_fee_payment() -> Result<()> { /// from one node to another. #[test] fn apply_snapshot() -> Result<()> { - use namada_node::facade::tendermint::v0_37::abci::{ + use namada_node::tendermint::abci::{ request as tm_request, response as tm_response, }; // This address doesn't matter for tests. But an argument is required. @@ -1880,7 +1880,7 @@ fn apply_snapshot() -> Result<()> { /// Test the various failure conditions of state sync #[test] fn snapshot_unhappy_flows() -> Result<()> { - use namada_node::facade::tendermint::v0_37::abci::{ + use namada_node::tendermint::abci::{ request as tm_request, response as tm_response, }; let (node, _services) = setup::setup()?; diff --git a/crates/tests/src/integration/setup.rs b/crates/tests/src/integration/setup.rs index 524df788bf..fc472e8a6e 100644 --- a/crates/tests/src/integration/setup.rs +++ b/crates/tests/src/integration/setup.rs @@ -12,8 +12,8 @@ use namada_apps_lib::config::genesis::chain::Finalized; use namada_apps_lib::config::genesis::templates; use namada_apps_lib::config::genesis::templates::load_and_validate; use namada_apps_lib::config::TendermintMode; -use namada_apps_lib::facade::tendermint::Timeout; -use namada_apps_lib::facade::tendermint_proto::google::protobuf::Timestamp; +use namada_apps_lib::tendermint::Timeout; +use namada_apps_lib::tendermint_proto::google::protobuf::Timestamp; use namada_apps_lib::wallet::defaults::derive_template_dir; use namada_apps_lib::wallet::pre_genesis; use namada_core::chain::ChainIdPrefix; @@ -229,7 +229,7 @@ fn create_node( auto_drive_services, }; let init_req = - 
namada_apps_lib::facade::tendermint::v0_37::abci::request::InitChain { + namada_apps_lib::tendermint::abci::request::InitChain { time: Timestamp { seconds: 0, nanos: 0, @@ -237,23 +237,23 @@ fn create_node( .try_into().unwrap(), chain_id: chain_id.to_string(), consensus_params: - namada_apps_lib::facade::tendermint::consensus::params::Params { - block: namada_apps_lib::facade::tendermint::block::Size { + namada_apps_lib::tendermint::consensus::params::Params { + block: namada_apps_lib::tendermint::block::Size { max_bytes: 0, max_gas: 0, time_iota_ms: 0, }, evidence: - namada_apps_lib::facade::tendermint::evidence::Params { + namada_apps_lib::tendermint::evidence::Params { max_age_num_blocks: 0, - max_age_duration: namada_apps_lib::facade::tendermint::evidence::Duration(core::time::Duration::MAX), + max_age_duration: namada_apps_lib::tendermint::evidence::Duration(core::time::Duration::MAX), max_bytes: 0, }, - validator: namada_apps_lib::facade::tendermint::consensus::params::ValidatorParams { + validator: namada_apps_lib::tendermint::consensus::params::ValidatorParams { pub_key_types: vec![] }, version: None, - abci: namada_apps_lib::facade::tendermint::consensus::params::AbciParams { + abci: namada_apps_lib::tendermint::consensus::params::AbciParams { vote_extensions_enable_height: None, }, }, diff --git a/fuzz/fuzz_targets/txs_prepare_proposal.rs b/fuzz/fuzz_targets/txs_prepare_proposal.rs index 444eb2e6b7..66a68dcbfa 100644 --- a/fuzz/fuzz_targets/txs_prepare_proposal.rs +++ b/fuzz/fuzz_targets/txs_prepare_proposal.rs @@ -2,10 +2,10 @@ use lazy_static::lazy_static; use libfuzzer_sys::fuzz_target; -use namada_node::facade::tendermint_proto::v0_37::abci::RequestPrepareProposal; use namada_node::shell; use namada_node::shell::test_utils::TestShell; use namada_node::shims::abcipp_shim_types::shim::TxBytes; +use namada_node::tendermint_proto::abci::RequestPrepareProposal; use namada_tx::Tx; lazy_static! 
{ From a899f28511eb8f2af4920da47d917ca5aaa5a3c1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Fri, 23 Aug 2024 13:11:24 +0100 Subject: [PATCH 65/73] changelog: add #3697 --- .../unreleased/improvements/3697-refactor-tm-reexports.md | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 .changelog/unreleased/improvements/3697-refactor-tm-reexports.md diff --git a/.changelog/unreleased/improvements/3697-refactor-tm-reexports.md b/.changelog/unreleased/improvements/3697-refactor-tm-reexports.md new file mode 100644 index 0000000000..f20855feaf --- /dev/null +++ b/.changelog/unreleased/improvements/3697-refactor-tm-reexports.md @@ -0,0 +1,2 @@ +- Only re-exporting tendermint-rs v0.37 modules. + ([\#3697](https://github.com/anoma/namada/pull/3697)) \ No newline at end of file From 16c49100fa615567bf64d0a1322091b56d92d8f2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Mon, 26 Aug 2024 14:39:46 +0100 Subject: [PATCH 66/73] core: remove `ByteBuf` and re-export `HEXLOWER` and `HEXUPPER` --- crates/core/Cargo.toml | 1 + crates/core/src/bytes.rs | 25 ++----------------------- crates/core/src/chain.rs | 5 ++--- crates/merkle_tree/src/lib.rs | 6 +++--- 4 files changed, 8 insertions(+), 29 deletions(-) diff --git a/crates/core/Cargo.toml b/crates/core/Cargo.toml index affc2339b5..c597070c59 100644 --- a/crates/core/Cargo.toml +++ b/crates/core/Cargo.toml @@ -94,6 +94,7 @@ tokio = { workspace = true, optional = true, default-features = false, features [dev-dependencies] assert_matches.workspace = true +lazy_static.workspace = true proptest.workspace = true rand.workspace = true rand_core.workspace = true diff --git a/crates/core/src/bytes.rs b/crates/core/src/bytes.rs index e157c80428..f2768446da 100644 --- a/crates/core/src/bytes.rs +++ b/crates/core/src/bytes.rs @@ -1,24 +1,3 @@ -//! A helper module for dealing with bytes +//! 
Bytes hex formatting -use std::fmt::Display; - -/// A helper to show bytes in hex -pub struct ByteBuf<'a>(pub &'a [u8]); - -impl<'a> std::fmt::LowerHex for ByteBuf<'a> { - fn fmt( - &self, - f: &mut std::fmt::Formatter<'_>, - ) -> std::result::Result<(), std::fmt::Error> { - for byte in self.0 { - f.write_fmt(format_args!("{:02x}", byte))?; - } - Ok(()) - } -} - -impl<'a> Display for ByteBuf<'a> { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{:x}", self) - } -} +pub use data_encoding::{HEXLOWER, HEXUPPER}; diff --git a/crates/core/src/chain.rs b/crates/core/src/chain.rs index 16b0f2bd08..44f76556af 100644 --- a/crates/core/src/chain.rs +++ b/crates/core/src/chain.rs @@ -5,7 +5,7 @@ use std::num::ParseIntError; use std::str::FromStr; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; -use data_encoding::HEXUPPER; +use data_encoding::{HEXLOWER, HEXUPPER}; use namada_macros::BorshDeserializer; #[cfg(feature = "migrations")] use namada_migrations::*; @@ -13,7 +13,6 @@ use serde::{Deserialize, Serialize}; use sha2::{Digest, Sha256}; use thiserror::Error; -use crate::bytes::ByteBuf; use crate::hash::Hash; use crate::time::DateTimeUtc; @@ -287,7 +286,7 @@ pub enum ParseBlockHashError { impl core::fmt::Debug for BlockHash { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let hash = format!("{}", ByteBuf(&self.0)); + let hash = HEXLOWER.encode(&self.0); f.debug_tuple("BlockHash").field(&hash).finish() } } diff --git a/crates/merkle_tree/src/lib.rs b/crates/merkle_tree/src/lib.rs index a499c5d8c8..72aa5ffacf 100644 --- a/crates/merkle_tree/src/lib.rs +++ b/crates/merkle_tree/src/lib.rs @@ -36,7 +36,7 @@ use ics23::{ExistenceProof, NonExistenceProof}; use ics23_specs::ibc_leaf_spec; use namada_core::address::{Address, InternalAddress}; use namada_core::borsh::{BorshDeserialize, BorshSerialize, BorshSerializeExt}; -use namada_core::bytes::ByteBuf; +use namada_core::bytes::HEXLOWER; pub use 
namada_core::chain::{BlockHeight, Epoch}; use namada_core::eth_bridge_pool::{is_pending_transfer_key, PendingTransfer}; pub use namada_core::hash::{Hash, StorageHasher}; @@ -470,7 +470,7 @@ pub struct MerkleTree { impl core::fmt::Debug for MerkleTree { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let root_hash = format!("{}", ByteBuf(self.base.root().as_slice())); + let root_hash = HEXLOWER.encode(self.base.root().as_slice()); f.debug_struct("MerkleTree") .field("root_hash", &root_hash) .finish() @@ -846,7 +846,7 @@ impl From for Hash { impl fmt::Display for MerkleRoot { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", ByteBuf(&self.0)) + write!(f, "{}", HEXLOWER.encode(&self.0)) } } From ac430600685b58809ceab2a97b13d82250ccd219 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Mon, 26 Aug 2024 14:59:52 +0100 Subject: [PATCH 67/73] test/core/ibc: add basic unit tests --- crates/core/src/ibc.rs | 38 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/crates/core/src/ibc.rs b/crates/core/src/ibc.rs index 757d5a512b..aa250e38a7 100644 --- a/crates/core/src/ibc.rs +++ b/crates/core/src/ibc.rs @@ -164,3 +164,41 @@ impl borsh::BorshSchema for PGFIbcTarget { std::any::type_name::().into() } } + +#[cfg(test)] +mod test { + use super::*; + use crate::{decode, encode}; + + #[test] + fn test_ibc_token_hash() { + let hash = IbcTokenHash([ + 0_u8, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]); + + let hash_str = hash.to_string(); + assert_eq!("00ff000000000000000000000000000000000000", &hash_str); + + let decoded = IbcTokenHash::from_str(&hash_str).unwrap(); + assert_eq!(decoded, hash); + + // Hex decoding is case-insensitive + let decoded = + IbcTokenHash::from_str("00FF000000000000000000000000000000000000") + .unwrap(); + assert_eq!(decoded, hash); + } + + #[test] + fn test_ibc_pgf_target() { + let target = PGFIbcTarget { + target: 
"123".to_string(), + amount: token::Amount::from_u64(123456789), + port_id: PortId::new("10".to_string()).unwrap(), + channel_id: ChannelId::new(5), + }; + let bytes = encode(&target); + let decoded: PGFIbcTarget = decode(bytes).unwrap(); + assert_eq!(target, decoded); + } +} From 984e9d32aacde0c30de25e2a1ac7a67d932af5fb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Mon, 26 Aug 2024 15:15:26 +0100 Subject: [PATCH 68/73] test/core/masp: test basics --- crates/core/src/masp.rs | 103 ++++++++++++++++++++++++----- crates/core/src/string_encoding.rs | 15 +++++ 2 files changed, 103 insertions(+), 15 deletions(-) diff --git a/crates/core/src/masp.rs b/crates/core/src/masp.rs index 7bc4429b27..1ae82ee00a 100644 --- a/crates/core/src/masp.rs +++ b/crates/core/src/masp.rs @@ -10,6 +10,7 @@ use borsh_ext::BorshSerializeExt; use masp_primitives::asset_type::AssetType; use masp_primitives::sapling::ViewingKey; use masp_primitives::transaction::TransparentAddress; +pub use masp_primitives::transaction::TxId as TxIdInner; use namada_macros::BorshDeserializer; #[cfg(feature = "migrations")] use namada_migrations::*; @@ -27,10 +28,7 @@ use crate::string_encoding::{ use crate::token::{Denomination, MaspDigitPos}; /// Serialize the given TxId -pub fn serialize_txid( - txid: &masp_primitives::transaction::TxId, - s: S, -) -> Result +pub fn serialize_txid(txid: &TxIdInner, s: S) -> Result where S: Serializer, { @@ -38,15 +36,13 @@ where } /// Deserialize the given TxId -pub fn deserialize_txid<'de, D>( - deserializer: D, -) -> Result +pub fn deserialize_txid<'de, D>(deserializer: D) -> Result where D: Deserializer<'de>, { - Ok(masp_primitives::transaction::TxId::from_bytes( - Deserialize::deserialize(deserializer)?, - )) + Ok(TxIdInner::from_bytes(Deserialize::deserialize( + deserializer, + )?)) } /// Wrapper for masp_primitive's TxId @@ -71,11 +67,11 @@ pub struct MaspTxId( serialize_with = "serialize_txid", deserialize_with = "deserialize_txid" )] 
- masp_primitives::transaction::TxId, + TxIdInner, ); -impl From for MaspTxId { - fn from(txid: masp_primitives::transaction::TxId) -> Self { +impl From for MaspTxId { + fn from(txid: TxIdInner) -> Self { Self(txid) } } @@ -111,8 +107,8 @@ impl FromStr for MaspEpoch { type Err = ParseIntError; fn from_str(s: &str) -> std::result::Result { - let raw: u64 = u64::from_str(s)?; - Ok(Self(Epoch(raw))) + let inner: Epoch = Epoch::from_str(s)?; + Ok(Self(inner)) } } @@ -467,6 +463,18 @@ impl string_encoding::Format for ExtendedSpendingKey { impl_display_and_from_str_via_format!(ExtendedSpendingKey); +impl ExtendedSpendingKey { + /// Derive a viewing key + pub fn to_viewing_key(&self) -> ExtendedViewingKey { + ExtendedViewingKey::from( + #[allow(deprecated)] + { + self.0.to_extended_full_viewing_key() + }, + ) + } +} + impl From for masp_primitives::zip32::ExtendedSpendingKey { fn from(key: ExtendedSpendingKey) -> Self { key.0 @@ -775,3 +783,68 @@ impl FromStr for MaspTxRefs { serde_json::from_str(s) } } + +#[cfg(test)] +mod test { + use super::*; + use crate::address; + + #[test] + fn test_masp_tx_id_basics() { + let tx_id = MaspTxId::from(TxIdInner::from_bytes([ + 0, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ])); + let tx_id_str = serde_json::to_string(&tx_id).unwrap(); + let decoded: MaspTxId = serde_json::from_str(&tx_id_str).unwrap(); + assert_eq!(tx_id, decoded); + } + + #[test] + fn test_masp_epoch_basics() { + let epoch = MaspEpoch::new(123); + let epoch_str = epoch.to_string(); + assert_eq!(&epoch_str, "123"); + let decoded = MaspEpoch::from_str(&epoch_str).unwrap(); + assert_eq!(epoch, decoded); + } + + #[test] + fn test_masp_asset_data_basics() { + let mut data = AssetData { + token: address::testing::nam(), + denom: Denomination(6), + position: MaspDigitPos::One, + epoch: None, + }; + + data.undate(); + assert!(data.epoch.is_none()); + + let epoch_0 = MaspEpoch::new(3); + let old = data.redate(epoch_0); + 
assert!(old.is_none()); + assert!(data.epoch.is_none()); + data.epoch = Some(epoch_0); + + let epoch_1 = MaspEpoch::new(5); + let old = data.redate(epoch_1); + assert_eq!(old, Some(epoch_0)); + assert_eq!(data.epoch, Some(epoch_1)); + } + + #[test] + fn test_masp_keys_basics() { + let sk = ExtendedSpendingKey::from( + masp_primitives::zip32::ExtendedSpendingKey::master(&[0_u8]), + ); + string_encoding::testing::test_string_formatting(&sk); + + let vk = sk.to_viewing_key(); + string_encoding::testing::test_string_formatting(&vk); + + let (_diversifier, pa) = sk.0.default_address(); + let pa = PaymentAddress::from(pa); + string_encoding::testing::test_string_formatting(&pa); + } +} diff --git a/crates/core/src/string_encoding.rs b/crates/core/src/string_encoding.rs index c853fde433..90b04470f1 100644 --- a/crates/core/src/string_encoding.rs +++ b/crates/core/src/string_encoding.rs @@ -224,3 +224,18 @@ where let val_str: String = serde::Deserialize::deserialize(deserializer)?; FromStr::from_str(&val_str).map_err(serde::de::Error::custom) } + +/// Testing helpers +#[cfg(any(test, feature = "testing"))] +pub mod testing { + use core::fmt::Debug; + + use super::Format; + + /// String encoding roundtrip test + pub fn test_string_formatting(val: &T) { + let str = Format::encode(val); + let decoded: T = Format::decode(str).unwrap(); + assert_eq!(val, &decoded) + } +} From 6a1a8af714fed9b1974ea7e5ecfa5132a9231765 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Tue, 27 Aug 2024 11:23:19 +0100 Subject: [PATCH 69/73] codecov: ignore more utils & testing code --- codecov.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/codecov.yml b/codecov.yml index d17d314298..c401ce211f 100644 --- a/codecov.yml +++ b/codecov.yml @@ -37,4 +37,6 @@ ignore: - crates/apps/src/bin - crates/apps_lib/src/cli - crates/apps_lib/src/client - - crates/apps_lib/src/wasm_loader \ No newline at end of file + - crates/apps_lib/src/wasm_loader + - 
crates/node/src/bench_utils + - crates/node/src/shell/testing From ff5f6ba57e5a0974477a11b12d462478a0e295c4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Tue, 27 Aug 2024 13:17:56 +0100 Subject: [PATCH 70/73] tests: remove "integration" feature --- .github/workflows/ci.yml | 2 +- Makefile | 11 +++------ crates/apps_lib/Cargo.toml | 3 +-- crates/apps_lib/src/config/genesis.rs | 5 +--- crates/core/Cargo.toml | 2 +- crates/node/Cargo.toml | 3 +-- crates/node/src/shell/init_chain.rs | 33 ++++++++++----------------- crates/node/src/shell/mod.rs | 32 ++++++++++++++------------ crates/sdk/Cargo.toml | 2 +- crates/state/Cargo.toml | 2 +- crates/tests/Cargo.toml | 1 - 11 files changed, 40 insertions(+), 56 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 876458ef07..9e7d081315 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -388,7 +388,7 @@ jobs: name: wasm-for-tests-${{ github.event.pull_request.head.sha|| github.sha }} path: wasm_for_tests - name: Run integration tests - run: cargo +${{ env.NIGHTLY }} nextest run -E 'test(integration)' --test-threads 1 --no-fail-fast --features integration + run: cargo +${{ env.NIGHTLY }} nextest run -E 'test(integration)' --test-threads 1 --no-fail-fast env: RUSTFLAGS: "-C linker=clang -C link-arg=-fuse-ld=/usr/local/bin/mold -Z threads=8" - name: Clean cargo cache diff --git a/Makefile b/Makefile index 8f232d1521..cc792d58e1 100644 --- a/Makefile +++ b/Makefile @@ -155,15 +155,10 @@ audit: test: test-unit test-e2e test-wasm test-benches test-coverage: - # Run integration tests separately because they require `integration` - # feature (and without coverage) $(cargo) +$(nightly) llvm-cov --output-path lcov.info \ --lcov \ - -- --skip e2e --skip pos_state_machine_test --skip integration \ - -Z unstable-options --report-time && \ - $(cargo) +$(nightly) test integration:: \ - --features integration \ - -- -Z unstable-options --report-time + -- 
--skip e2e --skip pos_state_machine_test \ + -Z unstable-options --report-time # NOTE: `TEST_FILTER` is prepended with `e2e::`. Since filters in `cargo test` # work with a substring search, TEST_FILTER only works if it contains a string @@ -183,7 +178,7 @@ test-e2e: # Run integration tests test-integration: RUST_BACKTRACE=$(RUST_BACKTRACE) \ - $(cargo) +$(nightly) test --lib $(jobs) integration::$(TEST_FILTER) --features integration \ + $(cargo) +$(nightly) test --lib $(jobs) integration::$(TEST_FILTER) \ -Z unstable-options \ -- \ --test-threads=1 \ diff --git a/crates/apps_lib/Cargo.toml b/crates/apps_lib/Cargo.toml index c5b76cc2ea..d86d20844a 100644 --- a/crates/apps_lib/Cargo.toml +++ b/crates/apps_lib/Cargo.toml @@ -17,10 +17,9 @@ default = ["migrations"] mainnet = [ "namada_sdk/mainnet", ] -# for integration tests and test utilities +# for tests and test utilities testing = ["lazy_static", "namada_sdk/testing"] benches = ["lazy_static", "namada_sdk/benches"] -integration = [] migrations = [ "namada_migrations", "namada_sdk/migrations", diff --git a/crates/apps_lib/src/config/genesis.rs b/crates/apps_lib/src/config/genesis.rs index 794ac01c7d..af8face842 100644 --- a/crates/apps_lib/src/config/genesis.rs +++ b/crates/apps_lib/src/config/genesis.rs @@ -422,10 +422,7 @@ pub struct Parameters { /// This includes adding the Ethereum bridge parameters and /// adding a specified number of validators. 
#[allow(clippy::arithmetic_side_effects)] -#[cfg(all( - any(test, feature = "benches", feature = "testing"), - not(feature = "integration") -))] +#[cfg(any(test, feature = "benches", feature = "testing"))] pub fn make_dev_genesis( num_validators: u64, target_chain_dir: &std::path::Path, diff --git a/crates/core/Cargo.toml b/crates/core/Cargo.toml index c597070c59..17a1e1e462 100644 --- a/crates/core/Cargo.toml +++ b/crates/core/Cargo.toml @@ -19,7 +19,7 @@ rand = ["dep:rand", "rand_core"] ethers-derive = [ "ethbridge-structs/ethers-derive" ] -# for integration tests and test utilities +# for tests and test utilities testing = [ "rand", "proptest", diff --git a/crates/node/Cargo.toml b/crates/node/Cargo.toml index 8c10c79ab1..5492fafd27 100644 --- a/crates/node/Cargo.toml +++ b/crates/node/Cargo.toml @@ -17,7 +17,7 @@ default = ["migrations"] mainnet = [ "namada_sdk/mainnet", ] -# for integration tests and test utilities +# for tests and test utilities testing = [ "namada_apps_lib/testing", "namada_test_utils", @@ -31,7 +31,6 @@ benches = [ "tracing-subscriber", "rand_core" ] -integration = ["namada_apps_lib/integration"] jemalloc = ["rocksdb/jemalloc"] migrations = [ "namada_migrations", diff --git a/crates/node/src/shell/init_chain.rs b/crates/node/src/shell/init_chain.rs index 7f26a5c106..ca13aa6cc2 100644 --- a/crates/node/src/shell/init_chain.rs +++ b/crates/node/src/shell/init_chain.rs @@ -86,7 +86,7 @@ where &mut self, init: request::InitChain, #[cfg(any(test, feature = "testing", feature = "benches"))] - _num_validators: u64, + num_validators: u64, ) -> ShellResult { let mut response = response::InitChain::default(); let chain_id = self.state.in_mem().chain_id.as_str(); @@ -127,38 +127,29 @@ where } // Read the genesis files - #[cfg(any( - feature = "integration", - not(any(test, fuzzing, feature = "benches")) - ))] + #[cfg(not(any(test, fuzzing, feature = "benches")))] let genesis = { let chain_dir = self.base_dir.join(chain_id); 
genesis::chain::Finalized::read_toml_files(&chain_dir) .expect("Missing genesis files") }; - #[cfg(all( - any(test, fuzzing, feature = "benches"), - not(feature = "integration") - ))] + #[cfg(any(test, fuzzing, feature = "benches"))] let genesis = { let chain_dir = self.base_dir.join(chain_id); - genesis::make_dev_genesis(_num_validators, &chain_dir) + if chain_dir.join(genesis::chain::METADATA_FILE_NAME).exists() { + genesis::chain::Finalized::read_toml_files(&chain_dir) + .expect("Missing genesis files") + } else { + genesis::make_dev_genesis(num_validators, &chain_dir) + } }; - #[cfg(all( - any(test, fuzzing, feature = "benches"), - not(feature = "integration") - ))] - { - // update the native token from the genesis file - let native_token = genesis.get_native_token().clone(); - self.state.in_mem_mut().native_token = native_token; - } + let mut validation = InitChainValidation::new(self, false); validation.run( init, genesis, #[cfg(any(test, feature = "testing"))] - _num_validators, + num_validators, ); // propagate errors or panic validation.error_out()?; @@ -969,7 +960,7 @@ impl Policy { } } -#[cfg(all(test, not(feature = "integration")))] +#[cfg(test)] mod test { use std::str::FromStr; diff --git a/crates/node/src/shell/mod.rs b/crates/node/src/shell/mod.rs index 457129ee56..728a141ddb 100644 --- a/crates/node/src/shell/mod.rs +++ b/crates/node/src/shell/mod.rs @@ -493,23 +493,27 @@ where .expect("Creating directory for Namada should not fail"); } - // For all tests except integration use hard-coded native token addr ... - #[cfg(all( - any(test, fuzzing, feature = "testing", feature = "benches"), - not(feature = "integration"), - ))] - let native_token = namada_sdk::address::testing::nam(); + // For tests, fuzzing and benches use hard-coded native token addr ... 
+ #[cfg(any(test, fuzzing, feature = "benches"))] + let native_token = { + let chain_dir = base_dir.join(chain_id.as_str()); + // Use genesis file only if it exists + if chain_dir + .join(genesis::templates::TOKENS_FILE_NAME) + .exists() + { + genesis::chain::Finalized::read_native_token(&chain_dir) + .expect("Missing genesis files") + } else { + namada_sdk::address::testing::nam() + } + }; // ... Otherwise, look it up from the genesis file - #[cfg(not(all( - any(test, fuzzing, feature = "testing", feature = "benches"), - not(feature = "integration"), - )))] + #[cfg(not(any(test, fuzzing, feature = "benches")))] let native_token = { let chain_dir = base_dir.join(chain_id.as_str()); - let genesis = - genesis::chain::Finalized::read_toml_files(&chain_dir) - .expect("Missing genesis files"); - genesis.get_native_token().clone() + genesis::chain::Finalized::read_native_token(&chain_dir) + .expect("Missing genesis files") }; // load last state from storage diff --git a/crates/sdk/Cargo.toml b/crates/sdk/Cargo.toml index ff5209547c..6cad716918 100644 --- a/crates/sdk/Cargo.toml +++ b/crates/sdk/Cargo.toml @@ -24,7 +24,7 @@ namada-eth-bridge = ["namada_ethereum_bridge/namada-eth-bridge"] benches = ["namada_core/benches", "namada_core/testing", "namada_state/benches"] wasm-runtime = ["namada_vm/wasm-runtime"] -# for integration tests and test utilities +# for tests and test utilities testing = [ "masp_primitives/test-dependencies", "namada_account/testing", diff --git a/crates/state/Cargo.toml b/crates/state/Cargo.toml index bc327a7aaf..b535d6ecc7 100644 --- a/crates/state/Cargo.toml +++ b/crates/state/Cargo.toml @@ -15,7 +15,7 @@ version.workspace = true [features] default = [] -# for integration tests and test utilities +# for tests and test utilities testing = [ "namada_core/testing", "namada_merkle_tree/testing", diff --git a/crates/tests/Cargo.toml b/crates/tests/Cargo.toml index 263d1bc826..6f0134b207 100644 --- a/crates/tests/Cargo.toml +++ b/crates/tests/Cargo.toml 
@@ -17,7 +17,6 @@ default = [] mainnet = [ "namada_sdk/mainnet", ] -integration = ["namada_node/integration", "namada_apps_lib/integration"] migrations = [ "namada_sdk/migrations", "namada_core/migrations", From dcd356b684fe1203f3d24aefaf87e4f560837dfd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Wed, 28 Aug 2024 11:34:01 +0100 Subject: [PATCH 71/73] make: add `test-unit-with-coverage` --- Makefile | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index cc792d58e1..486f40f92b 100644 --- a/Makefile +++ b/Makefile @@ -202,7 +202,16 @@ test-unit-with-eth-bridge: test-unit-with-coverage: $(cargo) +$(nightly) llvm-cov --output-path lcov.info \ --lcov \ - -- --skip e2e --skip pos_state_machine_test --skip integration \ + -- --lib \ + --skip e2e --skip pos_state_machine_test --skip integration \ + -Z unstable-options --report-time + +test-integration-with-coverage: + $(cargo) +$(nightly) llvm-cov --output-path lcov.info \ + --lcov \ + -- integration \ + --lib \ + --test-threads=1 \ -Z unstable-options --report-time test-unit-mainnet: From c7ea77d216608d29d8c42151f1f48ff6993ec367 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Wed, 28 Aug 2024 11:42:20 +0100 Subject: [PATCH 72/73] ci: run integration tests with coverage --- .github/workflows/ci.yml | 44 ++++++++++++++++++++++++++++++++-------- Makefile | 8 +++----- 2 files changed, 39 insertions(+), 13 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 9e7d081315..209bb3e875 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -290,12 +290,11 @@ jobs: # run: cargo +${{ env.NIGHTLY }} llvm-cov nextest run -E 'not test(e2e)' -E 'not test(integration)' -E 'not test(pos_state_machine_test)' --features namada/testing --no-fail-fast --lcov --output-path lcov.info env: RUSTFLAGS: "-C linker=clang -C link-arg=-fuse-ld=/usr/local/bin/mold -Z threads=8" - - name: 
Upload coverage - uses: codecov/codecov-action@v4 + - name: Store coverage file artifact + uses: actions/upload-artifact@v4 with: - files: lcov.info - fail_ci_if_error: true - token: ${{ secrets.CODECOV_TOKEN }} + name: unit-cov-${{ github.event.pull_request.head.sha || github.sha }}.info + path: lcov.info # output of `make test-unit-with-coverage` - name: Clean cargo cache if: steps.cache.outputs.cache-hit != 'true' run: cargo cache --autoclean-expensive @@ -387,10 +386,16 @@ jobs: with: name: wasm-for-tests-${{ github.event.pull_request.head.sha|| github.sha }} path: wasm_for_tests - - name: Run integration tests - run: cargo +${{ env.NIGHTLY }} nextest run -E 'test(integration)' --test-threads 1 --no-fail-fast + - name: Run integration tests with coverage + run: make test-integration-with-coverage + # run: cargo +${{ env.NIGHTLY }} nextest run -E 'test(integration)' --test-threads 1 --no-fail-fast env: RUSTFLAGS: "-C linker=clang -C link-arg=-fuse-ld=/usr/local/bin/mold -Z threads=8" + - name: Store coverage file artifact + uses: actions/upload-artifact@v4 + with: + name: integration-cov-${{ github.event.pull_request.head.sha || github.sha }}.info + path: lcov.info # output of `make test-integration-with-coverage` - name: Clean cargo cache if: steps.cache.outputs.cache-hit != 'true' run: cargo cache --autoclean-expensive @@ -607,4 +612,27 @@ jobs: run: cargo cache --autoclean-expensive - name: Stop sccache if: always() && steps.sccache.conclusion == 'success' - run: sccache --stop-server || true \ No newline at end of file + run: sccache --stop-server || true + + upload-coverage: + runs-on: [ubuntu-latest] + timeout-minutes: 20 + needs: [test-unit, test-integration] + + steps: + - name: Download unit coverage artifacts + uses: actions/download-artifact@v4 + with: + name: unit-cov-${{ github.event.pull_request.head.sha || github.sha }}.info + path: unit-cov.info + - name: Download integration coverage artifacts + uses: actions/download-artifact@v4 + with: + name: 
integration-cov-${{ github.event.pull_request.head.sha || github.sha }}.info + path: integration-cov.info + - name: Upload coverage to codecov + uses: codecov/codecov-action@v4 + with: + files: integration-cov.info, unit-cov.info + fail_ci_if_error: true + token: ${{ secrets.CODECOV_TOKEN }} diff --git a/Makefile b/Makefile index 486f40f92b..3105c234f1 100644 --- a/Makefile +++ b/Makefile @@ -200,17 +200,15 @@ test-unit-with-eth-bridge: -Z unstable-options --report-time test-unit-with-coverage: - $(cargo) +$(nightly) llvm-cov --output-path lcov.info \ + $(cargo) +$(nightly) llvm-cov --lib --output-path lcov.info \ --lcov \ - -- --lib \ - --skip e2e --skip pos_state_machine_test --skip integration \ + -- --skip e2e --skip pos_state_machine_test --skip integration \ -Z unstable-options --report-time test-integration-with-coverage: - $(cargo) +$(nightly) llvm-cov --output-path lcov.info \ + $(cargo) +$(nightly) llvm-cov --lib --output-path lcov.info \ --lcov \ -- integration \ - --lib \ --test-threads=1 \ -Z unstable-options --report-time From a0b8946b4f9dc9a8c122c1ffe77b8265d7a8d20e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Zemanovi=C4=8D?= Date: Wed, 28 Aug 2024 15:08:30 +0100 Subject: [PATCH 73/73] ci: fix coverage files paths --- .github/workflows/ci.yml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 209bb3e875..580a529cfd 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -293,7 +293,7 @@ jobs: - name: Store coverage file artifact uses: actions/upload-artifact@v4 with: - name: unit-cov-${{ github.event.pull_request.head.sha || github.sha }}.info + name: unit-cov-${{ github.event.pull_request.head.sha || github.sha }} path: lcov.info # output of `make test-unit-with-coverage` - name: Clean cargo cache if: steps.cache.outputs.cache-hit != 'true' @@ -394,7 +394,7 @@ jobs: - name: Store coverage file artifact uses: 
actions/upload-artifact@v4 with: - name: integration-cov-${{ github.event.pull_request.head.sha || github.sha }}.info + name: integration-cov-${{ github.event.pull_request.head.sha || github.sha }} path: lcov.info # output of `make test-integration-with-coverage` - name: Clean cargo cache if: steps.cache.outputs.cache-hit != 'true' @@ -623,16 +623,16 @@ jobs: - name: Download unit coverage artifacts uses: actions/download-artifact@v4 with: - name: unit-cov-${{ github.event.pull_request.head.sha || github.sha }}.info - path: unit-cov.info + name: unit-cov-${{ github.event.pull_request.head.sha || github.sha }} + path: unit-cov - name: Download integration coverage artifacts uses: actions/download-artifact@v4 with: - name: integration-cov-${{ github.event.pull_request.head.sha || github.sha }}.info - path: integration-cov.info + name: integration-cov-${{ github.event.pull_request.head.sha || github.sha }} + path: integration-cov - name: Upload coverage to codecov uses: codecov/codecov-action@v4 with: - files: integration-cov.info, unit-cov.info + files: integration-cov/lcov.info, unit-cov/lcov.info fail_ci_if_error: true token: ${{ secrets.CODECOV_TOKEN }}