diff --git a/Cargo.lock b/Cargo.lock index d49f0ccdacd..66783963b47 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -150,9 +150,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.75" +version = "1.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4668cab20f66d8d020e1fbc0ebe47217433c1b6c8f2040faf858554e394ace6" +checksum = "080e9890a082662b09c1ad45f567faeeb47f22b5fb23895fbe1e651e718e25ca" [[package]] name = "arbitrary" @@ -203,7 +203,7 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] @@ -214,7 +214,7 @@ checksum = "a66537f1bb974b254c98ed142ff995236e81b9d0fe4db0575f46612cb15eb0f9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] @@ -612,7 +612,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] @@ -1072,7 +1072,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] @@ -1099,7 +1099,7 @@ dependencies = [ "proc-macro2", "quote", "scratch", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] @@ -1116,7 +1116,7 @@ checksum = "587663dd5fb3d10932c8aecfe7c844db1bcf0aee93eeab08fac13dc1212c2e7f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] @@ -1140,7 +1140,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] @@ -1151,7 +1151,7 @@ checksum = "836a9bbc7ad63342d6d6e7b815ccab164bc77a2d95d84bc3117a8c0d5c98e2d5" dependencies = [ "darling_core", "quote", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] @@ -1279,7 +1279,7 @@ checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] @@ -1562,9 +1562,9 @@ dependencies = [ [[package]] name = 
"futures-channel" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff4dd66668b557604244583e3e1e1eada8c5c2e96a6d0d6653ede395b78bbacb" +checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" dependencies = [ "futures-core", "futures-sink", @@ -1572,9 +1572,9 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb1d22c66e66d9d72e1758f0bd7d4fd0bee04cad842ee34587d68c07e45d088c" +checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" [[package]] name = "futures-executor" @@ -1589,38 +1589,38 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bf34a163b5c4c52d0478a4d757da8fb65cabef42ba90515efee0f6f9fa45aaa" +checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" [[package]] name = "futures-macro" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53b153fd91e4b0147f4aced87be237c98248656bb01050b96bf3ee89220a8ddb" +checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] name = "futures-sink" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e36d3378ee38c2a36ad710c5d30c2911d752cb941c00c72dbabfb786a7970817" +checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" [[package]] name = "futures-task" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efd193069b0ddadc69c46389b740bbccdd97203899b48d09c5f7969591d6bae2" +checksum = 
"38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" [[package]] name = "futures-util" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a19526d624e703a3179b3d322efec918b6246ea0fa51d41124525f00f1cc8104" +checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" dependencies = [ "futures-channel", "futures-core", @@ -1984,7 +1984,7 @@ checksum = "02a5bcaf6704d9354a3071cede7e77d366a5980c7352e102e2c2f9b645b1d3ae" dependencies = [ "proc-macro2", "quote", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] @@ -2597,6 +2597,7 @@ dependencies = [ "once_cell", "owo-colors", "path-absolutize", + "rand 0.8.5", "serde_json", "serial_test", "supports-color 2.1.0", @@ -2852,7 +2853,7 @@ dependencies = [ "quote", "serde", "serde_json", - "syn 2.0.41", + "syn 2.0.48", "trybuild", ] @@ -2866,7 +2867,7 @@ dependencies = [ "manyhow", "proc-macro2", "quote", - "syn 2.0.41", + "syn 2.0.48", "trybuild", ] @@ -2891,7 +2892,7 @@ dependencies = [ "manyhow", "proc-macro2", "quote", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] @@ -2917,7 +2918,7 @@ dependencies = [ "proc-macro2", "quote", "rustc-hash", - "syn 2.0.41", + "syn 2.0.48", "trybuild", ] @@ -2943,7 +2944,7 @@ dependencies = [ "manyhow", "proc-macro2", "quote", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] @@ -2998,7 +2999,7 @@ dependencies = [ "proc-macro2", "quote", "syn 1.0.109", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] @@ -3052,7 +3053,7 @@ dependencies = [ "manyhow", "proc-macro2", "quote", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] @@ -3076,7 +3077,7 @@ dependencies = [ "manyhow", "proc-macro2", "quote", - "syn 2.0.41", + "syn 2.0.48", "trybuild", ] @@ -3112,7 +3113,7 @@ dependencies = [ "manyhow", "proc-macro2", "quote", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] @@ -3242,7 +3243,7 @@ dependencies = [ "manyhow", "proc-macro2", "quote", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] @@ -3273,7 +3274,7 @@ dependencies = [ 
"quote", "serde", "serde_json", - "syn 2.0.41", + "syn 2.0.48", "trybuild", ] @@ -3409,9 +3410,9 @@ dependencies = [ [[package]] name = "k256" -version = "0.13.2" +version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f01b677d82ef7a676aa37e099defd83a28e15687112cafdd112d60236b6115b" +checksum = "956ff9b67e26e1a6a866cb758f12c6f8746208489e3e4a4b5580802f2f0a587b" dependencies = [ "cfg-if", "ecdsa", @@ -3585,7 +3586,7 @@ dependencies = [ "proc-macro2", "quote", "syn 1.0.109", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] @@ -3843,9 +3844,9 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "openssl" -version = "0.10.61" +version = "0.10.62" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b8419dc8cc6d866deb801274bba2e6f8f6108c1bb7fcc10ee5ab864931dbb45" +checksum = "8cde4d2d9200ad5909f8dac647e29482e07c3a35de8a13fce7c9c7747ad9f671" dependencies = [ "bitflags 2.4.1", "cfg-if", @@ -3864,7 +3865,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] @@ -3884,9 +3885,9 @@ dependencies = [ [[package]] name = "openssl-sys" -version = "0.9.97" +version = "0.9.98" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3eaad34cdd97d81de97964fc7f29e2d104f483840d906ef56daa1912338460b" +checksum = "c1665caf8ab2dc9aef43d1c0023bd904633a6a05cb30b0ad59bec2ae986e57a7" dependencies = [ "cc", "libc", @@ -4004,7 +4005,7 @@ dependencies = [ "regex", "regex-syntax 0.7.5", "structmeta", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] @@ -4074,7 +4075,7 @@ dependencies = [ "pest_meta", "proc-macro2", "quote", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] @@ -4115,7 +4116,7 @@ checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ "proc-macro2", "quote", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] @@ 
-4250,9 +4251,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.70" +version = "1.0.76" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39278fbbf5fb4f646ce651690877f89d1c5811a3d4acb27700c1cb3cdb78fd3b" +checksum = "95fc56cda0b5c3325f5fbbd7ff9fda9e02bb00bb3dac51252d2f1bfa1cb8cc8c" dependencies = [ "unicode-ident", ] @@ -4317,7 +4318,7 @@ dependencies = [ "itertools 0.11.0", "proc-macro2", "quote", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] @@ -4346,9 +4347,9 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quote" -version = "1.0.33" +version = "1.0.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" +checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef" dependencies = [ "proc-macro2", ] @@ -4735,9 +4736,9 @@ dependencies = [ [[package]] name = "secp256k1" -version = "0.28.0" +version = "0.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2acea373acb8c21ecb5a23741452acd2593ed44ee3d343e72baaa143bc89d0d5" +checksum = "3f622567e3b4b38154fb8190bcf6b160d7a4301d70595a49195b48c116007a27" dependencies = [ "rand 0.8.5", "secp256k1-sys", @@ -4746,9 +4747,9 @@ dependencies = [ [[package]] name = "secp256k1-sys" -version = "0.9.1" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dd97a086ec737e30053fd5c46f097465d25bb81dd3608825f65298c4c98be83" +checksum = "e5d1746aae42c19d583c3c1a8c646bfad910498e2051c551a7f2e3c0c9fbb7eb" dependencies = [ "cc", ] @@ -4808,7 +4809,7 @@ checksum = "43576ca501357b9b071ac53cdc7da8ef0cbd9493d8df094cd821777ea6e894d3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] @@ -4853,7 +4854,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] @@ -4891,7 +4892,7 @@ checksum 
= "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] @@ -4924,9 +4925,9 @@ dependencies = [ [[package]] name = "sha256" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7895c8ae88588ccead14ff438b939b0c569cd619116f14b4d13fdff7b8333386" +checksum = "18278f6a914fa3070aa316493f7d2ddfb9ac86ebc06fa3b83bffda487e9065b0" dependencies = [ "async-trait", "bytes", @@ -5139,7 +5140,7 @@ dependencies = [ "proc-macro2", "quote", "structmeta-derive", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] @@ -5150,7 +5151,7 @@ checksum = "a60bcaff7397072dca0017d1db428e30d5002e00b6847703e2e42005c95fbe00" dependencies = [ "proc-macro2", "quote", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] @@ -5191,7 +5192,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] @@ -5242,9 +5243,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.41" +version = "2.0.48" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44c8b28c477cc3bf0e7966561e3460130e1255f7a1cf71931075f1c5e7a7e269" +checksum = "0f3531638e407dfc0814761abb7c00a5b54992b849452a0646b7f65c9f770f3f" dependencies = [ "proc-macro2", "quote", @@ -5327,22 +5328,22 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.50" +version = "1.0.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9a7210f5c9a7156bb50aa36aed4c95afb51df0df00713949448cf9e97d382d2" +checksum = "d54378c645627613241d077a3a79db965db602882668f9136ac42af9ecb730ad" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.50" +version = "1.0.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "266b2e40bc00e5a6c09c3584011e08b06f123c00362c92b975ba9843aaaa14b8" +checksum = 
"fa0faa943b50f3db30a20aa7e265dbc66076993efed8463e8de414e5d06d3471" dependencies = [ "proc-macro2", "quote", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] @@ -5464,7 +5465,7 @@ checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] @@ -5636,7 +5637,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] @@ -6033,7 +6034,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.41", + "syn 2.0.48", "wasm-bindgen-shared", ] @@ -6055,7 +6056,7 @@ checksum = "f0eb82fcb7930ae6219a7ecfd55b217f5f0893484b7a13022ebb2b2bf20b5283" dependencies = [ "proc-macro2", "quote", - "syn 2.0.41", + "syn 2.0.48", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -6209,7 +6210,7 @@ dependencies = [ "anyhow", "proc-macro2", "quote", - "syn 2.0.41", + "syn 2.0.48", "wasmtime-component-util", "wasmtime-wit-bindgen", "wit-parser", @@ -6397,7 +6398,7 @@ checksum = "f50f51f8d79bfd2aa8e9d9a0ae7c2d02b45fe412e62ff1b87c0c81b07c738231" dependencies = [ "proc-macro2", "quote", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] @@ -6796,7 +6797,7 @@ checksum = "b3c129550b3e6de3fd0ba67ba5c81818f9805e58b8d7fee80a3a59d2c9fc601a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] @@ -6816,7 +6817,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.41", + "syn 2.0.48", ] [[package]] diff --git a/cli/Cargo.toml b/cli/Cargo.toml index 8507b7d77ef..5da8701beba 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -63,6 +63,7 @@ tokio = { workspace = true, features = ["macros", "signal"] } once_cell = { workspace = true } owo-colors = { workspace = true, features = ["supports-colors"] } supports-color = { workspace = true } +rand = { workspace = true } 
thread-local-panic-hook = { version = "0.1.0", optional = true } diff --git a/cli/src/lib.rs b/cli/src/lib.rs index b63e0880efd..259ee983ff3 100644 --- a/cli/src/lib.rs +++ b/cli/src/lib.rs @@ -251,6 +251,7 @@ impl Iroha { let kura_thread_handler = Kura::start(Arc::clone(&kura)); let sumeragi = SumeragiHandle::start(SumeragiStartArgs { + chain_id: config.chain_id, configuration: &config.sumeragi, events_sender: events_sender.clone(), wsv, @@ -271,6 +272,7 @@ impl Iroha { .start(); let gossiper = TransactionGossiper::from_configuration( + config.chain_id, &config.sumeragi, network.clone(), Arc::clone(&queue), @@ -300,6 +302,7 @@ impl Iroha { let kiso = KisoHandle::new(config.clone()); let torii = Torii::new( + config.chain_id, kiso.clone(), &config.torii, Arc::clone(&queue), @@ -579,7 +582,7 @@ pub fn read_config( .wrap_err("Invalid genesis configuration")? { Some( - GenesisNetwork::new(raw_block, &key_pair) + GenesisNetwork::new(raw_block, config.chain_id, &key_pair) .wrap_err("Failed to construct the genesis")?, ) } else { @@ -630,21 +633,24 @@ mod tests { use super::*; fn config_factory() -> Result { - let mut base = ConfigurationProxy::default(); - let key_pair = KeyPair::generate()?; - base.public_key = Some(key_pair.public_key().clone()); - base.private_key = Some(key_pair.private_key().clone()); + let mut base = ConfigurationProxy { + chain_id: Some(0), - let torii = base.torii.as_mut().unwrap(); - torii.p2p_addr = Some(socket_addr!(127.0.0.1:1337)); - torii.api_url = Some(socket_addr!(127.0.0.1:1337)); + public_key: Some(key_pair.public_key().clone()), + private_key: Some(key_pair.private_key().clone()), + ..ConfigurationProxy::default() + }; let genesis = base.genesis.as_mut().unwrap(); genesis.private_key = Some(Some(key_pair.private_key().clone())); genesis.public_key = Some(key_pair.public_key().clone()); + let torii = base.torii.as_mut().unwrap(); + torii.p2p_addr = Some(socket_addr!(127.0.0.1:1337)); + torii.api_url = 
Some(socket_addr!(127.0.0.1:1337)); + Ok(base) } @@ -686,10 +692,6 @@ mod tests { // No need to check whether genesis.file is resolved - if not, genesis wouldn't be read assert!(genesis.is_some()); - assert_eq!( - config.kura.block_store_path.absolutize()?, - dir.path().join("storage") - ); assert_eq!( config.snapshot.dir_path.absolutize()?, dir.path().join("snapshots") diff --git a/cli/src/samples.rs b/cli/src/samples.rs index 1a59f2b0a25..e6165343fe1 100644 --- a/cli/src/samples.rs +++ b/cli/src/samples.rs @@ -9,6 +9,7 @@ use iroha_config::{ use iroha_crypto::{KeyPair, PublicKey}; use iroha_data_model::{peer::PeerId, prelude::*}; use iroha_primitives::unique_vec::UniqueVec; +use rand::Rng; /// Get sample trusted peers. The public key must be the same as `configuration.public_key` /// @@ -52,12 +53,19 @@ pub fn get_trusted_peers(public_key: Option<&PublicKey>) -> HashSet { /// /// # Panics /// - when [`KeyPair`] generation fails (rare case). -pub fn get_config_proxy(peers: UniqueVec, key_pair: Option) -> ConfigurationProxy { +pub fn get_config_proxy( + peers: UniqueVec, + chain_id: Option, + key_pair: Option, +) -> ConfigurationProxy { + let chain_id = chain_id.unwrap_or_else(|| rand::thread_rng().gen_range(0..u16::MAX)); + let (public_key, private_key) = key_pair .unwrap_or_else(|| KeyPair::generate().expect("Key pair generation failed")) .into(); iroha_logger::info!(%public_key); ConfigurationProxy { + chain_id: Some(chain_id), public_key: Some(public_key.clone()), private_key: Some(private_key.clone()), sumeragi: Some(Box::new(iroha_config::sumeragi::ConfigurationProxy { @@ -94,8 +102,12 @@ pub fn get_config_proxy(peers: UniqueVec, key_pair: Option) -> /// /// # Panics /// - when [`KeyPair`] generation fails (rare case). 
-pub fn get_config(trusted_peers: UniqueVec, key_pair: Option) -> Configuration { - get_config_proxy(trusted_peers, key_pair) +pub fn get_config( + trusted_peers: UniqueVec, + chain_id: Option, + key_pair: Option, +) -> Configuration { + get_config_proxy(trusted_peers, chain_id, key_pair) .build() .expect("Iroha config should build as all required fields were provided") } diff --git a/client/Cargo.toml b/client/Cargo.toml index 5a3aba4aadb..9e0c96bd929 100644 --- a/client/Cargo.toml +++ b/client/Cargo.toml @@ -58,7 +58,7 @@ iroha_version = { workspace = true, features = ["http"] } attohttpc = { version = "0.26.1", default-features = false } eyre = { workspace = true } -http = "0.2.9" +http = "0.2.11" url = { workspace = true } rand = { workspace = true } serde = { workspace = true, features = ["derive"] } @@ -71,7 +71,7 @@ parity-scale-codec = { workspace = true, default-features = false, features = [" tokio = { workspace = true, features = ["rt"] } tokio-tungstenite = { workspace = true } tungstenite = { workspace = true } -futures-util = "0.3.28" +futures-util = "0.3.30" [dev-dependencies] iroha_wasm_builder = { workspace = true } diff --git a/client/benches/torii.rs b/client/benches/torii.rs index 088ec7eb406..2f36495df75 100644 --- a/client/benches/torii.rs +++ b/client/benches/torii.rs @@ -12,7 +12,7 @@ use iroha_client::{ use iroha_genesis::{GenesisNetwork, RawGenesisBlockBuilder}; use iroha_primitives::unique_vec; use iroha_version::Encode; -use test_network::{get_key_pair, Peer as TestPeer, PeerBuilder, TestRuntime}; +use test_network::{get_chain_id, get_key_pair, Peer as TestPeer, PeerBuilder, TestRuntime}; use tokio::runtime::Runtime; const MINIMUM_SUCCESS_REQUEST_RATIO: f32 = 0.9; @@ -30,7 +30,11 @@ fn get_genesis_key_pair(config: &iroha_config::iroha::Configuration) -> KeyPair fn query_requests(criterion: &mut Criterion) { let mut peer = ::new().expect("Failed to create peer"); - let configuration = get_config(unique_vec![peer.id.clone()], 
Some(get_key_pair())); + let configuration = get_config( + unique_vec![peer.id.clone()], + Some(get_chain_id()), + Some(get_key_pair()), + ); let rt = Runtime::test(); let genesis = GenesisNetwork::new( @@ -45,6 +49,7 @@ fn query_requests(criterion: &mut Criterion) { construct_executor("../default_executor").expect("Failed to construct executor"), ) .build(), + get_chain_id(), &get_genesis_key_pair(&configuration), ) .expect("genesis creation failed"); @@ -76,7 +81,8 @@ fn query_requests(criterion: &mut Criterion) { quantity, AssetId::new(asset_definition_id, account_id.clone()), ); - let mut client_config = iroha_client::samples::get_client_config(&get_key_pair()); + let mut client_config = + iroha_client::samples::get_client_config(get_chain_id(), &get_key_pair()); client_config.torii_api_url = format!("http://{}", peer.api_address).parse().unwrap(); @@ -128,9 +134,14 @@ fn query_requests(criterion: &mut Criterion) { fn instruction_submits(criterion: &mut Criterion) { println!("instruction submits"); + let chain_id = 0; let rt = Runtime::test(); let mut peer = ::new().expect("Failed to create peer"); - let configuration = get_config(unique_vec![peer.id.clone()], Some(get_key_pair())); + let configuration = get_config( + unique_vec![peer.id.clone()], + Some(get_chain_id()), + Some(get_key_pair()), + ); let genesis = GenesisNetwork::new( RawGenesisBlockBuilder::default() .domain("wonderland".parse().expect("Valid")) @@ -143,6 +154,7 @@ fn instruction_submits(criterion: &mut Criterion) { construct_executor("../default_executor").expect("Failed to construct executor"), ) .build(), + chain_id, &get_genesis_key_pair(&configuration), ) .expect("failed to create genesis"); @@ -159,7 +171,8 @@ fn instruction_submits(criterion: &mut Criterion) { .into(); let create_account = Register::account(Account::new(account_id.clone(), [public_key])).into(); let asset_definition_id = AssetDefinitionId::new("xor".parse().expect("Valid"), domain_id); - let mut client_config = 
iroha_client::samples::get_client_config(&get_key_pair()); + let mut client_config = + iroha_client::samples::get_client_config(get_chain_id(), &get_key_pair()); client_config.torii_api_url = format!("http://{}", peer.api_address).parse().unwrap(); let iroha_client = Client::new(&client_config).expect("Invalid client configuration"); thread::sleep(std::time::Duration::from_millis(5000)); diff --git a/client/benches/tps/utils.rs b/client/benches/tps/utils.rs index c1a3494260f..21260ff56a9 100644 --- a/client/benches/tps/utils.rs +++ b/client/benches/tps/utils.rs @@ -207,6 +207,8 @@ impl MeasurerUnit { /// Spawn who periodically submits transactions fn spawn_transaction_submitter(&self, shutdown_signal: mpsc::Receiver<()>) -> JoinHandle<()> { + let chain_id = 0; + let submitter = self.client.clone(); let interval_us_per_tx = self.config.interval_us_per_tx; let instructions = self.instructions(); @@ -218,7 +220,7 @@ impl MeasurerUnit { for instruction in instructions { match shutdown_signal.try_recv() { Err(mpsc::TryRecvError::Empty) => { - let mut transaction = TransactionBuilder::new(alice_id.clone()) + let mut transaction = TransactionBuilder::new(chain_id, alice_id.clone()) .with_instructions([instruction]); transaction.set_nonce(nonce); // Use nonce to avoid transaction duplication within the same thread diff --git a/client/examples/million_accounts_genesis.rs b/client/examples/million_accounts_genesis.rs index a6de431c796..32e1766e9e6 100644 --- a/client/examples/million_accounts_genesis.rs +++ b/client/examples/million_accounts_genesis.rs @@ -7,7 +7,8 @@ use iroha_data_model::isi::InstructionBox; use iroha_genesis::{GenesisNetwork, RawGenesisBlock, RawGenesisBlockBuilder}; use iroha_primitives::unique_vec; use test_network::{ - get_key_pair, wait_for_genesis_committed, Peer as TestPeer, PeerBuilder, TestRuntime, + get_chain_id, get_key_pair, wait_for_genesis_committed, Peer as TestPeer, PeerBuilder, + TestRuntime, }; use tokio::runtime::Runtime; @@ -36,9 +37,13 
@@ fn generate_genesis(num_domains: u32) -> RawGenesisBlock { fn main_genesis() { let mut peer = ::new().expect("Failed to create peer"); - let configuration = get_config(unique_vec![peer.id.clone()], Some(get_key_pair())); + let configuration = get_config( + unique_vec![peer.id.clone()], + Some(get_chain_id()), + Some(get_key_pair()), + ); let rt = Runtime::test(); - let genesis = GenesisNetwork::new(generate_genesis(1_000_000_u32), &{ + let genesis = GenesisNetwork::new(generate_genesis(1_000_000_u32), get_chain_id(), &{ let private_key = configuration .genesis .private_key diff --git a/client/src/client.rs b/client/src/client.rs index 3a4c7615397..c915971b628 100644 --- a/client/src/client.rs +++ b/client/src/client.rs @@ -344,6 +344,8 @@ impl_query_output! { )] #[display(fmt = "{}@{torii_url}", "key_pair.public_key()")] pub struct Client { + /// Unique id of the blockchain. Used for simple replay attack protection. + pub chain_id: u16, /// Url for accessing iroha node pub torii_url: Url, /// Accounts keypair @@ -440,6 +442,7 @@ impl Client { } Ok(Self { + chain_id: configuration.chain_id, torii_url: configuration.torii_api_url.clone(), key_pair: KeyPair::new( configuration.public_key.clone(), @@ -466,7 +469,7 @@ impl Client { instructions: impl Into, metadata: UnlimitedMetadata, ) -> Result { - let tx_builder = TransactionBuilder::new(self.account_id.clone()); + let tx_builder = TransactionBuilder::new(self.chain_id, self.account_id.clone()); let mut tx_builder = match instructions.into() { Executable::Instructions(instructions) => tx_builder.with_instructions(instructions), @@ -1664,6 +1667,7 @@ mod tests { let (public_key, private_key) = KeyPair::generate().unwrap().into(); let cfg = ConfigurationProxy { + chain_id: Some(0), public_key: Some(public_key), private_key: Some(private_key), account_id: Some( @@ -1706,6 +1710,7 @@ mod tests { }; let cfg = ConfigurationProxy { + chain_id: Some(0), public_key: Some( 
"ed01207233BFC89DCBD68C19FDE6CE6158225298EC1131B6A130D1AEB454C1AB5183C0" .parse() diff --git a/client/src/lib.rs b/client/src/lib.rs index 78a3cbeac13..21e21f65350 100644 --- a/client/src/lib.rs +++ b/client/src/lib.rs @@ -15,9 +15,10 @@ pub mod samples { }; /// Get sample client configuration. - pub fn get_client_config(key_pair: &KeyPair) -> Configuration { + pub fn get_client_config(chain_id: u16, key_pair: &KeyPair) -> Configuration { let (public_key, private_key) = key_pair.clone().into(); ConfigurationProxy { + chain_id: Some(chain_id), public_key: Some(public_key), private_key: Some(private_key), account_id: Some( diff --git a/client/tests/integration/asset.rs b/client/tests/integration/asset.rs index 3b151b99ec8..52c8b038e09 100644 --- a/client/tests/integration/asset.rs +++ b/client/tests/integration/asset.rs @@ -277,10 +277,12 @@ fn find_rate_and_make_exchange_isi_should_succeed() { alice_id.clone(), ); - let grant_asset_transfer_tx = TransactionBuilder::new(asset_id.account_id().clone()) - .with_instructions([allow_alice_to_transfer_asset]) - .sign(owner_keypair) - .expect("Failed to sign seller transaction"); + let chain_id = 0; + let grant_asset_transfer_tx = + TransactionBuilder::new(chain_id, asset_id.account_id().clone()) + .with_instructions([allow_alice_to_transfer_asset]) + .sign(owner_keypair) + .expect("Failed to sign seller transaction"); test_client .submit_transaction_blocking(&grant_asset_transfer_tx) diff --git a/client/tests/integration/burn_public_keys.rs b/client/tests/integration/burn_public_keys.rs index f207894995d..d4c0c98846b 100644 --- a/client/tests/integration/burn_public_keys.rs +++ b/client/tests/integration/burn_public_keys.rs @@ -14,7 +14,7 @@ fn submit( eyre::Result>, ) { let tx = if let Some((account_id, keypair)) = submitter { - TransactionBuilder::new(account_id) + TransactionBuilder::new(0, account_id) .with_instructions(instructions) .sign(keypair) .unwrap() diff --git a/client/tests/integration/domain_owner.rs 
b/client/tests/integration/domain_owner.rs index eeeb881b324..15a5836dac4 100644 --- a/client/tests/integration/domain_owner.rs +++ b/client/tests/integration/domain_owner.rs @@ -122,7 +122,7 @@ fn domain_owner_asset_definition_permissions() -> Result<()> { // register asset definitions by "bob@kingdom" so he is owner of it let coin = AssetDefinition::quantity(coin_id.clone()); - let transaction = TransactionBuilder::new(bob_id.clone()) + let transaction = TransactionBuilder::new(0, bob_id.clone()) .with_instructions([Register::asset_definition(coin)]) .sign(bob_keypair)?; test_client.submit_transaction_blocking(&transaction)?; @@ -181,7 +181,7 @@ fn domain_owner_asset_permissions() -> Result<()> { // register asset definitions by "bob@kingdom" so he is owner of it let coin = AssetDefinition::quantity(coin_id.clone()); let store = AssetDefinition::store(store_id.clone()); - let transaction = TransactionBuilder::new(bob_id.clone()) + let transaction = TransactionBuilder::new(0, bob_id.clone()) .with_instructions([ Register::asset_definition(coin), Register::asset_definition(store), diff --git a/client/tests/integration/permissions.rs b/client/tests/integration/permissions.rs index a6250427fbd..d1455c6af93 100644 --- a/client/tests/integration/permissions.rs +++ b/client/tests/integration/permissions.rs @@ -94,7 +94,7 @@ fn permissions_disallow_asset_transfer() { quantity, alice_id.clone(), ); - let transfer_tx = TransactionBuilder::new(mouse_id) + let transfer_tx = TransactionBuilder::new(0, mouse_id) .with_instructions([transfer_asset]) .sign(mouse_keypair) .expect("Failed to sign mouse transaction"); @@ -144,7 +144,7 @@ fn permissions_disallow_asset_burn() { quantity, AssetId::new(asset_definition_id, mouse_id.clone()), ); - let burn_tx = TransactionBuilder::new(mouse_id) + let burn_tx = TransactionBuilder::new(0, mouse_id) .with_instructions([burn_asset]) .sign(mouse_keypair) .expect("Failed to sign mouse transaction"); @@ -226,7 +226,7 @@ fn 
permissions_differ_not_only_by_names() { alice_id.clone(), ); - let grant_hats_access_tx = TransactionBuilder::new(mouse_id.clone()) + let grant_hats_access_tx = TransactionBuilder::new(0, mouse_id.clone()) .with_instructions([allow_alice_to_set_key_value_in_hats]) .sign(mouse_keypair.clone()) .expect("Failed to sign mouse transaction"); @@ -263,7 +263,7 @@ fn permissions_differ_not_only_by_names() { alice_id, ); - let grant_shoes_access_tx = TransactionBuilder::new(mouse_id) + let grant_shoes_access_tx = TransactionBuilder::new(0, mouse_id) .with_instructions([allow_alice_to_set_key_value_in_shoes]) .sign(mouse_keypair) .expect("Failed to sign mouse transaction"); @@ -313,7 +313,7 @@ fn stored_vs_granted_token_payload() -> Result<()> { alice_id, ); - let transaction = TransactionBuilder::new(mouse_id) + let transaction = TransactionBuilder::new(0, mouse_id) .with_instructions([allow_alice_to_set_key_value_in_mouse_asset]) .sign(mouse_keypair) .expect("Failed to sign mouse transaction"); diff --git a/client/tests/integration/roles.rs b/client/tests/integration/roles.rs index ef1884806a3..ceab82cee1a 100644 --- a/client/tests/integration/roles.rs +++ b/client/tests/integration/roles.rs @@ -76,7 +76,7 @@ fn register_and_grant_role_for_metadata_access() -> Result<()> { // Mouse grants role to Alice let grant_role = Grant::role(role_id.clone(), alice_id.clone()); - let grant_role_tx = TransactionBuilder::new(mouse_id.clone()) + let grant_role_tx = TransactionBuilder::new(0, mouse_id.clone()) .with_instructions([grant_role]) .sign(mouse_key_pair)?; test_client.submit_transaction_blocking(&grant_role_tx)?; diff --git a/client/tests/integration/unregister_peer.rs b/client/tests/integration/unregister_peer.rs index 3121bf1de76..16f6ad87c10 100644 --- a/client/tests/integration/unregister_peer.rs +++ b/client/tests/integration/unregister_peer.rs @@ -112,14 +112,11 @@ fn init() -> Result<( let asset_definition_id: AssetDefinitionId = "xor#domain".parse()?; let create_asset = 
Register::asset_definition(AssetDefinition::quantity(asset_definition_id.clone())); - let instructions = parameters.into_iter().chain( - [ - create_domain.into(), - create_account.into(), - create_asset.into(), - ] - .into_iter(), - ); + let instructions = parameters.into_iter().chain([ + create_domain.into(), + create_account.into(), + create_asset.into(), + ]); client.submit_all_blocking(instructions)?; iroha_logger::info!("Init"); Ok(( diff --git a/client/tests/integration/upgrade.rs b/client/tests/integration/upgrade.rs index 3ec49a84600..a6a5956f0d9 100644 --- a/client/tests/integration/upgrade.rs +++ b/client/tests/integration/upgrade.rs @@ -30,7 +30,7 @@ fn executor_upgrade_should_work() -> Result<()> { let alice_rose: AssetId = "rose##alice@wonderland".parse()?; let admin_rose: AccountId = "admin@admin".parse()?; let transfer_alice_rose = Transfer::asset_quantity(alice_rose, 1_u32, admin_rose); - let transfer_rose_tx = TransactionBuilder::new(admin_id.clone()) + let transfer_rose_tx = TransactionBuilder::new(0, admin_id.clone()) .with_instructions([transfer_alice_rose.clone()]) .sign(admin_keypair.clone())?; let _ = client @@ -44,7 +44,7 @@ fn executor_upgrade_should_work() -> Result<()> { // Check that admin can transfer alice's rose now // Creating new transaction instead of cloning, because we need to update it's creation time - let transfer_rose_tx = TransactionBuilder::new(admin_id) + let transfer_rose_tx = TransactionBuilder::new(0, admin_id) .with_instructions([transfer_alice_rose]) .sign(admin_keypair)?; client diff --git a/client_cli/Cargo.toml b/client_cli/Cargo.toml index c9ac564c6de..02aee1ad625 100644 --- a/client_cli/Cargo.toml +++ b/client_cli/Cargo.toml @@ -36,5 +36,5 @@ serde_json = { workspace = true } erased-serde = "0.3.31" [build-dependencies] -vergen = { version = "8.2.5", default-features = false } +vergen = { version = "8.2.6", default-features = false } color-eyre = "0.6.2" diff --git a/config/Cargo.toml b/config/Cargo.toml index 
d6df71128fa..baaf9502bb9 100644 --- a/config/Cargo.toml +++ b/config/Cargo.toml @@ -33,7 +33,7 @@ cfg-if = { workspace = true } once_cell = { workspace = true } [dev-dependencies] -proptest = "1.3.1" +proptest = "1.4.0" stacker = "0.1.15" expect-test = { workspace = true } diff --git a/config/iroha_test_config.json b/config/iroha_test_config.json index 7a180598bbb..ba1dd4937a3 100644 --- a/config/iroha_test_config.json +++ b/config/iroha_test_config.json @@ -1,4 +1,5 @@ { + "CHAIN_ID": 0, "PUBLIC_KEY": "ed01201C61FAF8FE94E253B93114240394F79A607B7FA55F9E5A41EBEC74B88055768B", "PRIVATE_KEY": { "digest_function": "ed25519", diff --git a/config/src/client.rs b/config/src/client.rs index a9238879cac..8d859e9f8fc 100644 --- a/config/src/client.rs +++ b/config/src/client.rs @@ -6,13 +6,11 @@ use derive_more::Display; use eyre::{Result, WrapErr}; use iroha_config_base::derive::{Error as ConfigError, Proxy}; use iroha_crypto::prelude::*; -use iroha_data_model::{prelude::*, transaction::TransactionLimits}; +use iroha_data_model::prelude::*; use iroha_primitives::small::SmallStr; use serde::{Deserialize, Serialize}; use url::Url; -use crate::wsv::default::DEFAULT_TRANSACTION_LIMITS; - #[allow(unsafe_code)] const DEFAULT_TRANSACTION_TIME_TO_LIVE_MS: NonZeroU64 = unsafe { NonZeroU64::new_unchecked(100_000) }; @@ -69,6 +67,8 @@ pub struct BasicAuth { #[serde(rename_all = "UPPERCASE")] #[config(env_prefix = "IROHA_")] pub struct Configuration { + /// Unique id of the blockchain. Used for simple replay attack protection. + pub chain_id: u16, /// Public key of the user account. #[config(serde_as_str)] pub public_key: PublicKey, @@ -84,13 +84,6 @@ pub struct Configuration { pub transaction_time_to_live_ms: Option, /// Transaction status wait timeout in milliseconds. 
pub transaction_status_timeout_ms: u64, - /// The limits to which transactions must adhere to - // NOTE: If you want this functionality, implement it in the app manually - #[deprecated( - note = "This parameter is not used and takes no effect and will be removed in future releases. \ - If you want this functionality, implement it in the app manually." - )] - pub transaction_limits: TransactionLimits, /// If `true` add nonce, which make different hashes for transactions which occur repeatedly and simultaneously pub add_transaction_nonce: bool, } @@ -98,6 +91,7 @@ pub struct Configuration { impl Default for ConfigurationProxy { fn default() -> Self { Self { + chain_id: None, public_key: None, private_key: None, account_id: None, @@ -105,7 +99,6 @@ impl Default for ConfigurationProxy { torii_api_url: None, transaction_time_to_live_ms: Some(Some(DEFAULT_TRANSACTION_TIME_TO_LIVE_MS)), transaction_status_timeout_ms: Some(DEFAULT_TRANSACTION_STATUS_TIMEOUT_MS), - transaction_limits: Some(DEFAULT_TRANSACTION_LIMITS), add_transaction_nonce: Some(DEFAULT_ADD_TRANSACTION_NONCE), } } @@ -208,17 +201,17 @@ mod tests { prop_compose! 
{ fn arb_proxy() ( + chain_id in prop::option::of(Just(crate::iroha::tests::placeholder_chain_id())), (public_key, private_key) in arb_keys_with_option(), account_id in prop::option::of(Just(placeholder_account())), basic_auth in prop::option::of(Just(None)), torii_api_url in prop::option::of(Just(format!("http://{DEFAULT_API_ADDR}").parse().unwrap())), transaction_time_to_live_ms in prop::option::of(Just(Some(DEFAULT_TRANSACTION_TIME_TO_LIVE_MS))), transaction_status_timeout_ms in prop::option::of(Just(DEFAULT_TRANSACTION_STATUS_TIMEOUT_MS)), - transaction_limits in prop::option::of(Just(DEFAULT_TRANSACTION_LIMITS)), add_transaction_nonce in prop::option::of(Just(DEFAULT_ADD_TRANSACTION_NONCE)), ) -> ConfigurationProxy { - ConfigurationProxy { public_key, private_key, account_id, basic_auth, torii_api_url, transaction_time_to_live_ms, transaction_status_timeout_ms, transaction_limits, add_transaction_nonce } + ConfigurationProxy { chain_id, public_key, private_key, account_id, basic_auth, torii_api_url, transaction_time_to_live_ms, transaction_status_timeout_ms, add_transaction_nonce } } } @@ -236,10 +229,6 @@ mod tests { assert_eq!(arb_cfg.account_id, example_cfg.account_id); assert_eq!(arb_cfg.transaction_time_to_live_ms, example_cfg.transaction_time_to_live_ms); assert_eq!(arb_cfg.transaction_status_timeout_ms, example_cfg.transaction_status_timeout_ms); - #[allow(deprecated)] // For testing purposes only - { - assert_eq!(arb_cfg.transaction_limits, example_cfg.transaction_limits); - } assert_eq!(arb_cfg.add_transaction_nonce, example_cfg.add_transaction_nonce); } } diff --git a/config/src/iroha.rs b/config/src/iroha.rs index 1946b2571b1..83f806190c1 100644 --- a/config/src/iroha.rs +++ b/config/src/iroha.rs @@ -14,6 +14,8 @@ view! { #[serde(rename_all = "UPPERCASE")] #[config(env_prefix = "IROHA_")] pub struct Configuration { + /// Unique id of the blockchain. Used for simple replay attack protection. 
+ pub chain_id: u16, /// Public key of this peer #[config(serde_as_str)] pub public_key: PublicKey, @@ -64,6 +66,7 @@ view! { impl Default for ConfigurationProxy { fn default() -> Self { Self { + chain_id: None, public_key: None, private_key: None, kura: Some(Box::default()), @@ -165,7 +168,7 @@ impl ConfigurationProxy { } #[cfg(test)] -mod tests { +pub mod tests { use std::path::PathBuf; use proptest::prelude::*; @@ -200,8 +203,13 @@ mod tests { .boxed() } + pub fn placeholder_chain_id() -> u16 { + 0 + } + prop_compose! { fn arb_proxy()( + chain_id in prop::option::of(Just(placeholder_chain_id())), (public_key, private_key) in arb_keys(), kura in prop::option::of(kura::tests::arb_proxy().prop_map(Box::new)), sumeragi in (prop::option::of(sumeragi::tests::arb_proxy().prop_map(Box::new))), @@ -216,7 +224,7 @@ mod tests { snapshot in prop::option::of(snapshot::tests::arb_proxy().prop_map(Box::new)), live_query_store in prop::option::of(live_query_store::tests::arb_proxy()), ) -> ConfigurationProxy { - ConfigurationProxy { public_key, private_key, kura, sumeragi, torii, block_sync, queue, + ConfigurationProxy { chain_id, public_key, private_key, kura, sumeragi, torii, block_sync, queue, logger, genesis, wsv, network, telemetry, snapshot, live_query_store } } } diff --git a/configs/client/config.json b/configs/client/config.json index 5ed2399d626..69c16948d05 100644 --- a/configs/client/config.json +++ b/configs/client/config.json @@ -1,4 +1,5 @@ { + "CHAIN_ID": 0, "PUBLIC_KEY": "ed01207233BFC89DCBD68C19FDE6CE6158225298EC1131B6A130D1AEB454C1AB5183C0", "PRIVATE_KEY": { "digest_function": "ed25519", @@ -12,9 +13,5 @@ "TORII_API_URL": "http://127.0.0.1:8080/", "TRANSACTION_TIME_TO_LIVE_MS": 100000, "TRANSACTION_STATUS_TIMEOUT_MS": 15000, - "TRANSACTION_LIMITS": { - "max_instruction_number": 4096, - "max_wasm_size_bytes": 4194304 - }, "ADD_TRANSACTION_NONCE": false } diff --git a/configs/peer/config.json b/configs/peer/config.json index 3f0dc2f87a9..2695398702f 100644 --- 
a/configs/peer/config.json +++ b/configs/peer/config.json @@ -1,4 +1,5 @@ { + "CHAIN_ID": null, "PUBLIC_KEY": null, "PRIVATE_KEY": null, "KURA": { diff --git a/configs/peer/executor.wasm b/configs/peer/executor.wasm index 7af36698d6c..e8aa0303109 100644 Binary files a/configs/peer/executor.wasm and b/configs/peer/executor.wasm differ diff --git a/core/Cargo.toml b/core/Cargo.toml index def91180650..07ef1aabf39 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -69,7 +69,7 @@ wasmtime = { workspace = true } parking_lot = { workspace = true, features = ["deadlock_detection"] } derive_more = { workspace = true } -uuid = { version = "1.4.1", features = ["v4"] } +uuid = { version = "1.6.1", features = ["v4"] } indexmap = "2.1.0" [dev-dependencies] diff --git a/core/benches/blocks/common.rs b/core/benches/blocks/common.rs index 7aef12edd2d..fc8b95a2619 100644 --- a/core/benches/blocks/common.rs +++ b/core/benches/blocks/common.rs @@ -26,7 +26,9 @@ pub fn create_block( account_id: AccountId, key_pair: KeyPair, ) -> CommittedBlock { - let transaction = TransactionBuilder::new(account_id) + let chain_id = 0; + + let transaction = TransactionBuilder::new(chain_id, account_id) .with_instructions(instructions) .sign(key_pair.clone()) .unwrap(); @@ -34,7 +36,7 @@ pub fn create_block( let topology = Topology::new(UniqueVec::new()); let block = BlockBuilder::new( - vec![AcceptedTransaction::accept(transaction, &limits).unwrap()], + vec![AcceptedTransaction::accept(transaction, chain_id, &limits).unwrap()], topology.clone(), Vec::new(), ) diff --git a/core/benches/kura.rs b/core/benches/kura.rs index a47f731e31d..e5acb865fea 100644 --- a/core/benches/kura.rs +++ b/core/benches/kura.rs @@ -19,21 +19,26 @@ use iroha_primitives::unique_vec::UniqueVec; use tokio::{fs, runtime::Runtime}; async fn measure_block_size_for_n_executors(n_executors: u32) { + let chain_id = 0; + let alice_id = AccountId::from_str("alice@test").expect("tested"); let bob_id = 
AccountId::from_str("bob@test").expect("tested"); let xor_id = AssetDefinitionId::from_str("xor#test").expect("tested"); let alice_xor_id = AssetId::new(xor_id, alice_id); let transfer = Transfer::asset_quantity(alice_xor_id, 10_u32, bob_id); let keypair = KeyPair::generate().expect("Failed to generate KeyPair."); - let tx = TransactionBuilder::new(AccountId::from_str("alice@wonderland").expect("checked")) - .with_instructions([transfer]) - .sign(keypair.clone()) - .expect("Failed to sign."); + let tx = TransactionBuilder::new( + chain_id, + AccountId::from_str("alice@wonderland").expect("checked"), + ) + .with_instructions([transfer]) + .sign(keypair.clone()) + .expect("Failed to sign."); let transaction_limits = TransactionLimits { max_instruction_number: 4096, max_wasm_size_bytes: 0, }; - let tx = AcceptedTransaction::accept(tx, &transaction_limits) + let tx = AcceptedTransaction::accept(tx, chain_id, &transaction_limits) .expect("Failed to accept Transaction."); let dir = tempfile::tempdir().expect("Could not create tempfile."); let cfg = Configuration { diff --git a/core/benches/validation.rs b/core/benches/validation.rs index 3a5bcaefe23..236c28f60e9 100644 --- a/core/benches/validation.rs +++ b/core/benches/validation.rs @@ -23,7 +23,7 @@ const TRANSACTION_LIMITS: TransactionLimits = TransactionLimits { max_wasm_size_bytes: 0, }; -fn build_test_transaction(keys: KeyPair) -> SignedTransaction { +fn build_test_transaction(keys: KeyPair, chain_id: u16) -> SignedTransaction { let domain_name = "domain"; let domain_id = DomainId::from_str(domain_name).expect("does not panic"); let create_domain: InstructionBox = Register::domain(Domain::new(domain_id)).into(); @@ -47,10 +47,13 @@ fn build_test_transaction(keys: KeyPair) -> SignedTransaction { Register::asset_definition(AssetDefinition::quantity(asset_definition_id)).into(); let instructions = [create_domain, create_account, create_asset]; - TransactionBuilder::new(AccountId::new( - 
START_ACCOUNT.parse().expect("Valid"), - START_DOMAIN.parse().expect("Valid"), - )) + TransactionBuilder::new( + chain_id, + AccountId::new( + START_ACCOUNT.parse().expect("Valid"), + START_DOMAIN.parse().expect("Valid"), + ), + ) .with_instructions(instructions) .sign(keys) .expect("Failed to sign.") @@ -93,24 +96,28 @@ fn build_test_and_transient_wsv(keys: KeyPair) -> WorldStateView { } fn accept_transaction(criterion: &mut Criterion) { + let chain_id = 0; + let keys = KeyPair::generate().expect("Failed to generate keys"); - let transaction = build_test_transaction(keys); + let transaction = build_test_transaction(keys, chain_id); let mut success_count = 0; let mut failures_count = 0; let _ = criterion.bench_function("accept", |b| { - b.iter( - || match AcceptedTransaction::accept(transaction.clone(), &TRANSACTION_LIMITS) { + b.iter(|| { + match AcceptedTransaction::accept(transaction.clone(), chain_id, &TRANSACTION_LIMITS) { Ok(_) => success_count += 1, Err(_) => failures_count += 1, - }, - ); + } + }); }); println!("Success count: {success_count}, Failures count: {failures_count}"); } fn sign_transaction(criterion: &mut Criterion) { + let chain_id = 0; + let keys = KeyPair::generate().expect("Failed to generate keys"); - let transaction = build_test_transaction(keys); + let transaction = build_test_transaction(keys, chain_id); let key_pair = KeyPair::generate().expect("Failed to generate KeyPair."); let mut success_count = 0; let mut failures_count = 0; @@ -124,10 +131,15 @@ fn sign_transaction(criterion: &mut Criterion) { } fn validate_transaction(criterion: &mut Criterion) { + let chain_id = 0; + let keys = KeyPair::generate().expect("Failed to generate keys"); - let transaction = - AcceptedTransaction::accept(build_test_transaction(keys.clone()), &TRANSACTION_LIMITS) - .expect("Failed to accept transaction."); + let transaction = AcceptedTransaction::accept( + build_test_transaction(keys.clone(), chain_id), + chain_id, + &TRANSACTION_LIMITS, + ) + 
.expect("Failed to accept transaction."); let mut success_count = 0; let mut failure_count = 0; let wsv = build_test_and_transient_wsv(keys); @@ -145,10 +157,15 @@ fn validate_transaction(criterion: &mut Criterion) { } fn sign_blocks(criterion: &mut Criterion) { + let chain_id = 0; + let keys = KeyPair::generate().expect("Failed to generate keys"); - let transaction = - AcceptedTransaction::accept(build_test_transaction(keys), &TRANSACTION_LIMITS) - .expect("Failed to accept transaction."); + let transaction = AcceptedTransaction::accept( + build_test_transaction(keys, chain_id), + chain_id, + &TRANSACTION_LIMITS, + ) + .expect("Failed to accept transaction."); let key_pair = KeyPair::generate().expect("Failed to generate KeyPair."); let kura = iroha_core::kura::Kura::blank_kura_for_testing(); let query_handle = LiveQueryStore::test().start(); diff --git a/core/src/block.rs b/core/src/block.rs index 164dc6b5456..5931c3f136d 100644 --- a/core/src/block.rs +++ b/core/src/block.rs @@ -265,6 +265,7 @@ mod valid { pub fn validate( block: SignedBlock, topology: &Topology, + expected_chain_id: u16, wsv: &mut WorldStateView, ) -> Result { if !block.payload().header.is_genesis() { @@ -326,7 +327,7 @@ mod valid { return Err((block, BlockValidationError::HasCommittedTransactions)); } - if let Err(error) = Self::validate_transactions(&block, wsv) { + if let Err(error) = Self::validate_transactions(&block, expected_chain_id, wsv) { return Err((block, error.into())); } @@ -342,6 +343,7 @@ mod valid { fn validate_transactions( block: &SignedBlock, + expected_chain_id: u16, wsv: &mut WorldStateView, ) -> Result<(), TransactionValidationError> { let is_genesis = block.payload().header.is_genesis(); @@ -356,10 +358,10 @@ mod valid { let limits = &transaction_executor.transaction_limits; let tx = if is_genesis { - AcceptedTransaction::accept_genesis(GenesisTransaction(value)) + AcceptedTransaction::accept_genesis(GenesisTransaction(value), expected_chain_id) } else { - 
AcceptedTransaction::accept(value, limits)? - }; + AcceptedTransaction::accept(value, expected_chain_id, limits) + }?; if error.is_some() { match transaction_executor.validate(tx, wsv) { @@ -720,6 +722,8 @@ mod tests { #[tokio::test] async fn should_reject_due_to_repetition() { + let chain_id = 0; + // Predefined world state let alice_id = AccountId::from_str("alice@wonderland").expect("Valid"); let alice_keys = KeyPair::generate().expect("Valid"); @@ -740,11 +744,11 @@ mod tests { // Making two transactions that have the same instruction let transaction_limits = &wsv.transaction_executor().transaction_limits; - let tx = TransactionBuilder::new(alice_id) + let tx = TransactionBuilder::new(chain_id, alice_id) .with_instructions([create_asset_definition]) .sign(alice_keys.clone()) .expect("Valid"); - let tx = AcceptedTransaction::accept(tx, transaction_limits).expect("Valid"); + let tx = AcceptedTransaction::accept(tx, chain_id, transaction_limits).expect("Valid"); // Creating a block of two identical transactions and validating it let transactions = vec![tx.clone(), tx]; @@ -763,6 +767,8 @@ mod tests { #[tokio::test] async fn tx_order_same_in_validation_and_revalidation() { + let chain_id = 0; + // Predefined world state let alice_id = AccountId::from_str("alice@wonderland").expect("Valid"); let alice_keys = KeyPair::generate().expect("Valid"); @@ -783,11 +789,11 @@ mod tests { // Making two transactions that have the same instruction let transaction_limits = &wsv.transaction_executor().transaction_limits; - let tx = TransactionBuilder::new(alice_id.clone()) + let tx = TransactionBuilder::new(chain_id, alice_id.clone()) .with_instructions([create_asset_definition]) .sign(alice_keys.clone()) .expect("Valid"); - let tx = AcceptedTransaction::accept(tx, transaction_limits).expect("Valid"); + let tx = AcceptedTransaction::accept(tx, chain_id, transaction_limits).expect("Valid"); let quantity: u32 = 200; let fail_quantity: u32 = 20; @@ -802,17 +808,17 @@ mod tests { 
AssetId::new(asset_definition_id, alice_id.clone()), ); - let tx0 = TransactionBuilder::new(alice_id.clone()) + let tx0 = TransactionBuilder::new(chain_id, alice_id.clone()) .with_instructions([fail_mint]) .sign(alice_keys.clone()) .expect("Valid"); - let tx0 = AcceptedTransaction::accept(tx0, transaction_limits).expect("Valid"); + let tx0 = AcceptedTransaction::accept(tx0, chain_id, transaction_limits).expect("Valid"); - let tx2 = TransactionBuilder::new(alice_id) + let tx2 = TransactionBuilder::new(chain_id, alice_id) .with_instructions([succeed_mint]) .sign(alice_keys.clone()) .expect("Valid"); - let tx2 = AcceptedTransaction::accept(tx2, transaction_limits).expect("Valid"); + let tx2 = AcceptedTransaction::accept(tx2, chain_id, transaction_limits).expect("Valid"); // Creating a block of two identical transactions and validating it let transactions = vec![tx0, tx, tx2]; @@ -831,6 +837,8 @@ mod tests { #[tokio::test] async fn failed_transactions_revert() { + let chain_id = 0; + // Predefined world state let alice_id = AccountId::from_str("alice@wonderland").expect("Valid"); let alice_keys = KeyPair::generate().expect("Valid"); @@ -858,16 +866,18 @@ mod tests { Fail::new("Always fail".to_owned()).into(), ]; let instructions_accept: [InstructionBox; 2] = [create_domain.into(), create_asset.into()]; - let tx_fail = TransactionBuilder::new(alice_id.clone()) + let tx_fail = TransactionBuilder::new(chain_id, alice_id.clone()) .with_instructions(instructions_fail) .sign(alice_keys.clone()) .expect("Valid"); - let tx_fail = AcceptedTransaction::accept(tx_fail, transaction_limits).expect("Valid"); - let tx_accept = TransactionBuilder::new(alice_id) + let tx_fail = + AcceptedTransaction::accept(tx_fail, chain_id, transaction_limits).expect("Valid"); + let tx_accept = TransactionBuilder::new(chain_id, alice_id) .with_instructions(instructions_accept) .sign(alice_keys.clone()) .expect("Valid"); - let tx_accept = AcceptedTransaction::accept(tx_accept, 
transaction_limits).expect("Valid"); + let tx_accept = + AcceptedTransaction::accept(tx_accept, chain_id, transaction_limits).expect("Valid"); // Creating a block of where first transaction must fail and second one fully executed let transactions = vec![tx_fail, tx_accept]; diff --git a/core/src/gossiper.rs b/core/src/gossiper.rs index 365ebb7ac7a..7b4966c9e1c 100644 --- a/core/src/gossiper.rs +++ b/core/src/gossiper.rs @@ -31,6 +31,8 @@ impl TransactionGossiperHandle { /// Actor to gossip transactions and receive transaction gossips pub struct TransactionGossiper { + /// Unique id of the blockchain. Used for simple replay attack protection. + chain_id: u16, /// The size of batch that is being gossiped. Smaller size leads /// to longer time to synchronise, useful if you have high packet loss. gossip_batch_size: u32, @@ -57,19 +59,21 @@ impl TransactionGossiper { /// Construct [`Self`] from configuration pub fn from_configuration( + chain_id: u16, // Currently we are using configuration parameters from sumeragi not to break configuration - configuartion: &Configuration, + configuration: &Configuration, network: IrohaNetwork, queue: Arc, sumeragi: SumeragiHandle, ) -> Self { let wsv = sumeragi.wsv_clone(); Self { + chain_id, queue, sumeragi, network, - gossip_batch_size: configuartion.gossip_batch_size, - gossip_period: Duration::from_millis(configuartion.gossip_period_ms), + gossip_batch_size: configuration.gossip_batch_size, + gossip_period: Duration::from_millis(configuration.gossip_period_ms), wsv, } } @@ -115,7 +119,7 @@ impl TransactionGossiper { for tx in txs { let transaction_limits = &self.wsv.config.transaction_limits; - match AcceptedTransaction::accept(tx, transaction_limits) { + match AcceptedTransaction::accept(tx, self.chain_id, transaction_limits) { Ok(tx) => match self.queue.push(tx, &self.wsv) { Ok(()) => {} Err(crate::queue::Failure { diff --git a/core/src/queue.rs b/core/src/queue.rs index 2872ebc9365..d366b834b7a 100644 --- a/core/src/queue.rs 
+++ b/core/src/queue.rs @@ -250,9 +250,8 @@ impl Queue { expired_transactions: &mut Vec, ) -> Option { loop { - let Some(hash) = self.tx_hashes.pop() else { - return None; - }; + let hash = self.tx_hashes.pop()?; + let entry = match self.accepted_txs.entry(hash) { Entry::Occupied(entry) => entry, // FIXME: Reachable under high load. Investigate, see if it's a problem. @@ -394,11 +393,13 @@ mod tests { }; fn accepted_tx(account_id: &str, key: KeyPair) -> AcceptedTransaction { + let chain_id = 0; + let message = std::iter::repeat_with(rand::random::) .take(16) .collect(); let instructions = [Fail { message }]; - let tx = TransactionBuilder::new(AccountId::from_str(account_id).expect("Valid")) + let tx = TransactionBuilder::new(chain_id, AccountId::from_str(account_id).expect("Valid")) .with_instructions(instructions) .sign(key) .expect("Failed to sign."); @@ -406,7 +407,7 @@ mod tests { max_instruction_number: 4096, max_wasm_size_bytes: 0, }; - AcceptedTransaction::accept(tx, &limits).expect("Failed to accept Transaction.") + AcceptedTransaction::accept(tx, chain_id, &limits).expect("Failed to accept Transaction.") } pub fn world_with_test_domains( @@ -483,6 +484,8 @@ mod tests { #[test] async fn push_multisignature_tx() { + let chain_id = 0; + let max_txs_in_block = 2; let key_pairs = [KeyPair::generate().unwrap(), KeyPair::generate().unwrap()]; let kura = Kura::blank_kura_for_testing(); @@ -513,7 +516,7 @@ mod tests { .expect("Default queue config should always build") }); let instructions: [InstructionBox; 0] = []; - let tx = TransactionBuilder::new("alice@wonderland".parse().expect("Valid")) + let tx = TransactionBuilder::new(chain_id, "alice@wonderland".parse().expect("Valid")) .with_instructions(instructions); let tx_limits = TransactionLimits { max_instruction_number: 4096, @@ -527,7 +530,7 @@ mod tests { for key_pair in &key_pairs[1..] 
{ signed_tx = signed_tx.sign(key_pair.clone()).expect("Failed to sign"); } - AcceptedTransaction::accept(signed_tx, &tx_limits) + AcceptedTransaction::accept(signed_tx, chain_id, &tx_limits) .expect("Failed to accept Transaction.") }; // Check that fully signed transaction pass signature check @@ -539,6 +542,7 @@ mod tests { let get_tx = |key_pair| { AcceptedTransaction::accept( tx.clone().sign(key_pair).expect("Failed to sign."), + chain_id, &tx_limits, ) .expect("Failed to accept Transaction.") @@ -744,6 +748,8 @@ mod tests { #[test] async fn custom_expired_transaction_is_rejected() { + let chain_id = 0; + let max_txs_in_block = 2; let alice_key = KeyPair::generate().expect("Failed to generate keypair."); let kura = Kura::blank_kura_for_testing(); @@ -763,16 +769,19 @@ mod tests { let instructions = [Fail { message: "expired".to_owned(), }]; - let mut tx = - TransactionBuilder::new(AccountId::from_str("alice@wonderland").expect("Valid")) - .with_instructions(instructions); + let mut tx = TransactionBuilder::new( + chain_id, + AccountId::from_str("alice@wonderland").expect("Valid"), + ) + .with_instructions(instructions); tx.set_ttl(Duration::from_millis(10)); let tx = tx.sign(alice_key).expect("Failed to sign."); let limits = TransactionLimits { max_instruction_number: 4096, max_wasm_size_bytes: 0, }; - let tx = AcceptedTransaction::accept(tx, &limits).expect("Failed to accept Transaction."); + let tx = AcceptedTransaction::accept(tx, chain_id, &limits) + .expect("Failed to accept Transaction."); queue .push(tx.clone(), &wsv) .expect("Failed to push tx into queue"); diff --git a/core/src/smartcontracts/isi/query.rs b/core/src/smartcontracts/isi/query.rs index d14ed740d0b..7ca9a8c45dc 100644 --- a/core/src/smartcontracts/isi/query.rs +++ b/core/src/smartcontracts/isi/query.rs @@ -249,6 +249,8 @@ mod tests { valid_tx_per_block: usize, invalid_tx_per_block: usize, ) -> Result { + let chain_id = 0; + let kura = Kura::blank_kura_for_testing(); let query_handle = 
LiveQueryStore::test().start(); let mut wsv = WorldStateView::new(world_with_test_domains(), kura.clone(), query_handle); @@ -266,17 +268,17 @@ mod tests { let valid_tx = { let instructions: [InstructionBox; 0] = []; - let tx = TransactionBuilder::new(ALICE_ID.clone()) + let tx = TransactionBuilder::new(chain_id, ALICE_ID.clone()) .with_instructions(instructions) .sign(ALICE_KEYS.clone())?; - AcceptedTransaction::accept(tx, &limits)? + AcceptedTransaction::accept(tx, chain_id, &limits)? }; let invalid_tx = { let isi = Fail::new("fail".to_owned()); - let tx = TransactionBuilder::new(ALICE_ID.clone()) + let tx = TransactionBuilder::new(chain_id, ALICE_ID.clone()) .with_instructions([isi.clone(), isi]) .sign(ALICE_KEYS.clone())?; - AcceptedTransaction::accept(tx, &huge_limits)? + AcceptedTransaction::accept(tx, chain_id, &huge_limits)? }; let mut transactions = vec![valid_tx; valid_tx_per_block]; @@ -409,17 +411,19 @@ mod tests { #[test] async fn find_transaction() -> Result<()> { + let chain_id = 0; + let kura = Kura::blank_kura_for_testing(); let query_handle = LiveQueryStore::test().start(); let mut wsv = WorldStateView::new(world_with_test_domains(), kura.clone(), query_handle); let instructions: [InstructionBox; 0] = []; - let tx = TransactionBuilder::new(ALICE_ID.clone()) + let tx = TransactionBuilder::new(chain_id, ALICE_ID.clone()) .with_instructions(instructions) .sign(ALICE_KEYS.clone())?; let tx_limits = &wsv.transaction_executor().transaction_limits; - let va_tx = AcceptedTransaction::accept(tx, tx_limits)?; + let va_tx = AcceptedTransaction::accept(tx, chain_id, tx_limits)?; let topology = Topology::new(UniqueVec::new()); let vcb = BlockBuilder::new(vec![va_tx.clone()], topology.clone(), Vec::new()) @@ -431,7 +435,7 @@ mod tests { wsv.apply(&vcb)?; kura.store_block(vcb); - let unapplied_tx = TransactionBuilder::new(ALICE_ID.clone()) + let unapplied_tx = TransactionBuilder::new(chain_id, ALICE_ID.clone()) 
.with_instructions([Unregister::account("account@domain".parse().unwrap())]) .sign(ALICE_KEYS.clone())?; let wrong_hash = unapplied_tx.hash(); diff --git a/core/src/sumeragi/main_loop.rs b/core/src/sumeragi/main_loop.rs index fcca60b867b..6651db83151 100644 --- a/core/src/sumeragi/main_loop.rs +++ b/core/src/sumeragi/main_loop.rs @@ -14,6 +14,8 @@ use crate::{block::*, sumeragi::tracing::instrument}; /// `Sumeragi` is the implementation of the consensus. pub struct Sumeragi { + /// Unique id of the blockchain. Used for simple replay attack protection. + pub chain_id: u16, /// The pair of keys used for communication given this Sumeragi instance. pub key_pair: KeyPair, /// Address of queue @@ -209,19 +211,23 @@ impl Sumeragi { } }; - let block = - match ValidBlock::validate(block, &self.current_topology, &mut new_wsv) - .and_then(|block| { - block - .commit(&self.current_topology) - .map_err(|(block, error)| (block.into(), error)) - }) { - Ok(block) => block, - Err((_, error)) => { - error!(?error, "Received invalid genesis block"); - continue; - } - }; + let block = match ValidBlock::validate( + block, + &self.current_topology, + self.chain_id, + &mut new_wsv, + ) + .and_then(|block| { + block + .commit(&self.current_topology) + .map_err(|(block, error)| (block.into(), error)) + }) { + Ok(block) => block, + Err((_, error)) => { + error!(?error, "Received invalid genesis block"); + continue; + } + }; new_wsv.world_mut().trusted_peers_ids = block.payload().commit_topology.clone(); @@ -244,8 +250,9 @@ impl Sumeragi { let transactions: Vec<_> = genesis_network .into_transactions() .into_iter() - .map(AcceptedTransaction::accept_genesis) - .collect(); + .map(|tx| AcceptedTransaction::accept_genesis(tx, self.chain_id)) + .collect::>() + .expect("Genesis invalid"); let mut new_wsv = self.wsv.clone(); let genesis = BlockBuilder::new(transactions, self.current_topology.clone(), vec![]) @@ -364,172 +371,177 @@ impl Sumeragi { self.transaction_cache .retain(|tx| 
!self.wsv.has_transaction(tx.hash()) && !self.queue.is_expired(tx)); } -} -fn suggest_view_change( - sumeragi: &Sumeragi, - view_change_proof_chain: &mut ProofChain, - current_view_change_index: u64, -) { - let suspect_proof = - ProofBuilder::new(sumeragi.wsv.latest_block_hash(), current_view_change_index) - .sign(sumeragi.key_pair.clone()) - .expect("Proof signing failed"); - - view_change_proof_chain - .insert_proof( - &sumeragi.current_topology.ordered_peers, - sumeragi.current_topology.max_faults(), - sumeragi.wsv.latest_block_hash(), - suspect_proof, - ) - .unwrap_or_else(|err| error!("{err}")); + fn vote_for_block( + &self, + topology: &Topology, + BlockCreated { block }: BlockCreated, + ) -> Option { + let block_hash = block.payload().hash(); + let addr = &self.peer_id.address; + let role = self.current_topology.role(&self.peer_id); + trace!(%addr, %role, block_hash=%block_hash, "Block received, voting..."); - let msg = MessagePacket::new(view_change_proof_chain.clone(), None); - sumeragi.broadcast_packet(msg); -} + let mut new_wsv = self.wsv.clone(); + let block = match ValidBlock::validate(block, topology, self.chain_id, &mut new_wsv) { + Ok(block) => block, + Err((_, error)) => { + warn!(%addr, %role, ?error, "Block validation failed"); + return None; + } + }; -fn prune_view_change_proofs_and_calculate_current_index( - sumeragi: &Sumeragi, - view_change_proof_chain: &mut ProofChain, -) -> u64 { - view_change_proof_chain.prune(sumeragi.wsv.latest_block_hash()); - view_change_proof_chain.verify_with_state( - &sumeragi.current_topology.ordered_peers, - sumeragi.current_topology.max_faults(), - sumeragi.wsv.latest_block_hash(), - ) as u64 -} + let signed_block = block + .sign(self.key_pair.clone()) + .expect("Block signing failed"); -#[allow(clippy::too_many_lines)] -fn handle_message( - message: Message, - sumeragi: &mut Sumeragi, - voting_block: &mut Option, - current_view_change_index: u64, - view_change_proof_chain: &mut ProofChain, - voting_signatures: 
&mut Vec>, -) { - let current_topology = &sumeragi.current_topology; - let role = current_topology.role(&sumeragi.peer_id); - let addr = &sumeragi.peer_id.address; - - #[allow(clippy::suspicious_operation_groupings)] - match (message, role) { - (Message::BlockSyncUpdate(BlockSyncUpdate { block }), _) => { - let block_hash = block.hash(); - info!(%addr, %role, hash=%block_hash, "Block sync update received"); - - match handle_block_sync(block, &sumeragi.wsv, &sumeragi.finalized_wsv) { - Ok(BlockSyncOk::CommitBlock(block, new_wsv)) => { - sumeragi.commit_block(block, new_wsv) - } - Ok(BlockSyncOk::ReplaceTopBlock(block, new_wsv)) => { - warn!( - %addr, %role, - peer_latest_block_hash=?sumeragi.wsv.latest_block_hash(), - peer_latest_block_view_change_index=?sumeragi.wsv.latest_block_view_change_index(), - consensus_latest_block_hash=%block.hash(), - consensus_latest_block_view_change_index=%block.payload().header.view_change_index, - "Soft fork occurred: peer in inconsistent state. Rolling back and replacing top block." 
- ); - sumeragi.replace_top_block(block, new_wsv) - } - Err((_, BlockSyncError::BlockNotValid(error))) => { - error!(%addr, %role, %block_hash, ?error, "Block not valid.") - } - Err((_, BlockSyncError::SoftForkBlockNotValid(error))) => { - error!(%addr, %role, %block_hash, ?error, "Soft-fork block not valid.") - } - Err(( - _, - BlockSyncError::SoftForkBlockSmallViewChangeIndex { - peer_view_change_index, - block_view_change_index, - }, - )) => { - debug!( - %addr, %role, - peer_latest_block_hash=?sumeragi.wsv.latest_block_hash(), - peer_latest_block_view_change_index=?peer_view_change_index, - consensus_latest_block_hash=%block_hash, - consensus_latest_block_view_change_index=%block_view_change_index, - "Soft fork doesn't occurred: block has the same or smaller view change index" - ); - } - Err(( - _, - BlockSyncError::BlockNotProperHeight { - peer_height, - block_height, - }, - )) => { - warn!(%addr, %role, %block_hash, %block_height, %peer_height, "Other peer send irrelevant or outdated block to the peer (it's neither `peer_height` nor `peer_height + 1`).") + Some(VotingBlock::new(signed_block, new_wsv)) + } + + fn suggest_view_change( + &self, + view_change_proof_chain: &mut ProofChain, + current_view_change_index: u64, + ) { + let suspect_proof = + ProofBuilder::new(self.wsv.latest_block_hash(), current_view_change_index) + .sign(self.key_pair.clone()) + .expect("Proof signing failed"); + + view_change_proof_chain + .insert_proof( + &self.current_topology.ordered_peers, + self.current_topology.max_faults(), + self.wsv.latest_block_hash(), + suspect_proof, + ) + .unwrap_or_else(|err| error!("{err}")); + + let msg = MessagePacket::new(view_change_proof_chain.clone(), None); + self.broadcast_packet(msg); + } + + fn prune_view_change_proofs_and_calculate_current_index( + &self, + view_change_proof_chain: &mut ProofChain, + ) -> u64 { + view_change_proof_chain.prune(self.wsv.latest_block_hash()); + view_change_proof_chain.verify_with_state( + 
&self.current_topology.ordered_peers, + self.current_topology.max_faults(), + self.wsv.latest_block_hash(), + ) as u64 + } + + #[allow(clippy::too_many_lines)] + fn handle_message( + &mut self, + message: Message, + voting_block: &mut Option, + current_view_change_index: u64, + view_change_proof_chain: &mut ProofChain, + voting_signatures: &mut Vec>, + ) { + let current_topology = &self.current_topology; + let role = current_topology.role(&self.peer_id); + let addr = &self.peer_id.address; + + #[allow(clippy::suspicious_operation_groupings)] + match (message, role) { + (Message::BlockSyncUpdate(BlockSyncUpdate { block }), _) => { + let block_hash = block.hash(); + info!(%addr, %role, hash=%block_hash, "Block sync update received"); + + match handle_block_sync(self.chain_id, block, &self.wsv, &self.finalized_wsv) { + Ok(BlockSyncOk::CommitBlock(block, new_wsv)) => { + self.commit_block(block, new_wsv) + } + Ok(BlockSyncOk::ReplaceTopBlock(block, new_wsv)) => { + warn!( + %addr, %role, + peer_latest_block_hash=?self.wsv.latest_block_hash(), + peer_latest_block_view_change_index=?self.wsv.latest_block_view_change_index(), + consensus_latest_block_hash=%block.hash(), + consensus_latest_block_view_change_index=%block.payload().header.view_change_index, + "Soft fork occurred: peer in inconsistent state. Rolling back and replacing top block." 
+ ); + self.replace_top_block(block, new_wsv) + } + Err((_, BlockSyncError::BlockNotValid(error))) => { + error!(%addr, %role, %block_hash, ?error, "Block not valid.") + } + Err((_, BlockSyncError::SoftForkBlockNotValid(error))) => { + error!(%addr, %role, %block_hash, ?error, "Soft-fork block not valid.") + } + Err(( + _, + BlockSyncError::SoftForkBlockSmallViewChangeIndex { + peer_view_change_index, + block_view_change_index, + }, + )) => { + debug!( + %addr, %role, + peer_latest_block_hash=?self.wsv.latest_block_hash(), + peer_latest_block_view_change_index=?peer_view_change_index, + consensus_latest_block_hash=%block_hash, + consensus_latest_block_view_change_index=%block_view_change_index, + "Soft fork doesn't occurred: block has the same or smaller view change index" + ); + } + Err(( + _, + BlockSyncError::BlockNotProperHeight { + peer_height, + block_height, + }, + )) => { + warn!(%addr, %role, %block_hash, %block_height, %peer_height, "Other peer send irrelevant or outdated block to the peer (it's neither `peer_height` nor `peer_height + 1`).") + } } } - } - ( - Message::BlockCommitted(BlockCommitted { hash, signatures }), - Role::Leader | Role::ValidatingPeer | Role::ProxyTail | Role::ObservingPeer, - ) => { - let is_consensus_required = current_topology.is_consensus_required().is_some(); - if role == Role::ProxyTail && is_consensus_required - || role == Role::Leader && !is_consensus_required - { - error!(%addr, %role, "Received BlockCommitted message, but shouldn't"); - } else if let Some(voted_block) = voting_block.take() { - let voting_block_hash = voted_block.block.payload().hash(); - - if hash == voting_block_hash { - match voted_block - .block - .commit_with_signatures(current_topology, signatures) - { - Ok(committed_block) => { - sumeragi.commit_block(committed_block, voted_block.new_wsv) - } - Err((_, error)) => { - error!(%addr, %role, %hash, ?error, "Block failed to be committed") - } + ( + Message::BlockCommitted(BlockCommitted { hash, 
signatures }), + Role::Leader | Role::ValidatingPeer | Role::ProxyTail | Role::ObservingPeer, + ) => { + let is_consensus_required = current_topology.is_consensus_required().is_some(); + if role == Role::ProxyTail && is_consensus_required + || role == Role::Leader && !is_consensus_required + { + error!(%addr, %role, "Received BlockCommitted message, but shouldn't"); + } else if let Some(voted_block) = voting_block.take() { + let voting_block_hash = voted_block.block.payload().hash(); + + if hash == voting_block_hash { + match voted_block + .block + .commit_with_signatures(current_topology, signatures) + { + Ok(committed_block) => { + self.commit_block(committed_block, voted_block.new_wsv) + } + Err((_, error)) => { + error!(%addr, %role, %hash, ?error, "Block failed to be committed") + } + }; + } else { + error!( + %addr, %role, committed_block_hash=%hash, %voting_block_hash, + "The hash of the committed block does not match the hash of the block stored by the peer." + ); + + *voting_block = Some(voted_block); }; } else { - error!( - %addr, %role, committed_block_hash=%hash, %voting_block_hash, - "The hash of the committed block does not match the hash of the block stored by the peer." 
- ); - - *voting_block = Some(voted_block); - }; - } else { - error!(%addr, %role, %hash, "Peer missing voting block") + error!(%addr, %role, %hash, "Peer missing voting block") + } } - } - (Message::BlockCreated(block_created), Role::ValidatingPeer) => { - let current_topology = current_topology + (Message::BlockCreated(block_created), Role::ValidatingPeer) => { + let current_topology = current_topology .is_consensus_required() .expect("Peer has `ValidatingPeer` role, which mean that current topology require consensus"); - if let Some(v_block) = vote_for_block(sumeragi, ¤t_topology, block_created) { - let block_hash = v_block.block.payload().hash(); - - let msg = MessagePacket::new( - view_change_proof_chain.clone(), - Some(BlockSigned::from(v_block.block.clone()).into()), - ); - - sumeragi.broadcast_packet_to(msg, [current_topology.proxy_tail()]); - info!(%addr, %block_hash, "Block validated, signed and forwarded"); - - *voting_block = Some(v_block); - } - } - (Message::BlockCreated(block_created), Role::ObservingPeer) => { - let current_topology = current_topology.is_consensus_required().expect( - "Peer has `ObservingPeer` role, which mean that current topology require consensus", - ); - - if let Some(v_block) = vote_for_block(sumeragi, ¤t_topology, block_created) { - if current_view_change_index >= 1 { + if let Some(v_block) = self.vote_for_block(¤t_topology, block_created) { let block_hash = v_block.block.payload().hash(); let msg = MessagePacket::new( @@ -537,176 +549,198 @@ fn handle_message( Some(BlockSigned::from(v_block.block.clone()).into()), ); - sumeragi.broadcast_packet_to(msg, [current_topology.proxy_tail()]); + self.broadcast_packet_to(msg, [current_topology.proxy_tail()]); info!(%addr, %block_hash, "Block validated, signed and forwarded"); + *voting_block = Some(v_block); - } else { - error!(%addr, %role, "Received BlockCreated message, but shouldn't"); } } - } - (Message::BlockCreated(block_created), Role::ProxyTail) => { - if let Some(mut 
new_block) = vote_for_block(sumeragi, current_topology, block_created) { - // NOTE: Up until this point it was unknown which block is expected to be received, - // therefore all the signatures (of any hash) were collected and will now be pruned - add_signatures::(&mut new_block, voting_signatures.drain(..)); - *voting_block = Some(new_block); + (Message::BlockCreated(block_created), Role::ObservingPeer) => { + let current_topology = current_topology.is_consensus_required().expect( + "Peer has `ObservingPeer` role, which mean that current topology require consensus", + ); + + if let Some(v_block) = self.vote_for_block(¤t_topology, block_created) { + if current_view_change_index >= 1 { + let block_hash = v_block.block.payload().hash(); + + let msg = MessagePacket::new( + view_change_proof_chain.clone(), + Some(BlockSigned::from(v_block.block.clone()).into()), + ); + + self.broadcast_packet_to(msg, [current_topology.proxy_tail()]); + info!(%addr, %block_hash, "Block validated, signed and forwarded"); + *voting_block = Some(v_block); + } else { + error!(%addr, %role, "Received BlockCreated message, but shouldn't"); + } + } } - } - (Message::BlockSigned(BlockSigned { hash, signatures }), Role::ProxyTail) => { - trace!(block_hash=%hash, "Received block signatures"); + (Message::BlockCreated(block_created), Role::ProxyTail) => { + if let Some(mut new_block) = self.vote_for_block(current_topology, block_created) { + // NOTE: Up until this point it was unknown which block is expected to be received, + // therefore all the signatures (of any hash) were collected and will now be pruned + add_signatures::(&mut new_block, voting_signatures.drain(..)); + *voting_block = Some(new_block); + } + } + (Message::BlockSigned(BlockSigned { hash, signatures }), Role::ProxyTail) => { + trace!(block_hash=%hash, "Received block signatures"); - let roles: &[Role] = if current_view_change_index >= 1 { - &[Role::ValidatingPeer, Role::ObservingPeer] - } else { - &[Role::ValidatingPeer] - }; - 
let valid_signatures = current_topology.filter_signatures_by_roles(roles, &signatures); + let roles: &[Role] = if current_view_change_index >= 1 { + &[Role::ValidatingPeer, Role::ObservingPeer] + } else { + &[Role::ValidatingPeer] + }; + let valid_signatures = + current_topology.filter_signatures_by_roles(roles, &signatures); - if let Some(voted_block) = voting_block.as_mut() { - let voting_block_hash = voted_block.block.payload().hash(); + if let Some(voted_block) = voting_block.as_mut() { + let voting_block_hash = voted_block.block.payload().hash(); - if hash == voting_block_hash { - add_signatures::(voted_block, valid_signatures); + if hash == voting_block_hash { + add_signatures::(voted_block, valid_signatures); + } else { + debug!(%voting_block_hash, "Received signatures are not for the current block"); + } } else { - debug!(%voting_block_hash, "Received signatures are not for the current block"); + // NOTE: Due to the nature of distributed systems, signatures can sometimes be received before + // the block (sent by the leader). Collect the signatures and wait for the block to be received + voting_signatures.extend(valid_signatures); } - } else { - // NOTE: Due to the nature of distributed systems, signatures can sometimes be received before - // the block (sent by the leader). 
Collect the signatures and wait for the block to be received - voting_signatures.extend(valid_signatures); } - } - (msg, role) => { - trace!(%addr, %role, ?msg, "message not handled") + (msg, role) => { + trace!(%addr, %role, ?msg, "message not handled") + } } } -} -#[allow(clippy::too_many_lines)] -fn process_message_independent( - sumeragi: &mut Sumeragi, - voting_block: &mut Option, - current_view_change_index: u64, - view_change_proof_chain: &mut ProofChain, - round_start_time: &Instant, - #[cfg_attr(not(debug_assertions), allow(unused_variables))] is_genesis_peer: bool, -) { - let current_topology = &sumeragi.current_topology; - let role = current_topology.role(&sumeragi.peer_id); - let addr = &sumeragi.peer_id.address; - - match role { - Role::Leader => { - if voting_block.is_none() { - let cache_full = sumeragi.transaction_cache.len() >= sumeragi.max_txs_in_block; - let deadline_reached = round_start_time.elapsed() > sumeragi.block_time; - let cache_non_empty = !sumeragi.transaction_cache.is_empty(); - - if cache_full || (deadline_reached && cache_non_empty) { - let transactions = sumeragi.transaction_cache.clone(); - info!(%addr, txns=%transactions.len(), "Creating block..."); - - // TODO: properly process triggers! 
- let mut new_wsv = sumeragi.wsv.clone(); - let event_recommendations = Vec::new(); - let new_block = match BlockBuilder::new( - transactions, - sumeragi.current_topology.clone(), - event_recommendations, - ) - .chain(current_view_change_index, &mut new_wsv) - .sign(sumeragi.key_pair.clone()) - { - Ok(block) => block, - Err(error) => { - error!(?error, "Failed to sign block"); - return; - } - }; + #[allow(clippy::too_many_lines)] + fn process_message_independent( + &mut self, + voting_block: &mut Option, + current_view_change_index: u64, + view_change_proof_chain: &mut ProofChain, + round_start_time: &Instant, + #[cfg_attr(not(debug_assertions), allow(unused_variables))] is_genesis_peer: bool, + ) { + let current_topology = &self.current_topology; + let role = current_topology.role(&self.peer_id); + let addr = &self.peer_id.address; - if let Some(current_topology) = current_topology.is_consensus_required() { - info!(%addr, block_payload_hash=%new_block.payload().hash(), "Block created"); - *voting_block = Some(VotingBlock::new(new_block.clone(), new_wsv)); + match role { + Role::Leader => { + if voting_block.is_none() { + let cache_full = self.transaction_cache.len() >= self.max_txs_in_block; + let deadline_reached = round_start_time.elapsed() > self.block_time; + let cache_non_empty = !self.transaction_cache.is_empty(); - let msg = MessagePacket::new( - view_change_proof_chain.clone(), - Some(BlockCreated::from(new_block).into()), - ); - if current_view_change_index >= 1 { - sumeragi.broadcast_packet(msg); - } else { - sumeragi.broadcast_packet_to(msg, current_topology.voting_peers()); - } - } else { - match new_block.commit(current_topology) { - Ok(committed_block) => { - let msg = MessagePacket::new( - view_change_proof_chain.clone(), - Some(BlockCommitted::from(committed_block.clone()).into()), - ); + if cache_full || (deadline_reached && cache_non_empty) { + let transactions = self.transaction_cache.clone(); + info!(%addr, txns=%transactions.len(), "Creating 
block..."); - sumeragi.broadcast_packet(msg); - sumeragi.commit_block(committed_block, new_wsv); + // TODO: properly process triggers! + let mut new_wsv = self.wsv.clone(); + let event_recommendations = Vec::new(); + let new_block = match BlockBuilder::new( + transactions, + self.current_topology.clone(), + event_recommendations, + ) + .chain(current_view_change_index, &mut new_wsv) + .sign(self.key_pair.clone()) + { + Ok(block) => block, + Err(error) => { + error!(?error, "Failed to sign block"); + return; + } + }; + + if let Some(current_topology) = current_topology.is_consensus_required() { + info!(%addr, block_payload_hash=%new_block.payload().hash(), "Block created"); + *voting_block = Some(VotingBlock::new(new_block.clone(), new_wsv)); + + let msg = MessagePacket::new( + view_change_proof_chain.clone(), + Some(BlockCreated::from(new_block).into()), + ); + if current_view_change_index >= 1 { + self.broadcast_packet(msg); + } else { + self.broadcast_packet_to(msg, current_topology.voting_peers()); + } + } else { + match new_block.commit(current_topology) { + Ok(committed_block) => { + let msg = MessagePacket::new( + view_change_proof_chain.clone(), + Some(BlockCommitted::from(committed_block.clone()).into()), + ); + + self.broadcast_packet(msg); + self.commit_block(committed_block, new_wsv); + } + Err((_, error)) => error!(%addr, role=%Role::Leader, ?error), } - Err((_, error)) => error!(%addr, role=%Role::Leader, ?error), } } } } - } - Role::ProxyTail => { - if let Some(voted_block) = voting_block.take() { - let voted_at = voted_block.voted_at; - let new_wsv = voted_block.new_wsv; + Role::ProxyTail => { + if let Some(voted_block) = voting_block.take() { + let voted_at = voted_block.voted_at; + let new_wsv = voted_block.new_wsv; - match voted_block.block.commit(current_topology) { - Ok(committed_block) => { - info!(voting_block_hash = %committed_block.hash(), "Block reached required number of votes"); + match voted_block.block.commit(current_topology) { + 
Ok(committed_block) => { + info!(voting_block_hash = %committed_block.hash(), "Block reached required number of votes"); - let msg = MessagePacket::new( - view_change_proof_chain.clone(), - Some(BlockCommitted::from(committed_block.clone()).into()), - ); + let msg = MessagePacket::new( + view_change_proof_chain.clone(), + Some(BlockCommitted::from(committed_block.clone()).into()), + ); - let current_topology = current_topology + let current_topology = current_topology .is_consensus_required() .expect("Peer has `ProxyTail` role, which mean that current topology require consensus"); - #[cfg(debug_assertions)] - if is_genesis_peer && sumeragi.debug_force_soft_fork { - std::thread::sleep(sumeragi.pipeline_time() * 2); - } else if current_view_change_index >= 1 { - sumeragi.broadcast_packet(msg); - } else { - sumeragi.broadcast_packet_to(msg, current_topology.voting_peers()); - } - - #[cfg(not(debug_assertions))] - { - if current_view_change_index >= 1 { - sumeragi.broadcast_packet(msg); + #[cfg(debug_assertions)] + if is_genesis_peer && self.debug_force_soft_fork { + std::thread::sleep(self.pipeline_time() * 2); + } else if current_view_change_index >= 1 { + self.broadcast_packet(msg); } else { - sumeragi.broadcast_packet_to( - msg, - current_topology - .ordered_peers - .iter() - .take(current_topology.min_votes_for_commit()), - ); + self.broadcast_packet_to(msg, current_topology.voting_peers()); } + + #[cfg(not(debug_assertions))] + { + if current_view_change_index >= 1 { + self.broadcast_packet(msg); + } else { + self.broadcast_packet_to( + msg, + current_topology + .ordered_peers + .iter() + .take(current_topology.min_votes_for_commit()), + ); + } + } + self.commit_block(committed_block, new_wsv); + } + Err((block, error)) => { + // Restore the current voting block and continue the round + *voting_block = Some(VotingBlock::voted_at(block, new_wsv, voted_at)); + trace!(?error, "Not enough signatures, waiting for more..."); } - sumeragi.commit_block(committed_block, 
new_wsv); - } - Err((block, error)) => { - // Restore the current voting block and continue the round - *voting_block = Some(VotingBlock::voted_at(block, new_wsv, voted_at)); - trace!(?error, "Not enough signatures, waiting for more..."); } } } + _ => {} } - _ => {} } } @@ -857,10 +891,8 @@ pub(crate) fn run( ); sumeragi.send_events(expired_transactions.iter().map(expired_event)); - let current_view_change_index = prune_view_change_proofs_and_calculate_current_index( - &sumeragi, - &mut view_change_proof_chain, - ); + let current_view_change_index = sumeragi + .prune_view_change_proofs_and_calculate_current_index(&mut view_change_proof_chain); reset_state( &sumeragi.peer_id, @@ -893,11 +925,7 @@ pub(crate) fn run( warn!(peer_public_key=%sumeragi.peer_id.public_key, %role, "No block produced in due time, requesting view change..."); } - suggest_view_change( - &sumeragi, - &mut view_change_proof_chain, - current_view_change_index, - ); + sumeragi.suggest_view_change(&mut view_change_proof_chain, current_view_change_index); // NOTE: View change must be periodically suggested until it is accepted. 
// Must be initialized to pipeline time but can increase by chosen amount @@ -914,9 +942,8 @@ pub(crate) fn run( should_sleep = true; }, |message| { - handle_message( + sumeragi.handle_message( message, - &mut sumeragi, &mut voting_block, current_view_change_index, &mut view_change_proof_chain, @@ -926,10 +953,8 @@ pub(crate) fn run( ); // State could be changed after handling message so it is necessary to reset state before handling message independent step - let current_view_change_index = prune_view_change_proofs_and_calculate_current_index( - &sumeragi, - &mut view_change_proof_chain, - ); + let current_view_change_index = sumeragi + .prune_view_change_proofs_and_calculate_current_index(&mut view_change_proof_chain); reset_state( &sumeragi.peer_id, @@ -949,8 +974,7 @@ pub(crate) fn run( &mut view_change_time, ); - process_message_independent( - &mut sumeragi, + sumeragi.process_message_independent( &mut voting_block, current_view_change_index, &mut view_change_proof_chain, @@ -989,32 +1013,6 @@ fn expired_event(txn: &AcceptedTransaction) -> Event { .into() } -fn vote_for_block( - sumeragi: &Sumeragi, - topology: &Topology, - BlockCreated { block }: BlockCreated, -) -> Option { - let block_hash = block.payload().hash(); - let addr = &sumeragi.peer_id.address; - let role = sumeragi.current_topology.role(&sumeragi.peer_id); - trace!(%addr, %role, block_hash=%block_hash, "Block received, voting..."); - - let mut new_wsv = sumeragi.wsv.clone(); - let block = match ValidBlock::validate(block, topology, &mut new_wsv) { - Ok(block) => block, - Err((_, error)) => { - warn!(%addr, %role, ?error, "Block validation failed"); - return None; - } - }; - - let signed_block = block - .sign(sumeragi.key_pair.clone()) - .expect("Block signing failed"); - - Some(VotingBlock::new(signed_block, new_wsv)) -} - /// Type enumerating early return types to reduce cyclomatic /// complexity of the main loop items and allow direct short /// circuiting with the `?` operator. 
Candidate for `impl @@ -1111,6 +1109,7 @@ enum BlockSyncError { } fn handle_block_sync( + chain_id: u16, block: SignedBlock, wsv: &WorldStateView, finalized_wsv: &WorldStateView, @@ -1128,7 +1127,7 @@ fn handle_block_sync( let view_change_index = block.payload().header().view_change_index; Topology::recreate_topology(&last_committed_block, view_change_index, new_peers) }; - ValidBlock::validate(block, &topology, &mut new_wsv) + ValidBlock::validate(block, &topology, chain_id, &mut new_wsv) .and_then(|block| { block .commit(&topology) @@ -1148,7 +1147,7 @@ fn handle_block_sync( let view_change_index = block.payload().header().view_change_index; Topology::recreate_topology(&last_committed_block, view_change_index, new_peers) }; - ValidBlock::validate(block, &topology, &mut new_wsv) + ValidBlock::validate(block, &topology, chain_id, &mut new_wsv) .and_then(|block| { block .commit(&topology) @@ -1191,6 +1190,7 @@ mod tests { use crate::{query::store::LiveQueryStore, smartcontracts::Registrable}; fn create_data_for_test( + chain_id: u16, topology: &Topology, leader_key_pair: KeyPair, ) -> (WorldStateView, Arc, SignedBlock) { @@ -1209,15 +1209,19 @@ mod tests { // Create "genesis" block // Creating an instruction - let fail_box: InstructionBox = Fail::new("Dummy isi".to_owned()).into(); + let fail_box = Fail::new("Dummy isi".to_owned()); // Making two transactions that have the same instruction - let tx = TransactionBuilder::new(alice_id.clone()) + let tx = TransactionBuilder::new(chain_id, alice_id.clone()) .with_instructions([fail_box]) .sign(alice_keys.clone()) .expect("Valid"); - let tx = AcceptedTransaction::accept(tx, &wsv.transaction_executor().transaction_limits) - .expect("Valid"); + let tx = AcceptedTransaction::accept( + tx, + chain_id, + &wsv.transaction_executor().transaction_limits, + ) + .expect("Valid"); // Creating a block of two identical transactions and validating it let block = BlockBuilder::new(vec![tx.clone(), tx], topology.clone(), Vec::new()) @@ 
-1237,20 +1241,28 @@ mod tests { "xor2#wonderland".parse().expect("Valid"), )); - let tx1 = TransactionBuilder::new(alice_id.clone()) + let tx1 = TransactionBuilder::new(chain_id, alice_id.clone()) .with_instructions([create_asset_definition1]) .sign(alice_keys.clone()) .expect("Valid"); - let tx1 = AcceptedTransaction::accept(tx1, &wsv.transaction_executor().transaction_limits) - .map(Into::into) - .expect("Valid"); - let tx2 = TransactionBuilder::new(alice_id) + let tx1 = AcceptedTransaction::accept( + tx1, + chain_id, + &wsv.transaction_executor().transaction_limits, + ) + .map(Into::into) + .expect("Valid"); + let tx2 = TransactionBuilder::new(chain_id, alice_id) .with_instructions([create_asset_definition2]) .sign(alice_keys) .expect("Valid"); - let tx2 = AcceptedTransaction::accept(tx2, &wsv.transaction_executor().transaction_limits) - .map(Into::into) - .expect("Valid"); + let tx2 = AcceptedTransaction::accept( + tx2, + chain_id, + &wsv.transaction_executor().transaction_limits, + ) + .map(Into::into) + .expect("Valid"); // Creating a block of two identical transactions and validating it let block = BlockBuilder::new(vec![tx1, tx2], topology.clone(), Vec::new()) @@ -1264,32 +1276,39 @@ mod tests { #[test] #[allow(clippy::redundant_clone)] async fn block_sync_invalid_block() { + let chain_id = 0; + let leader_key_pair = KeyPair::generate().unwrap(); let topology = Topology::new(unique_vec![PeerId::new( &"127.0.0.1:8080".parse().unwrap(), leader_key_pair.public_key(), )]); - let (finalized_wsv, _, mut block) = create_data_for_test(&topology, leader_key_pair); + let (finalized_wsv, _, mut block) = + create_data_for_test(chain_id, &topology, leader_key_pair); let wsv = finalized_wsv.clone(); // Malform block to make it invalid block.payload_mut().commit_topology.clear(); - let result = handle_block_sync(block, &wsv, &finalized_wsv); + let result = handle_block_sync(chain_id, block, &wsv, &finalized_wsv); assert!(matches!(result, Err((_, 
BlockSyncError::BlockNotValid(_))))) } #[test] async fn block_sync_invalid_soft_fork_block() { + let chain_id = 0; + let leader_key_pair = KeyPair::generate().unwrap(); let topology = Topology::new(unique_vec![PeerId::new( &"127.0.0.1:8080".parse().unwrap(), leader_key_pair.public_key(), )]); - let (finalized_wsv, kura, mut block) = create_data_for_test(&topology, leader_key_pair); + let (finalized_wsv, kura, mut block) = + create_data_for_test(chain_id, &topology, leader_key_pair); let mut wsv = finalized_wsv.clone(); - let validated_block = ValidBlock::validate(block.clone(), &topology, &mut wsv).unwrap(); + let validated_block = + ValidBlock::validate(block.clone(), &topology, chain_id, &mut wsv).unwrap(); let committed_block = validated_block.commit(&topology).expect("Block is valid"); wsv.apply_without_execution(&committed_block) .expect("Failed to apply block"); @@ -1298,7 +1317,7 @@ mod tests { // Malform block to make it invalid block.payload_mut().commit_topology.clear(); - let result = handle_block_sync(block, &wsv, &finalized_wsv); + let result = handle_block_sync(chain_id, block, &wsv, &finalized_wsv); assert!(matches!( result, Err((_, BlockSyncError::SoftForkBlockNotValid(_))) @@ -1308,15 +1327,18 @@ mod tests { #[test] #[allow(clippy::redundant_clone)] async fn block_sync_not_proper_height() { + let chain_id = 0; + let topology = Topology::new(UniqueVec::new()); let leader_key_pair = KeyPair::generate().unwrap(); - let (finalized_wsv, _, mut block) = create_data_for_test(&topology, leader_key_pair); + let (finalized_wsv, _, mut block) = + create_data_for_test(chain_id, &topology, leader_key_pair); let wsv = finalized_wsv.clone(); // Change block height block.payload_mut().header.height = 42; - let result = handle_block_sync(block, &wsv, &finalized_wsv); + let result = handle_block_sync(chain_id, block, &wsv, &finalized_wsv); assert!(matches!( result, Err(( @@ -1332,28 +1354,34 @@ mod tests { #[test] #[allow(clippy::redundant_clone)] async fn 
block_sync_commit_block() { + let chain_id = 0; + let leader_key_pair = KeyPair::generate().unwrap(); let topology = Topology::new(unique_vec![PeerId::new( &"127.0.0.1:8080".parse().unwrap(), leader_key_pair.public_key(), )]); - let (finalized_wsv, _, block) = create_data_for_test(&topology, leader_key_pair); + let (finalized_wsv, _, block) = create_data_for_test(chain_id, &topology, leader_key_pair); let wsv = finalized_wsv.clone(); - let result = handle_block_sync(block, &wsv, &finalized_wsv); + let result = handle_block_sync(chain_id, block, &wsv, &finalized_wsv); assert!(matches!(result, Ok(BlockSyncOk::CommitBlock(_, _)))) } #[test] async fn block_sync_replace_top_block() { + let chain_id = 0; + let leader_key_pair = KeyPair::generate().unwrap(); let topology = Topology::new(unique_vec![PeerId::new( &"127.0.0.1:8080".parse().unwrap(), leader_key_pair.public_key(), )]); - let (finalized_wsv, kura, mut block) = create_data_for_test(&topology, leader_key_pair); + let (finalized_wsv, kura, mut block) = + create_data_for_test(chain_id, &topology, leader_key_pair); let mut wsv = finalized_wsv.clone(); - let validated_block = ValidBlock::validate(block.clone(), &topology, &mut wsv).unwrap(); + let validated_block = + ValidBlock::validate(block.clone(), &topology, chain_id, &mut wsv).unwrap(); let committed_block = validated_block.commit(&topology).expect("Block is valid"); wsv.apply_without_execution(&committed_block) .expect("Failed to apply block"); @@ -1363,24 +1391,28 @@ mod tests { // Increase block view change index block.payload_mut().header.view_change_index = 42; - let result = handle_block_sync(block, &wsv, &finalized_wsv); + let result = handle_block_sync(chain_id, block, &wsv, &finalized_wsv); assert!(matches!(result, Ok(BlockSyncOk::ReplaceTopBlock(_, _)))) } #[test] async fn block_sync_small_view_change_index() { + let chain_id = 0; + let leader_key_pair = KeyPair::generate().unwrap(); let topology = Topology::new(unique_vec![PeerId::new( 
&"127.0.0.1:8080".parse().unwrap(), leader_key_pair.public_key(), )]); - let (finalized_wsv, kura, mut block) = create_data_for_test(&topology, leader_key_pair); + let (finalized_wsv, kura, mut block) = + create_data_for_test(chain_id, &topology, leader_key_pair); let mut wsv = finalized_wsv.clone(); // Increase block view change index block.payload_mut().header.view_change_index = 42; - let validated_block = ValidBlock::validate(block.clone(), &topology, &mut wsv).unwrap(); + let validated_block = + ValidBlock::validate(block.clone(), &topology, chain_id, &mut wsv).unwrap(); let committed_block = validated_block.commit(&topology).expect("Block is valid"); wsv.apply_without_execution(&committed_block) .expect("Failed to apply block"); @@ -1390,7 +1422,7 @@ mod tests { // Decrease block view change index back block.payload_mut().header.view_change_index = 0; - let result = handle_block_sync(block, &wsv, &finalized_wsv); + let result = handle_block_sync(chain_id, block, &wsv, &finalized_wsv); assert!(matches!( result, Err(( @@ -1406,9 +1438,12 @@ mod tests { #[test] #[allow(clippy::redundant_clone)] async fn block_sync_genesis_block_do_not_replace() { + let chain_id = 0; + let topology = Topology::new(UniqueVec::new()); let leader_key_pair = KeyPair::generate().unwrap(); - let (finalized_wsv, _, mut block) = create_data_for_test(&topology, leader_key_pair); + let (finalized_wsv, _, mut block) = + create_data_for_test(chain_id, &topology, leader_key_pair); let wsv = finalized_wsv.clone(); // Change block height and view change index @@ -1416,7 +1451,7 @@ mod tests { block.payload_mut().header.view_change_index = 42; block.payload_mut().header.height = 1; - let result = handle_block_sync(block, &wsv, &finalized_wsv); + let result = handle_block_sync(chain_id, block, &wsv, &finalized_wsv); assert!(matches!( result, Err(( diff --git a/core/src/sumeragi/mod.rs b/core/src/sumeragi/mod.rs index 2bafc67e9f2..c8711551aef 100644 --- a/core/src/sumeragi/mod.rs +++ 
b/core/src/sumeragi/mod.rs @@ -227,6 +227,7 @@ impl SumeragiHandle { } fn replay_block( + chain_id: u16, block: &SignedBlock, wsv: &mut WorldStateView, mut current_topology: Topology, @@ -234,7 +235,7 @@ impl SumeragiHandle { // NOTE: topology need to be updated up to block's view_change_index current_topology.rotate_all_n(block.payload().header.view_change_index); - let block = ValidBlock::validate(block.clone(), ¤t_topology, wsv) + let block = ValidBlock::validate(block.clone(), ¤t_topology, chain_id, wsv) .expect("Kura blocks should be valid") .commit(¤t_topology) .expect("Kura blocks should be valid"); @@ -258,6 +259,7 @@ impl SumeragiHandle { #[allow(clippy::too_many_lines)] pub fn start( SumeragiStartArgs { + chain_id, configuration, events_sender, mut wsv, @@ -296,14 +298,14 @@ impl SumeragiHandle { let block_iter_except_last = (&mut blocks_iter).take(block_count.saturating_sub(skip_block_count + 1)); for block in block_iter_except_last { - current_topology = Self::replay_block(&block, &mut wsv, current_topology); + current_topology = Self::replay_block(chain_id, &block, &mut wsv, current_topology); } // finalized_wsv is one block behind let finalized_wsv = wsv.clone(); if let Some(block) = blocks_iter.next() { - current_topology = Self::replay_block(&block, &mut wsv, current_topology); + current_topology = Self::replay_block(chain_id, &block, &mut wsv, current_topology); } info!("Sumeragi has finished loading blocks and setting up the WSV"); @@ -318,6 +320,7 @@ impl SumeragiHandle { let debug_force_soft_fork = false; let sumeragi = main_loop::Sumeragi { + chain_id, key_pair: configuration.key_pair.clone(), queue: Arc::clone(&queue), peer_id: configuration.peer_id.clone(), @@ -418,6 +421,7 @@ impl VotingBlock { /// Arguments for [`SumeragiHandle::start`] function #[allow(missing_docs)] pub struct SumeragiStartArgs<'args> { + pub chain_id: u16, pub configuration: &'args Configuration, pub events_sender: EventsSender, pub wsv: WorldStateView, diff --git 
a/core/src/tx.rs b/core/src/tx.rs index 01ee688edcf..b3ecf102319 100644 --- a/core/src/tx.rs +++ b/core/src/tx.rs @@ -11,6 +11,7 @@ use eyre::Result; use iroha_crypto::{HashOf, SignatureVerificationFail, SignaturesOf}; pub use iroha_data_model::prelude::*; use iroha_data_model::{ + isi::error::Mismatch, query::error::FindError, transaction::{error::TransactionLimitError, TransactionLimits}, }; @@ -34,12 +35,26 @@ pub enum AcceptTransactionFail { SignatureVerification(#[source] SignatureVerificationFail), /// The genesis account can only sign transactions in the genesis block UnexpectedGenesisAccountSignature, + /// Transaction's `chain_id` doesn't correspond to the id of current blockchain + ChainIdMismatch(Mismatch), } impl AcceptedTransaction { /// Accept genesis transaction. Transition from [`GenesisTransaction`] to [`AcceptedTransaction`]. - pub fn accept_genesis(tx: GenesisTransaction) -> Self { - Self(tx.0) + pub fn accept_genesis( + tx: GenesisTransaction, + expected_chain_id: u16, + ) -> Result { + let actual_chain_id = tx.0.payload().chain_id; + + if expected_chain_id != actual_chain_id { + return Err(AcceptTransactionFail::ChainIdMismatch(Mismatch { + expected: expected_chain_id, + actual: actual_chain_id, + })); + } + + Ok(Self(tx.0)) } /// Accept transaction. Transition from [`SignedTransaction`] to [`AcceptedTransaction`]. 
@@ -48,14 +63,24 @@ impl AcceptedTransaction { /// /// - if it does not adhere to limits pub fn accept( - transaction: SignedTransaction, + tx: SignedTransaction, + expected_chain_id: u16, limits: &TransactionLimits, ) -> Result<Self, AcceptTransactionFail> { - if *iroha_genesis::GENESIS_ACCOUNT_ID == transaction.payload().authority { + let actual_chain_id = tx.payload().chain_id; + + if expected_chain_id != actual_chain_id { + return Err(AcceptTransactionFail::ChainIdMismatch(Mismatch { + expected: expected_chain_id, + actual: actual_chain_id, + })); + } + + if *iroha_genesis::GENESIS_ACCOUNT_ID == tx.payload().authority { return Err(AcceptTransactionFail::UnexpectedGenesisAccountSignature); } - match &transaction.payload().instructions { + match &tx.payload().instructions { Executable::Instructions(instructions) => { let instruction_count = instructions.len(); if u64::try_from(instruction_count).expect("`usize` should always fit into `u64`") @@ -93,7 +118,7 @@ impl AcceptedTransaction { } } - Ok(Self(transaction)) + Ok(Self(tx)) } /// Transaction hash diff --git a/core/test_network/Cargo.toml b/core/test_network/Cargo.toml index 22cbae6888a..71e24f95f18 100644 --- a/core/test_network/Cargo.toml +++ b/core/test_network/Cargo.toml @@ -25,5 +25,5 @@ rand = { workspace = true } tempfile = { workspace = true } tokio = { workspace = true, features = ["rt", "rt-multi-thread", "macros"] } unique_port = "0.2.1" -parity-scale-codec = { version = "3.6.5", default-features = false } +parity-scale-codec = { version = "3.6.9", default-features = false } serde_json = { workspace = true } diff --git a/core/test_network/src/lib.rs b/core/test_network/src/lib.rs index 9b217a69d59..a3abee7f51e 100644 --- a/core/test_network/src/lib.rs +++ b/core/test_network/src/lib.rs @@ -49,21 +49,22 @@ pub struct Network { pub peers: BTreeMap<PeerId, Peer>, } +/// Get a standardized blockchain id +pub fn get_chain_id() -> u16 { + 0 +} + /// Get a standardised key-pair from the hard-coded literals. 
-/// -/// # Panics -/// Programmer error. Given keys must be in proper format. pub fn get_key_pair() -> KeyPair { KeyPair::new( PublicKey::from_str( "ed01207233BFC89DCBD68C19FDE6CE6158225298EC1131B6A130D1AEB454C1AB5183C0", - ) - .expect("Public key not in mulithash format"), + ).unwrap(), PrivateKey::from_hex( Algorithm::Ed25519, "9AC47ABF59B356E0BD7DCBBBB4DEC080E302156A48CA907E47CB6AEA1D32719E7233BFC89DCBD68C19FDE6CE6158225298EC1131B6A130D1AEB454C1AB5183C0" - ).expect("Private key not hex encoded") - ).expect("Key pair mismatch") + ).unwrap() + ).unwrap() } /// Trait used to differentiate a test instance of `genesis`. @@ -129,12 +130,13 @@ impl TestGenesis for GenesisNetwork { first_transaction.append_instruction(isi); } + let chain_id = 0; let key_pair = KeyPair::new( cfg.genesis.public_key.clone(), cfg.genesis.private_key.expect("Should be"), ) .expect("Genesis key pair should be valid"); - GenesisNetwork::new(genesis, &key_pair).expect("Failed to init genesis") + GenesisNetwork::new(genesis, chain_id, &key_pair).expect("Failed to init genesis") } } @@ -768,8 +770,11 @@ impl TestRuntime for Runtime { impl TestConfiguration for Configuration { fn test() -> Self { - let mut sample_proxy = - iroha::samples::get_config_proxy(UniqueVec::new(), Some(get_key_pair())); + let mut sample_proxy = iroha::samples::get_config_proxy( + UniqueVec::new(), + Some(get_chain_id()), + Some(get_key_pair()), + ); let env_proxy = ConfigurationProxy::from_std_env().expect("Test env variables should parse properly"); let (public_key, private_key) = KeyPair::generate().unwrap().into(); @@ -791,7 +796,8 @@ impl TestConfiguration for Configuration { impl TestClientConfiguration for ClientConfiguration { fn test(api_url: &SocketAddr) -> Self { - let mut configuration = iroha_client::samples::get_client_config(&get_key_pair()); + let mut configuration = + iroha_client::samples::get_client_config(get_chain_id(), &get_key_pair()); configuration.torii_api_url = format!("http://{api_url}") 
.parse() .expect("Should be valid url"); diff --git a/crypto/Cargo.toml b/crypto/Cargo.toml index d3f9684b8b2..4fac7fa3b55 100644 --- a/crypto/Cargo.toml +++ b/crypto/Cargo.toml @@ -55,18 +55,18 @@ serde_with = { workspace = true, features = ["macros"] } hex = { workspace = true, features = ["alloc", "serde"] } getset = { workspace = true } -thiserror = { version = "1.0.50", optional = true } +thiserror = { version = "1.0.56", optional = true } displaydoc = { version = "0.2.4", default-features = false } digest = { version = "0.10.7", optional = true } blake2 = { version = "0.10.6", optional = true } sha2 = { version = "0.10.8", optional = true } -hkdf = { version = "0.12.3", optional = true } +hkdf = { version = "0.12.4", optional = true } amcl = { version = "0.2.0", optional = true, default-features = false, features = ["secp256k1"] } amcl_wrapper = { version = "0.4.0", optional = true } -signature = { version = "2.1.0", optional = true } -ed25519-dalek = { version = "2.0.0", optional = true, features = ["rand_core"] } +signature = { version = "2.2.0", optional = true } +ed25519-dalek = { version = "2.1.0", optional = true, features = ["rand_core"] } curve25519-dalek = { version = "4.1.1", optional = true } x25519-dalek = { version = "2.0.0", optional = true, features = ["static_secrets"] } @@ -74,14 +74,14 @@ rand = { workspace = true, optional = true } rand_chacha = { version = "0.3.1", optional = true } -zeroize = { version = "1.6.0", optional = true } +zeroize = { version = "1.7.0", optional = true } arrayref = { version = "0.3.7", optional = true } aead = { version = "0.5.2", optional = true } chacha20poly1305 = { version = "0.10.1", optional = true } -elliptic-curve = { version = "0.13.6", optional = true } -k256 = { version = "0.13.1", optional = true, features = ["ecdsa", "sha256"]} +elliptic-curve = { version = "0.13.8", optional = true } +k256 = { version = "0.13.3", optional = true, features = ["ecdsa", "sha256"]} [dev-dependencies] hex-literal = { 
workspace = true } @@ -89,6 +89,6 @@ serde_json = { workspace = true } # these crypto libraries are not used to implement actual crypto algorithms # but to test some of the primitives against them -secp256k1 = { version = "0.28.0", features = ["rand", "serde"] } -libsodium-sys-stable = "1.20.3" -openssl = { version = "0.10.59", features = ["vendored"] } +secp256k1 = { version = "0.28.1", features = ["rand", "serde"] } +libsodium-sys-stable = "1.20.4" +openssl = { version = "0.10.62", features = ["vendored"] } diff --git a/data_model/src/transaction.rs b/data_model/src/transaction.rs index 9d288239636..b22a16b3270 100644 --- a/data_model/src/transaction.rs +++ b/data_model/src/transaction.rs @@ -101,6 +101,8 @@ pub mod model { #[getset(get = "pub")] #[ffi_type] pub struct TransactionPayload { + /// Unique id of the blockchain. Used for simple replay attack protection. + pub chain_id: u16, /// Creation timestamp (unix time in milliseconds). #[getset(skip)] pub creation_time_ms: u64, @@ -676,7 +678,7 @@ mod http { /// Construct [`Self`]. 
#[inline] #[cfg(feature = "std")] - pub fn new(authority: AccountId) -> Self { + pub fn new(chain_id: u16, authority: AccountId) -> Self { let creation_time_ms = crate::current_time() .as_millis() .try_into() @@ -684,6 +686,7 @@ mod http { Self { payload: TransactionPayload { + chain_id, authority, creation_time_ms, nonce: None, diff --git a/docker-compose.dev.local.yml b/docker-compose.dev.local.yml index b7e9c831877..be69b02c5e5 100644 --- a/docker-compose.dev.local.yml +++ b/docker-compose.dev.local.yml @@ -7,6 +7,7 @@ services: build: ./ platform: linux/amd64 environment: + IROHA_CHAIN_ID: 0 IROHA_CONFIG: /config/config.json IROHA_PUBLIC_KEY: ed01208BA62848CF767D72E7F7F4B9D2D7BA07FEE33760F79ABE5597A51520E292A0CB IROHA_PRIVATE_KEY: '{"digest_function":"ed25519","payload":"8f4c15e5d664da3f13778801d23d4e89b76e94c1b94b389544168b6cb894f84f8ba62848cf767d72e7f7f4b9d2d7ba07fee33760f79abe5597a51520e292a0cb"}' @@ -27,6 +28,7 @@ services: build: ./ platform: linux/amd64 environment: + IROHA_CHAIN_ID: 0 IROHA_CONFIG: /config/config.json IROHA_PUBLIC_KEY: ed0120815BBDC9775D28C3633269B25F22D048E2AA2E36017CBE5AD85F15220BEB6F6F IROHA_PRIVATE_KEY: '{"digest_function":"ed25519","payload":"c02ffad5e455e7ec620d74de5769681e4d8385906bce5a437eb67452a9efbbc2815bbdc9775d28c3633269b25f22d048e2aa2e36017cbe5ad85f15220beb6f6f"}' @@ -44,6 +46,7 @@ services: build: ./ platform: linux/amd64 environment: + IROHA_CHAIN_ID: 0 IROHA_CONFIG: /config/config.json IROHA_PUBLIC_KEY: ed0120F417E0371E6ADB32FD66749477402B1AB67F84A8E9B082E997980CC91F327736 IROHA_PRIVATE_KEY: '{"digest_function":"ed25519","payload":"29c5ed1409cb10fd791bc4ff8a6cb5e22a5fae7e36f448ef3ea2988b1319a88bf417e0371e6adb32fd66749477402b1ab67f84a8e9b082e997980cc91f327736"}' @@ -61,6 +64,7 @@ services: build: ./ platform: linux/amd64 environment: + IROHA_CHAIN_ID: 0 IROHA_CONFIG: /config/config.json IROHA_PUBLIC_KEY: ed0120A66522370D60B9C09E79ADE2E9BB1EF2E78733A944B999B3A6AEE687CE476D61 IROHA_PRIVATE_KEY: 
'{"digest_function":"ed25519","payload":"5eed4855fad183c451aac39dfc50831607e4cf408c98e2b977f3ce4a2df42ce2a66522370d60b9c09e79ade2e9bb1ef2e78733a944b999b3a6aee687ce476d61"}' diff --git a/docker-compose.dev.single.yml b/docker-compose.dev.single.yml index a6d0af1cdae..01e4d9b0197 100644 --- a/docker-compose.dev.single.yml +++ b/docker-compose.dev.single.yml @@ -7,6 +7,7 @@ services: build: ./ platform: linux/amd64 environment: + IROHA_CHAIN_ID: 0 IROHA_CONFIG: /config/config.json IROHA_PUBLIC_KEY: ed01208BA62848CF767D72E7F7F4B9D2D7BA07FEE33760F79ABE5597A51520E292A0CB IROHA_PRIVATE_KEY: '{"digest_function":"ed25519","payload":"8f4c15e5d664da3f13778801d23d4e89b76e94c1b94b389544168b6cb894f84f8ba62848cf767d72e7f7f4b9d2d7ba07fee33760f79abe5597a51520e292a0cb"}' diff --git a/docker-compose.dev.yml b/docker-compose.dev.yml index a483a1c1b16..83439255e3e 100644 --- a/docker-compose.dev.yml +++ b/docker-compose.dev.yml @@ -7,6 +7,7 @@ services: image: hyperledger/iroha2:dev platform: linux/amd64 environment: + IROHA_CHAIN_ID: 0 IROHA_CONFIG: /config/config.json IROHA_PUBLIC_KEY: ed01208BA62848CF767D72E7F7F4B9D2D7BA07FEE33760F79ABE5597A51520E292A0CB IROHA_PRIVATE_KEY: '{"digest_function":"ed25519","payload":"8f4c15e5d664da3f13778801d23d4e89b76e94c1b94b389544168b6cb894f84f8ba62848cf767d72e7f7f4b9d2d7ba07fee33760f79abe5597a51520e292a0cb"}' @@ -27,6 +28,7 @@ services: image: hyperledger/iroha2:dev platform: linux/amd64 environment: + IROHA_CHAIN_ID: 0 IROHA_CONFIG: /config/config.json IROHA_PUBLIC_KEY: ed0120815BBDC9775D28C3633269B25F22D048E2AA2E36017CBE5AD85F15220BEB6F6F IROHA_PRIVATE_KEY: '{"digest_function":"ed25519","payload":"c02ffad5e455e7ec620d74de5769681e4d8385906bce5a437eb67452a9efbbc2815bbdc9775d28c3633269b25f22d048e2aa2e36017cbe5ad85f15220beb6f6f"}' @@ -44,6 +46,7 @@ services: image: hyperledger/iroha2:dev platform: linux/amd64 environment: + IROHA_CHAIN_ID: 0 IROHA_CONFIG: /config/config.json IROHA_PUBLIC_KEY: 
ed0120F417E0371E6ADB32FD66749477402B1AB67F84A8E9B082E997980CC91F327736 IROHA_PRIVATE_KEY: '{"digest_function":"ed25519","payload":"29c5ed1409cb10fd791bc4ff8a6cb5e22a5fae7e36f448ef3ea2988b1319a88bf417e0371e6adb32fd66749477402b1ab67f84a8e9b082e997980cc91f327736"}' @@ -61,6 +64,7 @@ services: image: hyperledger/iroha2:dev platform: linux/amd64 environment: + IROHA_CHAIN_ID: 0 IROHA_CONFIG: /config/config.json IROHA_PUBLIC_KEY: ed0120A66522370D60B9C09E79ADE2E9BB1EF2E78733A944B999B3A6AEE687CE476D61 IROHA_PRIVATE_KEY: '{"digest_function":"ed25519","payload":"5eed4855fad183c451aac39dfc50831607e4cf408c98e2b977f3ce4a2df42ce2a66522370d60b9c09e79ade2e9bb1ef2e78733a944b999b3a6aee687ce476d61"}' diff --git a/docs/source/references/schema.json b/docs/source/references/schema.json index 3ac32350d43..b07817080eb 100644 --- a/docs/source/references/schema.json +++ b/docs/source/references/schema.json @@ -4124,6 +4124,10 @@ }, "TransactionPayload": { "Struct": [ + { + "name": "chain_id", + "type": "u16" + }, { "name": "creation_time_ms", "type": "u64" diff --git a/ffi/derive/src/attr_parse/derive.rs b/ffi/derive/src/attr_parse/derive.rs index d1d36bb5832..7ee73ab107d 100644 --- a/ffi/derive/src/attr_parse/derive.rs +++ b/ffi/derive/src/attr_parse/derive.rs @@ -21,9 +21,7 @@ pub enum RustcDerive { impl RustcDerive { fn try_from_path(path: &syn2::Path) -> Option { - let Some(ident) = path.get_ident() else { - return None; - }; + let ident = path.get_ident()?; match ident.to_string().as_str() { "Eq" => Some(Self::Eq), diff --git a/genesis/src/lib.rs b/genesis/src/lib.rs index d32ebb22405..673e11972e6 100644 --- a/genesis/src/lib.rs +++ b/genesis/src/lib.rs @@ -45,7 +45,11 @@ impl GenesisNetwork { /// - If fails to sign a transaction (which means that the `key_pair` is malformed rather /// than anything else) /// - If transactions set is empty - pub fn new(raw_block: RawGenesisBlock, genesis_key_pair: &KeyPair) -> Result { + pub fn new( + raw_block: RawGenesisBlock, + chain_id: u16, + 
genesis_key_pair: &KeyPair, + ) -> Result { // First instruction should be Executor upgrade. // This makes possible to grant permissions to users in genesis. let transactions_iter = std::iter::once(GenesisTransactionBuilder { @@ -64,7 +68,7 @@ impl GenesisNetwork { // FIXME: fix underlying chain of `.sign` so that it doesn't // consume the key pair unnecessarily. It might be costly to clone // the key pair for a large genesis. - .sign(genesis_key_pair.clone()) + .sign(chain_id, genesis_key_pair.clone()) .map(GenesisTransaction) .wrap_err_with(|| eyre!("Failed to sign transaction at index {i}")) }) @@ -188,11 +192,12 @@ impl GenesisTransactionBuilder { /// /// # Errors /// Fails if signing or accepting fails. - pub fn sign( + fn sign( self, + chain_id: u16, genesis_key_pair: KeyPair, ) -> core::result::Result { - TransactionBuilder::new(GENESIS_ACCOUNT_ID.clone()) + TransactionBuilder::new(chain_id, GENESIS_ACCOUNT_ID.clone()) .with_instructions(self.isi) .sign(genesis_key_pair) } @@ -364,8 +369,11 @@ mod tests { #[test] fn load_new_genesis_block() -> Result<()> { + let chain_id = 0; + let genesis_key_pair = KeyPair::generate()?; let (alice_public_key, _) = KeyPair::generate()?.into(); + let _genesis_block = GenesisNetwork::new( RawGenesisBlockBuilder::default() .domain("wonderland".parse()?) 
@@ -373,6 +381,7 @@ mod tests { .finish_domain() .executor(dummy_executor()) .build(), + chain_id, &genesis_key_pair, )?; Ok(()) diff --git a/logger/Cargo.toml b/logger/Cargo.toml index 83aba591aea..0b9334ace4b 100644 --- a/logger/Cargo.toml +++ b/logger/Cargo.toml @@ -17,7 +17,7 @@ iroha_data_model = { workspace = true } color-eyre = { workspace = true } serde_json = { workspace = true } tracing = { workspace = true } -tracing-core = "0.1.31" +tracing-core = "0.1.32" tracing-futures = { version = "0.2.5", default-features = false, features = ["std-future", "std"] } tracing-subscriber = { workspace = true, features = ["fmt", "ansi", "json"] } tokio = { workspace = true, features = ["sync", "rt", "macros"] } diff --git a/primitives/Cargo.toml b/primitives/Cargo.toml index 6d9328a51ef..5013ac44e7b 100644 --- a/primitives/Cargo.toml +++ b/primitives/Cargo.toml @@ -37,7 +37,7 @@ fixnum = { workspace = true, features = ["serde", "parity", "i64"] } derive_more = { workspace = true, features = ["display", "from", "as_ref", "as_mut", "deref", "constructor", "into_iterator"] } serde = { workspace = true, features = ["derive"] } serde_with = { workspace = true, features = ["macros"] } -smallvec = { version = "1.11.1", default-features = false, features = ["serde", "union"] } +smallvec = { version = "1.11.2", default-features = false, features = ["serde", "union"] } smallstr = { version = "0.3.0", default-features = false, features = ["serde", "union"] } thiserror = { workspace = true, optional = true } displaydoc = { workspace = true } diff --git a/scripts/test_env.py b/scripts/test_env.py index cd72d89c5aa..b64e0cf05e6 100755 --- a/scripts/test_env.py +++ b/scripts/test_env.py @@ -46,6 +46,7 @@ def __init__(self, args: argparse.Namespace): sys.exit(1) copy_or_prompt_build_bin("iroha", args.root_dir, peers_dir) + self.shared_env["IROHA_CHAIN_ID"] = "0" self.shared_env["IROHA_CONFIG"] = str(peers_dir.joinpath("config.json")) self.shared_env["IROHA_GENESIS_PUBLIC_KEY"] = 
self.peers[0].public_key diff --git a/tools/kagami/src/config.rs b/tools/kagami/src/config.rs index e36b53fcd18..e78798d3841 100644 --- a/tools/kagami/src/config.rs +++ b/tools/kagami/src/config.rs @@ -41,6 +41,7 @@ mod client { impl RunArgs for Args { fn run(self, writer: &mut BufWriter) -> Outcome { let config = ConfigurationProxy { + chain_id: Some(0), torii_api_url: Some(format!("http://{DEFAULT_API_ADDR}").parse()?), account_id: Some("alice@wonderland".parse()?), basic_auth: Some(Some(BasicAuth { diff --git a/tools/parity_scale_decoder/Cargo.toml b/tools/parity_scale_decoder/Cargo.toml index e2cb948ff7a..fa77b2b2c8d 100644 --- a/tools/parity_scale_decoder/Cargo.toml +++ b/tools/parity_scale_decoder/Cargo.toml @@ -27,7 +27,7 @@ iroha_genesis = { workspace = true } clap = { workspace = true, features = ["derive", "cargo"] } eyre = { workspace = true } parity-scale-codec = { workspace = true } -colored = "2.0.4" +colored = "2.1.0" [build-dependencies] iroha_data_model = { workspace = true } diff --git a/tools/swarm/src/compose.rs b/tools/swarm/src/compose.rs index 80dc51f7ac4..1c0227adb68 100644 --- a/tools/swarm/src/compose.rs +++ b/tools/swarm/src/compose.rs @@ -103,6 +103,7 @@ pub struct DockerComposeService { impl DockerComposeService { pub fn new( + chain_id: u16, peer: &Peer, source: ServiceSource, volumes: Vec<(String, String)>, @@ -122,6 +123,7 @@ impl DockerComposeService { }; let compact_env = CompactPeerEnv { + chain_id, trusted_peers, genesis_public_key, genesis_private_key, @@ -209,6 +211,7 @@ pub enum ServiceSource { #[derive(Serialize, Debug)] #[serde(rename_all = "UPPERCASE")] struct FullPeerEnv { + iroha_chain_id: SerializeAsJsonStr, iroha_config: String, iroha_public_key: PublicKey, iroha_private_key: SerializeAsJsonStr, @@ -224,6 +227,7 @@ struct FullPeerEnv { } struct CompactPeerEnv { + chain_id: u16, key_pair: KeyPair, genesis_public_key: PublicKey, /// Genesis private key is only needed for a peer that is submitting the genesis block @@ 
-240,12 +244,13 @@ impl From for FullPeerEnv { .genesis_private_key .map_or((None, None), |private_key| { ( - Some(private_key).map(SerializeAsJsonStr), + Some(SerializeAsJsonStr(private_key)), Some(PATH_TO_GENESIS.to_string()), ) }); Self { + iroha_chain_id: SerializeAsJsonStr(value.chain_id), iroha_config: PATH_TO_CONFIG.to_string(), iroha_public_key: value.key_pair.public_key().clone(), iroha_private_key: SerializeAsJsonStr(value.key_pair.private_key().clone()), @@ -302,6 +307,7 @@ impl DockerComposeBuilder<'_> { ) })?; + let chain_id = 0; let peers = peer_generator::generate_peers(self.peers, self.seed) .wrap_err("Failed to generate peers")?; let genesis_key_pair = generate_key_pair(self.seed, GENESIS_KEYPAIR_SEED) @@ -329,6 +335,7 @@ impl DockerComposeBuilder<'_> { let first_peer_service = { let (name, peer) = peers_iter.next().expect("There is non-zero count of peers"); let service = DockerComposeService::new( + chain_id, peer, service_source.clone(), volumes.clone(), @@ -347,6 +354,7 @@ impl DockerComposeBuilder<'_> { let services = peers_iter .map(|(name, peer)| { let service = DockerComposeService::new( + chain_id, peer, service_source.clone(), volumes.clone(), @@ -561,6 +569,7 @@ mod tests { fn default_config_with_swarm_env_is_exhaustive() { let keypair = KeyPair::generate().unwrap(); let env: TestEnv = CompactPeerEnv { + chain_id: 0, key_pair: keypair.clone(), genesis_public_key: keypair.public_key().clone(), genesis_private_key: Some(keypair.private_key().clone()), @@ -597,6 +606,7 @@ mod tests { services: { let mut map = BTreeMap::new(); + let chain_id = 0; let key_pair = KeyPair::generate_with_configuration( KeyGenConfiguration::default().use_seed(vec![1, 5, 1, 2, 2, 3, 4, 1, 2, 3]), ) @@ -608,6 +618,7 @@ mod tests { platform: PlatformArchitecture, source: ServiceSource::Build(PathBuf::from(".")), environment: CompactPeerEnv { + chain_id, key_pair: key_pair.clone(), genesis_public_key: key_pair.public_key().clone(), genesis_private_key: 
Some(key_pair.private_key().clone()), @@ -642,6 +653,7 @@ mod tests { build: . platform: linux/amd64 environment: + IROHA_CHAIN_ID: '0' IROHA_CONFIG: /config/config.json IROHA_PUBLIC_KEY: ed012039E5BF092186FACC358770792A493CA98A83740643A3D41389483CF334F748C8 IROHA_PRIVATE_KEY: '{"digest_function":"ed25519","payload":"db9d90d20f969177bd5882f9fe211d14d1399d5440d04e3468783d169bbc4a8e39e5bf092186facc358770792a493ca98a83740643a3d41389483cf334f748c8"}' @@ -664,11 +676,15 @@ mod tests { #[test] fn empty_genesis_public_key_is_skipped_in_env() { + let chain_id = 0; + let key_pair = KeyPair::generate_with_configuration( KeyGenConfiguration::default().use_seed(vec![0, 1, 2]), ) .unwrap(); + let env: FullPeerEnv = CompactPeerEnv { + chain_id, key_pair: key_pair.clone(), genesis_public_key: key_pair.public_key().clone(), genesis_private_key: None, @@ -680,6 +696,7 @@ mod tests { let actual = serde_yaml::to_string(&env).unwrap(); let expected = expect_test::expect![[r#" + IROHA_CHAIN_ID: '0' IROHA_CONFIG: /config/config.json IROHA_PUBLIC_KEY: ed0120415388A90FA238196737746A70565D041CFB32EAA0C89FF8CB244C7F832A6EBD IROHA_PRIVATE_KEY: '{"digest_function":"ed25519","payload":"6bf163fd75192b81a78cb20c5f8cb917f591ac6635f2577e6ca305c27a456a5d415388a90fa238196737746a70565d041cfb32eaa0c89ff8cb244c7f832a6ebd"}' @@ -719,6 +736,7 @@ mod tests { build: ./iroha-cloned platform: linux/amd64 environment: + IROHA_CHAIN_ID: '0' IROHA_CONFIG: /config/config.json IROHA_PUBLIC_KEY: ed0120F0321EB4139163C35F88BF78520FF7071499D7F4E79854550028A196C7B49E13 IROHA_PRIVATE_KEY: '{"digest_function":"ed25519","payload":"5f8d1291bf6b762ee748a87182345d135fd167062857aa4f20ba39f25e74c4b0f0321eb4139163c35f88bf78520ff7071499d7f4e79854550028a196c7b49e13"}' @@ -739,6 +757,7 @@ mod tests { build: ./iroha-cloned platform: linux/amd64 environment: + IROHA_CHAIN_ID: '0' IROHA_CONFIG: /config/config.json IROHA_PUBLIC_KEY: ed0120A88554AA5C86D28D0EEBEC497235664433E807881CD31E12A1AF6C4D8B0F026C IROHA_PRIVATE_KEY: 
'{"digest_function":"ed25519","payload":"8d34d2c6a699c61e7a9d5aabbbd07629029dfb4f9a0800d65aa6570113edb465a88554aa5c86d28d0eebec497235664433e807881cd31e12a1af6c4d8b0f026c"}' @@ -756,6 +775,7 @@ mod tests { build: ./iroha-cloned platform: linux/amd64 environment: + IROHA_CHAIN_ID: '0' IROHA_CONFIG: /config/config.json IROHA_PUBLIC_KEY: ed0120312C1B7B5DE23D366ADCF23CD6DB92CE18B2AA283C7D9F5033B969C2DC2B92F4 IROHA_PRIVATE_KEY: '{"digest_function":"ed25519","payload":"cf4515a82289f312868027568c0da0ee3f0fde7fef1b69deb47b19fde7cbc169312c1b7b5de23d366adcf23cd6db92ce18b2aa283c7d9f5033b969c2dc2b92f4"}' @@ -773,6 +793,7 @@ mod tests { build: ./iroha-cloned platform: linux/amd64 environment: + IROHA_CHAIN_ID: '0' IROHA_CONFIG: /config/config.json IROHA_PUBLIC_KEY: ed0120854457B2E3D6082181DA73DC01C1E6F93A72D0C45268DC8845755287E98A5DEE IROHA_PRIVATE_KEY: '{"digest_function":"ed25519","payload":"ab0e99c2b845b4ac7b3e88d25a860793c7eb600a25c66c75cba0bae91e955aa6854457b2e3d6082181da73dc01c1e6f93a72d0c45268dc8845755287e98a5dee"}' diff --git a/tools/wasm_test_runner/Cargo.toml b/tools/wasm_test_runner/Cargo.toml index 682ad1ccce1..920a0fcd2e3 100644 --- a/tools/wasm_test_runner/Cargo.toml +++ b/tools/wasm_test_runner/Cargo.toml @@ -13,4 +13,4 @@ workspace = true [dependencies] wasmtime = { workspace = true } -anyhow = "1.0.75" +anyhow = "1.0.79" diff --git a/torii/src/lib.rs b/torii/src/lib.rs index 83d9ce10a26..7e9aca9efcb 100644 --- a/torii/src/lib.rs +++ b/torii/src/lib.rs @@ -41,6 +41,7 @@ mod stream; /// Main network handler and the only entrypoint of the Iroha. pub struct Torii { + chain_id: u16, kiso: KisoHandle, queue: Arc, events: EventsSender, @@ -56,6 +57,7 @@ impl Torii { /// Construct `Torii`. 
#[allow(clippy::too_many_arguments)] pub fn new( + chain_id: u16, kiso: KisoHandle, config: &ToriiConfiguration, queue: Arc, @@ -66,6 +68,7 @@ impl Torii { kura: Arc, ) -> Self { Self { + chain_id, kiso, queue, events, @@ -131,10 +134,10 @@ impl Torii { let post_router = warp::post() .and( - endpoint3( + endpoint4( routing::handle_transaction, warp::path(uri::TRANSACTION) - .and(add_state!(self.queue, self.sumeragi)) + .and(add_state!(self.chain_id, self.queue, self.sumeragi)) .and(warp::body::content_length_limit( self.transaction_max_content_length, )) diff --git a/torii/src/routing.rs b/torii/src/routing.rs index baf083998b6..6a803792e67 100644 --- a/torii/src/routing.rs +++ b/torii/src/routing.rs @@ -76,13 +76,14 @@ fn fetch_size() -> impl warp::Filter, sumeragi: SumeragiHandle, transaction: SignedTransaction, ) -> Result { let wsv = sumeragi.wsv_clone(); let transaction_limits = wsv.config.transaction_limits; - let transaction = AcceptedTransaction::accept(transaction, &transaction_limits) + let transaction = AcceptedTransaction::accept(transaction, chain_id, &transaction_limits) .map_err(Error::AcceptTransaction)?; queue .push(transaction, &wsv) diff --git a/wasm_builder/Cargo.toml b/wasm_builder/Cargo.toml index 00434bd7c2c..bfce759b162 100644 --- a/wasm_builder/Cargo.toml +++ b/wasm_builder/Cargo.toml @@ -14,6 +14,6 @@ workspace = true [dependencies] eyre = { workspace = true } serde_json = { workspace = true, features = ["std"] } -sha256 = "1.4.0" +sha256 = "1.5.0" path-absolutize = { workspace = true } wasm-opt = "0.116.0"