From 0c1680b2b61b6bd78216669fa7b38661624a97a9 Mon Sep 17 00:00:00 2001
From: tgmichel
Date: Thu, 22 Jun 2023 00:33:46 +0200
Subject: [PATCH 01/12] EVM + Weight v2 support (#1039)

* wip `record_external_cost`
* Add support for external cost recording from the EVM
* Add refund external cost support
* Separate dispatchables with weight limit
* introduce tbd EthereumWeigher
* Validate gas to weight conversion
* Add static opcode recording with temp costs
* Add optional `transaction_len` parameter to `GasWeightMapping`
* Account for pallet and call indexes
* Rollback `transaction_len` param, use MAX_POV_SIZE ratio instead
* wip tests
* Add `uncached_account_code_proof_size_accounting_works` test
* wip tests
* Temp remove static external cost accounting
* fix build
* Temp remove static external cost accounting 2
* warning cleanup
* fmt
* clippy
* taplo
* Add `evm-with-weight-limit` to ci
* Temp set evm fork + update Cargo.lock
* handle `code_hash`
* taplo
* fmt
* Handle `transact_with_weight_limit` as self contained
* fix ts tests
* suggestion
* remove precompile test
* some suggestions
* remove `transact_with_weight_limit`
* configurable `MaxPovSize`
* test pov size constants
* accessed storage overlayed cache
* `new_from_weight_limit` suggestion
* remove unnecessary check
* warnings cleanup
* set constant gas limit max pov size ratio
* check state version for suicide
* just completely remove suicide accounting
* - `code` must be able to oog - refund based on effective used gas
* `is_empty` accounting
* fix build
* `SSTORE` must record account storage proof size
* suggestion: move weight_limit checks
* editorconfig
* fmt
* rename `transaction_len` to `proof_size_base_cost` in runner
* move `proof_size_base_cost` to validation primitive
* gas limit saturated conversion
* remove transaction proof size check outside validation
* pin evm
* pin evm+
* fix todos
* fix build
* scope of already recorded codes and storages must be per transaction
* pin evm + implement new `record_external_operation`
* fix runtime api versioning + legacy `ExecutionInfo` handling
* editorconfig
* cargo fmt
* clippy
* suggestion remove `evm-with-weight-limit` feature
* fmt
* update comment
* update tests for additional `AccountBasicRead` in the evm
* update evm pin
---
 Cargo.lock | 16 +-
 Cargo.toml | 2 +-
 client/rpc/src/eth/execute.rs | 185 ++++-
 frame/ethereum/src/lib.rs | 75 +-
 frame/ethereum/src/mock.rs | 7 +-
 frame/ethereum/src/tests/eip1559.rs | 41 +-
 frame/ethereum/src/tests/eip2930.rs | 42 +-
 frame/ethereum/src/tests/legacy.rs | 40 +-
 frame/evm/precompile/dispatch/src/lib.rs | 22 +-
 frame/evm/precompile/dispatch/src/mock.rs | 11 +
 frame/evm/src/benchmarking.rs | 4 +
 frame/evm/src/lib.rs | 85 ++-
 frame/evm/src/mock.rs | 12 +-
 ...oof_size_test_callee_contract_bytecode.txt | 1 +
 .../res/proof_size_test_contract_bytecode.txt | 1 +
 frame/evm/src/runner/mod.rs | 12 +-
 frame/evm/src/runner/stack.rs | 397 ++++++++++-
 frame/evm/src/tests.rs | 654 ++++++++++++++++--
 frame/evm/test-vector-support/src/lib.rs | 6 +
 primitives/evm/src/lib.rs | 135 +++-
 primitives/evm/src/precompile.rs | 4 +-
 primitives/evm/src/validation.rs | 73 +-
 primitives/rpc/src/lib.rs | 39 +-
 template/runtime/src/lib.rs | 9 +
 24 files changed, 1682 insertions(+), 191 deletions(-)
 create mode 100644 frame/evm/src/res/proof_size_test_callee_contract_bytecode.txt
 create mode 100644 frame/evm/src/res/proof_size_test_contract_bytecode.txt

diff --git a/Cargo.lock b/Cargo.lock
index 9dc7aeb3f4..bccd7d130d 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1982,9 +1982,8
@@ checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" [[package]] name = "evm" -version = "0.39.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1099df1dac16f32a136452ad98ee0f1ff42acd3e12ce65bea4462b61d656608a" +version = "0.39.1" +source = "git+https://github.com/rust-blockchain/evm?branch=master#e85c34f96e3237c09955193b41154030b78119c5" dependencies = [ "auto_impl", "environmental", @@ -2004,8 +2003,7 @@ dependencies = [ [[package]] name = "evm-core" version = "0.39.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f1f13264b044cb66f0602180f0bc781c29accb41ff560669a3ec15858d5b606" +source = "git+https://github.com/rust-blockchain/evm?branch=master#e85c34f96e3237c09955193b41154030b78119c5" dependencies = [ "parity-scale-codec", "primitive-types", @@ -2016,8 +2014,7 @@ dependencies = [ [[package]] name = "evm-gasometer" version = "0.39.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d43eadc395bd1a52990787ca1495c26b0248165444912be075c28909a853b8c" +source = "git+https://github.com/rust-blockchain/evm?branch=master#e85c34f96e3237c09955193b41154030b78119c5" dependencies = [ "environmental", "evm-core", @@ -2028,8 +2025,7 @@ dependencies = [ [[package]] name = "evm-runtime" version = "0.39.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2aa5b32f59ec582a5651978004e5c784920291263b7dcb6de418047438e37f4f" +source = "git+https://github.com/rust-blockchain/evm?branch=master#e85c34f96e3237c09955193b41154030b78119c5" dependencies = [ "auto_impl", "environmental", @@ -10000,7 +9996,7 @@ checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" dependencies = [ "cfg-if", "digest 0.10.6", - "rand 0.8.5", + "rand 0.7.3", "static_assertions", ] diff --git a/Cargo.toml b/Cargo.toml index 33c27f6102..e71fc735cb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -43,7 +43,7 @@ bn = { package = "substrate-bn", version = "0.6", default-features = false } environmental = { version = "1.1.4", default-features = false } ethereum = { version = "0.14.0", default-features = false } ethereum-types = { version = "0.14.1", default-features = false } -evm = { version = "0.39.0", default-features = false } +evm = { git = "https://github.com/rust-blockchain/evm", branch = "master", default-features = false } hex-literal = { version = "0.3.4" } impl-serde = { version = "0.4.0", default-features = false } jsonrpsee = "0.16.2" diff --git a/client/rpc/src/eth/execute.rs b/client/rpc/src/eth/execute.rs index d603043b9b..00bd6656b2 100644 --- a/client/rpc/src/eth/execute.rs +++ b/client/rpc/src/eth/execute.rs @@ -34,7 +34,7 @@ use sp_runtime::{traits::Block as BlockT, DispatchError, SaturatedConversion}; use sp_state_machine::OverlayedChanges; // Frontier use fc_rpc_core::types::*; -use fp_evm::CallInfo; +use fp_evm::{ExecutionInfo, ExecutionInfoV2}; use fp_rpc::{EthereumRuntimeRPCApi, RuntimeStorageOverride}; use fp_storage::{EVM_ACCOUNT_CODES, PALLET_EVM}; @@ -210,7 +210,7 @@ where error_on_execution_failure(&info.exit_reason, &info.value)?; Ok(Bytes(info.value)) - } else if api_version == 4 { + } else if api_version == 4 || api_version == 5 { // Post-london + access list support let encoded_params = Encode::encode(&( &from.unwrap_or_default(), @@ -247,23 +247,47 @@ where recorder: &None, }; - let info = self - .client - .call_api_at(params) - .and_then(|r| { - Result::map_err( - as Decode>::decode(&mut &r[..]), - |error| 
sp_api::ApiError::FailedToDecodeReturnValue { - function: "EthereumRuntimeRPCApi_call", - error, - }, - ) - }) - .map_err(|err| internal_err(format!("runtime error: {:?}", err)))? - .map_err(|err| internal_err(format!("execution fatal: {:?}", err)))?; + let value = if api_version == 4 { + let info = self + .client + .call_api_at(params) + .and_then(|r| { + Result::map_err( + >, DispatchError> as Decode>::decode(&mut &r[..]), + |error| sp_api::ApiError::FailedToDecodeReturnValue { + function: "EthereumRuntimeRPCApi_call", + error, + }, + ) + }) + .map_err(|err| internal_err(format!("runtime error: {:?}", err)))? + .map_err(|err| internal_err(format!("execution fatal: {:?}", err)))?; - error_on_execution_failure(&info.exit_reason, &info.value)?; - Ok(Bytes(info.value)) + error_on_execution_failure(&info.exit_reason, &info.value)?; + info.value + } else if api_version == 5 { + let info = self + .client + .call_api_at(params) + .and_then(|r| { + Result::map_err( + >, DispatchError> as Decode>::decode(&mut &r[..]), + |error| sp_api::ApiError::FailedToDecodeReturnValue { + function: "EthereumRuntimeRPCApi_call", + error, + }, + ) + }) + .map_err(|err| internal_err(format!("runtime error: {:?}", err)))? + .map_err(|err| internal_err(format!("execution fatal: {:?}", err)))?; + + error_on_execution_failure(&info.exit_reason, &info.value)?; + info.value + } else { + unreachable!("invalid version"); + }; + + Ok(Bytes(value)) } else { Err(internal_err("failed to retrieve Runtime Api version")) } @@ -315,6 +339,36 @@ where .map_err(|err| internal_err(format!("runtime error: {:?}", err)))?; Ok(Bytes(code)) } else if api_version == 4 { + // Post-london + access list support + let access_list = access_list.unwrap_or_default(); + #[allow(deprecated)] + let info = api.create_before_version_5( + substrate_hash, + from.unwrap_or_default(), + data, + value.unwrap_or_default(), + gas_limit, + max_fee_per_gas, + max_priority_fee_per_gas, + nonce, + false, + Some( + access_list + .into_iter() + .map(|item| (item.address, item.storage_keys)) + .collect(), + ), + ) + .map_err(|err| internal_err(format!("runtime error: {:?}", err)))? + .map_err(|err| internal_err(format!("execution fatal: {:?}", err)))?; + + error_on_execution_failure(&info.exit_reason, &[])?; + + let code = api + .account_code_at(substrate_hash, info.value) + .map_err(|err| internal_err(format!("runtime error: {:?}", err)))?; + Ok(Bytes(code)) + } else if api_version == 5 { // Post-london + access list support let access_list = access_list.unwrap_or_default(); let info = api @@ -515,10 +569,10 @@ where let (exit_reason, data, used_gas) = match to { Some(to) => { - let info = if api_version == 1 { + if api_version == 1 { // Legacy pre-london #[allow(deprecated)] - api.call_before_version_2( + let info = api.call_before_version_2( substrate_hash, from.unwrap_or_default(), to, @@ -530,11 +584,33 @@ where estimate_mode, ) .map_err(|err| internal_err(format!("runtime error: {:?}", err)))? - .map_err(|err| internal_err(format!("execution fatal: {:?}", err)))? + .map_err(|err| internal_err(format!("execution fatal: {:?}", err)))?; + + (info.exit_reason, info.value, info.used_gas) } else if api_version < 4 { // Post-london #[allow(deprecated)] - api.call_before_version_4( + let info = api.call_before_version_4( + substrate_hash, + from.unwrap_or_default(), + to, + data, + value.unwrap_or_default(), + gas_limit, + max_fee_per_gas, + max_priority_fee_per_gas, + nonce, + estimate_mode, + ) + .map_err(|err| internal_err(format!("runtime error: {:?}", err)))? 
+ .map_err(|err| internal_err(format!("execution fatal: {:?}", err)))?; + + (info.exit_reason, info.value, info.used_gas) + } else if api_version == 4 { + // Post-london + access list support + let access_list = access_list.unwrap_or_default(); + #[allow(deprecated)] + let info = api.call_before_version_5( substrate_hash, from.unwrap_or_default(), to, @@ -545,13 +621,21 @@ where max_priority_fee_per_gas, nonce, estimate_mode, + Some( + access_list + .into_iter() + .map(|item| (item.address, item.storage_keys)) + .collect(), + ), ) .map_err(|err| internal_err(format!("runtime error: {:?}", err)))? - .map_err(|err| internal_err(format!("execution fatal: {:?}", err)))? + .map_err(|err| internal_err(format!("execution fatal: {:?}", err)))?; + + (info.exit_reason, info.value, info.used_gas) } else { // Post-london + access list support let access_list = access_list.unwrap_or_default(); - api.call( + let info = api.call( substrate_hash, from.unwrap_or_default(), to, @@ -570,16 +654,16 @@ where ), ) .map_err(|err| internal_err(format!("runtime error: {:?}", err)))? - .map_err(|err| internal_err(format!("execution fatal: {:?}", err)))? - }; + .map_err(|err| internal_err(format!("execution fatal: {:?}", err)))?; - (info.exit_reason, info.value, info.used_gas) + (info.exit_reason, info.value, info.used_gas.effective) + } } None => { - let info = if api_version == 1 { + if api_version == 1 { // Legacy pre-london #[allow(deprecated)] - api.create_before_version_2( + let info = api.create_before_version_2( substrate_hash, from.unwrap_or_default(), data, @@ -590,11 +674,13 @@ where estimate_mode, ) .map_err(|err| internal_err(format!("runtime error: {:?}", err)))? - .map_err(|err| internal_err(format!("execution fatal: {:?}", err)))? + .map_err(|err| internal_err(format!("execution fatal: {:?}", err)))?; + + (info.exit_reason, Vec::new(), info.used_gas) } else if api_version < 4 { // Post-london #[allow(deprecated)] - api.create_before_version_4( + let info = api.create_before_version_4( substrate_hash, from.unwrap_or_default(), data, @@ -606,11 +692,38 @@ where estimate_mode, ) .map_err(|err| internal_err(format!("runtime error: {:?}", err)))? - .map_err(|err| internal_err(format!("execution fatal: {:?}", err)))? + .map_err(|err| internal_err(format!("execution fatal: {:?}", err)))?; + + (info.exit_reason, Vec::new(), info.used_gas) + } else if api_version == 4 { + // Post-london + access list support + let access_list = access_list.unwrap_or_default(); + #[allow(deprecated)] + let info = api.create_before_version_5( + substrate_hash, + from.unwrap_or_default(), + data, + value.unwrap_or_default(), + gas_limit, + max_fee_per_gas, + max_priority_fee_per_gas, + nonce, + estimate_mode, + Some( + access_list + .into_iter() + .map(|item| (item.address, item.storage_keys)) + .collect(), + ), + ) + .map_err(|err| internal_err(format!("runtime error: {:?}", err)))? + .map_err(|err| internal_err(format!("execution fatal: {:?}", err)))?; + + (info.exit_reason, Vec::new(), info.used_gas) } else { // Post-london + access list support let access_list = access_list.unwrap_or_default(); - api.create( + let info = api.create( substrate_hash, from.unwrap_or_default(), data, @@ -628,10 +741,10 @@ where ), ) .map_err(|err| internal_err(format!("runtime error: {:?}", err)))? - .map_err(|err| internal_err(format!("execution fatal: {:?}", err)))? 
- }; + .map_err(|err| internal_err(format!("execution fatal: {:?}", err)))?; - (info.exit_reason, Vec::new(), info.used_gas) + (info.exit_reason, Vec::new(), info.used_gas.effective) + } } }; Ok(ExecutableResult { diff --git a/frame/ethereum/src/lib.rs b/frame/ethereum/src/lib.rs index 975a5d46f3..d9f1131db8 100644 --- a/frame/ethereum/src/lib.rs +++ b/frame/ethereum/src/lib.rs @@ -353,6 +353,16 @@ pub mod pallet { } impl Pallet { + fn transaction_len(transaction: &Transaction) -> u64 { + transaction + .encode() + .len() + // pallet index + .saturating_add(1) + // call index + .saturating_add(1) as u64 + } + fn recover_signer(transaction: &Transaction) -> Option { let mut sig = [0u8; 65]; let mut msg = [0u8; 32]; @@ -473,6 +483,17 @@ impl Pallet { let transaction_data: TransactionData = transaction.into(); let transaction_nonce = transaction_data.nonce; + let (weight_limit, proof_size_base_cost) = + match ::GasWeightMapping::gas_to_weight( + transaction_data.gas_limit.unique_saturated_into(), + true, + ) { + weight_limit if weight_limit.proof_size() > 0 => { + (Some(weight_limit), Some(Self::transaction_len(transaction))) + } + _ => (None, None), + }; + let (base_fee, _) = T::FeeCalculator::min_gas_price(); let (who, _) = pallet_evm::Pallet::::account_basic(&origin); @@ -485,6 +506,8 @@ impl Pallet { is_transactional: true, }, transaction_data.clone().into(), + weight_limit, + proof_size_base_cost, ) .validate_in_pool_for(&who) .and_then(|v| v.with_chain_id()) @@ -540,7 +563,7 @@ impl Pallet { let transaction_hash = transaction.hash(); let transaction_index = pending.len() as u32; - let (reason, status, used_gas, dest, extra_data) = match info { + let (reason, status, weight_info, used_gas, dest, extra_data) = match info { CallOrCreateInfo::Call(info) => ( info.exit_reason.clone(), TransactionStatus { @@ -556,6 +579,7 @@ impl Pallet { bloom }, }, + info.weight_info, info.used_gas, to, match info.exit_reason { @@ -599,6 +623,7 @@ impl Pallet { bloom }, }, + info.weight_info, info.used_gas, Some(info.value), Vec::new(), @@ -615,11 +640,11 @@ impl Pallet { let cumulative_gas_used = if let Some((_, _, receipt)) = pending.last() { match receipt { Receipt::Legacy(d) | Receipt::EIP2930(d) | Receipt::EIP1559(d) => { - d.used_gas.saturating_add(used_gas) + d.used_gas.saturating_add(used_gas.effective) } } } else { - used_gas + used_gas.effective }; match &transaction { Transaction::Legacy(_) => Receipt::Legacy(ethereum::EIP658ReceiptData { @@ -654,10 +679,18 @@ impl Pallet { }); Ok(PostDispatchInfo { - actual_weight: Some(T::GasWeightMapping::gas_to_weight( - used_gas.unique_saturated_into(), - true, - )), + actual_weight: { + let mut gas_to_weight = T::GasWeightMapping::gas_to_weight( + used_gas.standard.unique_saturated_into(), + true, + ); + if let Some(weight_info) = weight_info { + if let Some(proof_size_usage) = weight_info.proof_size_usage { + *gas_to_weight.proof_size_mut() = proof_size_usage; + } + } + Some(gas_to_weight) + }, pays_fee: Pays::No, }) } @@ -738,6 +771,17 @@ impl Pallet { let is_transactional = true; let validate = false; + + let (transaction_len, weight_limit) = + match ::GasWeightMapping::gas_to_weight( + gas_limit.unique_saturated_into(), + true, + ) { + weight_limit if weight_limit.proof_size() > 0 => { + (Some(Self::transaction_len(transaction)), Some(weight_limit)) + } + _ => (None, None), + }; match action { ethereum::TransactionAction::Call(target) => { let res = match T::Runner::call( @@ -752,6 +796,8 @@ impl Pallet { access_list, is_transactional, validate, + 
weight_limit, + transaction_len, config.as_ref().unwrap_or_else(|| T::config()), ) { Ok(res) => res, @@ -780,6 +826,8 @@ impl Pallet { access_list, is_transactional, validate, + weight_limit, + transaction_len, config.as_ref().unwrap_or_else(|| T::config()), ) { Ok(res) => res, @@ -812,6 +860,17 @@ impl Pallet { let (base_fee, _) = T::FeeCalculator::min_gas_price(); let (who, _) = pallet_evm::Pallet::::account_basic(&origin); + let (weight_limit, proof_size_base_cost) = + match ::GasWeightMapping::gas_to_weight( + transaction_data.gas_limit.unique_saturated_into(), + true, + ) { + weight_limit if weight_limit.proof_size() > 0 => { + (Some(weight_limit), Some(Self::transaction_len(transaction))) + } + _ => (None, None), + }; + let _ = CheckEvmTransaction::::new( CheckEvmTransactionConfig { evm_config: T::config(), @@ -821,6 +880,8 @@ impl Pallet { is_transactional: true, }, transaction_data.into(), + weight_limit, + proof_size_base_cost, ) .validate_in_block_for(&who) .and_then(|v| v.with_chain_id()) diff --git a/frame/ethereum/src/mock.rs b/frame/ethereum/src/mock.rs index 90eb823d65..237f6de0ae 100644 --- a/frame/ethereum/src/mock.rs +++ b/frame/ethereum/src/mock.rs @@ -134,11 +134,15 @@ impl FindAuthor for FindAuthorTruncated { } } +const BLOCK_GAS_LIMIT: u64 = 150_000_000; +const MAX_POV_SIZE: u64 = 5 * 1024 * 1024; + parameter_types! { pub const TransactionByteFee: u64 = 1; pub const ChainId: u64 = 42; pub const EVMModuleId: PalletId = PalletId(*b"py/evmpa"); - pub const BlockGasLimit: U256 = U256::MAX; + pub BlockGasLimit: U256 = U256::from(BLOCK_GAS_LIMIT); + pub const GasLimitPovSizeRatio: u64 = BLOCK_GAS_LIMIT.saturating_div(MAX_POV_SIZE); pub const WeightPerGas: Weight = Weight::from_parts(20_000, 0); } @@ -169,6 +173,7 @@ impl pallet_evm::Config for Test { type OnChargeTransaction = (); type OnCreate = (); type FindAuthor = FindAuthorTruncated; + type GasLimitPovSizeRatio = GasLimitPovSizeRatio; type Timestamp = Timestamp; type WeightInfo = (); } diff --git a/frame/ethereum/src/tests/eip1559.rs b/frame/ethereum/src/tests/eip1559.rs index 8a55e33033..d0091d6ab0 100644 --- a/frame/ethereum/src/tests/eip1559.rs +++ b/frame/ethereum/src/tests/eip1559.rs @@ -335,7 +335,7 @@ fn transaction_should_generate_correct_gas_used() { match info { CallOrCreateInfo::Create(info) => { - assert_eq!(info.used_gas, expected_gas); + assert_eq!(info.used_gas.standard, expected_gas); } CallOrCreateInfo::Call(_) => panic!("expected create info"), } @@ -422,7 +422,7 @@ fn event_extra_data_should_be_handle_properly() { input: hex::decode(TEST_CONTRACT_CODE).unwrap(), } .sign(&alice.private_key, None); - assert_ok!(Ethereum::apply_validated_transaction(alice.address, t)); + assert_ok!(Ethereum::apply_validated_transaction(alice.address, t,)); let contract_address = hex::decode("32dcab0ef3fb2de2fce1d2e0799d36239671f04a").unwrap(); let foo = hex::decode("c2985578").unwrap(); @@ -440,7 +440,7 @@ fn event_extra_data_should_be_handle_properly() { .sign(&alice.private_key, None); // calling foo - assert_ok!(Ethereum::apply_validated_transaction(alice.address, t2)); + assert_ok!(Ethereum::apply_validated_transaction(alice.address, t2,)); System::assert_last_event(RuntimeEvent::Ethereum(Event::Executed { from: alice.address, to: H160::from_slice(&contract_address), @@ -464,7 +464,7 @@ fn event_extra_data_should_be_handle_properly() { .sign(&alice.private_key, None); // calling bar revert - assert_ok!(Ethereum::apply_validated_transaction(alice.address, t3)); + 
assert_ok!(Ethereum::apply_validated_transaction(alice.address, t3,)); System::assert_last_event(RuntimeEvent::Ethereum(Event::Executed { from: alice.address, to: H160::from_slice(&contract_address), @@ -553,3 +553,36 @@ fn validated_transaction_apply_zero_gas_price_works() { assert_eq!(Balances::free_balance(&substrate_bob), 1_100); }); } + +#[test] +fn proof_size_weight_limit_validation_works() { + let (pairs, mut ext) = new_test_ext(1); + let alice = &pairs[0]; + + ext.execute_with(|| { + let mut tx = EIP1559UnsignedTransaction { + nonce: U256::from(2), + max_priority_fee_per_gas: U256::zero(), + max_fee_per_gas: U256::from(1), + gas_limit: U256::from(0x100000), + action: ethereum::TransactionAction::Call(alice.address), + value: U256::from(1), + input: Vec::new(), + }; + + let gas_limit: u64 = 1_000_000; + tx.gas_limit = U256::from(gas_limit); + + let weight_limit = + ::GasWeightMapping::gas_to_weight(gas_limit, true); + + // Gas limit cannot afford the extra byte and thus is expected to exhaust. + tx.input = vec![0u8; (weight_limit.proof_size() + 1) as usize]; + let tx = tx.sign(&alice.private_key, None); + + // Execute + assert!( + Ethereum::transact(RawOrigin::EthereumTransaction(alice.address).into(), tx,).is_err() + ); + }); +} diff --git a/frame/ethereum/src/tests/eip2930.rs b/frame/ethereum/src/tests/eip2930.rs index 56543f62dc..6b3a6cd34d 100644 --- a/frame/ethereum/src/tests/eip2930.rs +++ b/frame/ethereum/src/tests/eip2930.rs @@ -267,7 +267,7 @@ fn transaction_should_generate_correct_gas_used() { match info { CallOrCreateInfo::Create(info) => { - assert_eq!(info.used_gas, expected_gas); + assert_eq!(info.used_gas.standard, expected_gas); } CallOrCreateInfo::Call(_) => panic!("expected create info"), } @@ -350,7 +350,7 @@ fn event_extra_data_should_be_handle_properly() { input: hex::decode(TEST_CONTRACT_CODE).unwrap(), } .sign(&alice.private_key, None); - assert_ok!(Ethereum::apply_validated_transaction(alice.address, t)); + assert_ok!(Ethereum::apply_validated_transaction(alice.address, t,)); let contract_address = hex::decode("32dcab0ef3fb2de2fce1d2e0799d36239671f04a").unwrap(); let foo = hex::decode("c2985578").unwrap(); @@ -367,7 +367,7 @@ fn event_extra_data_should_be_handle_properly() { .sign(&alice.private_key, None); // calling foo - assert_ok!(Ethereum::apply_validated_transaction(alice.address, t2)); + assert_ok!(Ethereum::apply_validated_transaction(alice.address, t2,)); System::assert_last_event(RuntimeEvent::Ethereum(Event::Executed { from: alice.address, to: H160::from_slice(&contract_address), @@ -390,7 +390,7 @@ fn event_extra_data_should_be_handle_properly() { .sign(&alice.private_key, None); // calling bar revert - assert_ok!(Ethereum::apply_validated_transaction(alice.address, t3)); + assert_ok!(Ethereum::apply_validated_transaction(alice.address, t3,)); System::assert_last_event(RuntimeEvent::Ethereum(Event::Executed { from: alice.address, to: H160::from_slice(&contract_address), @@ -478,3 +478,37 @@ fn validated_transaction_apply_zero_gas_price_works() { assert_eq!(Balances::free_balance(&substrate_bob), 1_100); }); } + +#[test] +fn proof_size_weight_limit_validation_works() { + use pallet_evm::GasWeightMapping; + + let (pairs, mut ext) = new_test_ext(1); + let alice = &pairs[0]; + + ext.execute_with(|| { + let mut tx = EIP2930UnsignedTransaction { + nonce: U256::from(2), + gas_price: U256::from(1), + gas_limit: U256::from(0x100000), + action: ethereum::TransactionAction::Call(alice.address), + value: U256::from(1), + input: Vec::new(), + }; + + let 
gas_limit: u64 = 1_000_000; + tx.gas_limit = U256::from(gas_limit); + + let weight_limit = + ::GasWeightMapping::gas_to_weight(gas_limit, true); + + // Gas limit cannot afford the extra byte and thus is expected to exhaust. + tx.input = vec![0u8; (weight_limit.proof_size() + 1) as usize]; + let tx = tx.sign(&alice.private_key, None); + + // Execute + assert!( + Ethereum::transact(RawOrigin::EthereumTransaction(alice.address).into(), tx,).is_err() + ); + }); +} diff --git a/frame/ethereum/src/tests/legacy.rs b/frame/ethereum/src/tests/legacy.rs index 777ff1bb88..c119da3700 100644 --- a/frame/ethereum/src/tests/legacy.rs +++ b/frame/ethereum/src/tests/legacy.rs @@ -267,7 +267,7 @@ fn transaction_should_generate_correct_gas_used() { match info { CallOrCreateInfo::Create(info) => { - assert_eq!(info.used_gas, expected_gas); + assert_eq!(info.used_gas.standard, expected_gas); } CallOrCreateInfo::Call(_) => panic!("expected create info"), } @@ -367,7 +367,7 @@ fn event_extra_data_should_be_handle_properly() { .sign(&alice.private_key); // calling foo - assert_ok!(Ethereum::apply_validated_transaction(alice.address, t2)); + assert_ok!(Ethereum::apply_validated_transaction(alice.address, t2,)); System::assert_last_event(RuntimeEvent::Ethereum(Event::Executed { from: alice.address, to: H160::from_slice(&contract_address), @@ -390,7 +390,7 @@ fn event_extra_data_should_be_handle_properly() { .sign(&alice.private_key); // calling bar revert - assert_ok!(Ethereum::apply_validated_transaction(alice.address, t3)); + assert_ok!(Ethereum::apply_validated_transaction(alice.address, t3,)); System::assert_last_event(RuntimeEvent::Ethereum(Event::Executed { from: alice.address, to: H160::from_slice(&contract_address), @@ -478,3 +478,37 @@ fn validated_transaction_apply_zero_gas_price_works() { assert_eq!(Balances::free_balance(&substrate_bob), 1_100); }); } + +#[test] +fn proof_size_weight_limit_validation_works() { + use pallet_evm::GasWeightMapping; + + let (pairs, mut ext) = new_test_ext(1); + let alice = &pairs[0]; + + ext.execute_with(|| { + let mut tx = LegacyUnsignedTransaction { + nonce: U256::from(2), + gas_price: U256::from(1), + gas_limit: U256::from(0x100000), + action: ethereum::TransactionAction::Call(alice.address), + value: U256::from(1), + input: Vec::new(), + }; + + let gas_limit: u64 = 1_000_000; + tx.gas_limit = U256::from(gas_limit); + + let weight_limit = + ::GasWeightMapping::gas_to_weight(gas_limit, true); + + // Gas limit cannot afford the extra byte and thus is expected to exhaust. 
+ tx.input = vec![0u8; (weight_limit.proof_size() + 1) as usize]; + let tx = tx.sign(&alice.private_key); + + // Execute + assert!( + Ethereum::transact(RawOrigin::EthereumTransaction(alice.address).into(), tx,).is_err() + ); + }); +} diff --git a/frame/evm/precompile/dispatch/src/lib.rs b/frame/evm/precompile/dispatch/src/lib.rs index 6a4d3d46fd..f73349bf31 100644 --- a/frame/evm/precompile/dispatch/src/lib.rs +++ b/frame/evm/precompile/dispatch/src/lib.rs @@ -81,14 +81,28 @@ where return Err(err); } + handle + .record_external_cost(Some(info.weight.ref_time()), Some(info.weight.proof_size()))?; + match call.dispatch(Some(origin).into()) { Ok(post_info) => { if post_info.pays_fee(&info) == Pays::Yes { - let cost = T::GasWeightMapping::weight_to_gas( - post_info.actual_weight.unwrap_or(info.weight), - ); - + let actual_weight = post_info.actual_weight.unwrap_or(info.weight); + let cost = T::GasWeightMapping::weight_to_gas(actual_weight); handle.record_cost(cost)?; + + handle.refund_external_cost( + Some( + info.weight + .ref_time() + .saturating_sub(actual_weight.ref_time()), + ), + Some( + info.weight + .proof_size() + .saturating_sub(actual_weight.proof_size()), + ), + ); } Ok(PrecompileOutput { diff --git a/frame/evm/precompile/dispatch/src/mock.rs b/frame/evm/precompile/dispatch/src/mock.rs index 7f0acfe231..e08be3ff22 100644 --- a/frame/evm/precompile/dispatch/src/mock.rs +++ b/frame/evm/precompile/dispatch/src/mock.rs @@ -159,6 +159,7 @@ impl pallet_evm::Config for Test { type OnChargeTransaction = (); type OnCreate = (); type FindAuthor = FindAuthorTruncated; + type GasLimitPovSizeRatio = (); type Timestamp = Timestamp; type WeightInfo = (); } @@ -185,6 +186,16 @@ impl PrecompileHandle for MockHandle { Ok(()) } + fn record_external_cost( + &mut self, + _ref_time: Option, + _proof_size: Option, + ) -> Result<(), ExitError> { + Ok(()) + } + + fn refund_external_cost(&mut self, _ref_time: Option, _proof_size: Option) {} + fn remaining_gas(&self) -> u64 { unimplemented!() } diff --git a/frame/evm/src/benchmarking.rs b/frame/evm/src/benchmarking.rs index 97b885d22e..bf65792f7f 100644 --- a/frame/evm/src/benchmarking.rs +++ b/frame/evm/src/benchmarking.rs @@ -89,6 +89,8 @@ benchmarks! { Vec::new(), is_transactional, validate, + None, + None, T::config(), ); assert!(create_runner_results.is_ok(), "create() failed"); @@ -124,6 +126,8 @@ benchmarks! 
{ Vec::new(), is_transactional, validate, + None, + None, T::config(), ); assert!(call_runner_results.is_ok(), "call() failed"); diff --git a/frame/evm/src/lib.rs b/frame/evm/src/lib.rs index ca84889b1e..b96eaca1ed 100644 --- a/frame/evm/src/lib.rs +++ b/frame/evm/src/lib.rs @@ -67,7 +67,7 @@ mod tests; pub mod weights; use frame_support::{ - dispatch::{DispatchResultWithPostInfo, Pays, PostDispatchInfo}, + dispatch::{DispatchResultWithPostInfo, MaxEncodedLen, Pays, PostDispatchInfo}, traits::{ tokens::fungible::Inspect, Currency, ExistenceRequirement, FindAuthor, Get, Imbalance, OnUnbalanced, SignedImbalance, Time, WithdrawReasons, @@ -91,9 +91,10 @@ use fp_account::AccountId20; #[cfg(feature = "std")] use fp_evm::GenesisAccount; pub use fp_evm::{ - Account, CallInfo, CreateInfo, ExecutionInfo, FeeCalculator, InvalidEvmTransactionError, - IsPrecompileResult, LinearCostPrecompile, Log, Precompile, PrecompileFailure, PrecompileHandle, - PrecompileOutput, PrecompileResult, PrecompileSet, Vicinity, + Account, CallInfo, CreateInfo, ExecutionInfoV2 as ExecutionInfo, FeeCalculator, + InvalidEvmTransactionError, IsPrecompileResult, LinearCostPrecompile, Log, Precompile, + PrecompileFailure, PrecompileHandle, PrecompileOutput, PrecompileResult, PrecompileSet, + Vicinity, }; pub use self::{ @@ -159,6 +160,9 @@ pub mod pallet { /// Find author for the current block. type FindAuthor: FindAuthor; + /// Gas limit Pov size ratio. + type GasLimitPovSizeRatio: Get; + /// Get the timestamp for the current block. type Timestamp: Time; @@ -228,6 +232,8 @@ pub mod pallet { access_list, is_transactional, validate, + None, + None, T::config(), ) { Ok(info) => info, @@ -252,10 +258,18 @@ pub mod pallet { }; Ok(PostDispatchInfo { - actual_weight: Some(T::GasWeightMapping::gas_to_weight( - info.used_gas.unique_saturated_into(), - true, - )), + actual_weight: { + let mut gas_to_weight = T::GasWeightMapping::gas_to_weight( + info.used_gas.standard.unique_saturated_into(), + true, + ); + if let Some(weight_info) = info.weight_info { + if let Some(proof_size_usage) = weight_info.proof_size_usage { + *gas_to_weight.proof_size_mut() = proof_size_usage; + } + } + Some(gas_to_weight) + }, pays_fee: Pays::No, }) } @@ -293,6 +307,8 @@ pub mod pallet { access_list, is_transactional, validate, + None, + None, T::config(), ) { Ok(info) => info, @@ -329,10 +345,18 @@ pub mod pallet { } Ok(PostDispatchInfo { - actual_weight: Some(T::GasWeightMapping::gas_to_weight( - info.used_gas.unique_saturated_into(), - true, - )), + actual_weight: { + let mut gas_to_weight = T::GasWeightMapping::gas_to_weight( + info.used_gas.standard.unique_saturated_into(), + true, + ); + if let Some(weight_info) = info.weight_info { + if let Some(proof_size_usage) = weight_info.proof_size_usage { + *gas_to_weight.proof_size_mut() = proof_size_usage; + } + } + Some(gas_to_weight) + }, pays_fee: Pays::No, }) } @@ -371,6 +395,8 @@ pub mod pallet { access_list, is_transactional, validate, + None, + None, T::config(), ) { Ok(info) => info, @@ -407,10 +433,18 @@ pub mod pallet { } Ok(PostDispatchInfo { - actual_weight: Some(T::GasWeightMapping::gas_to_weight( - info.used_gas.unique_saturated_into(), - true, - )), + actual_weight: { + let mut gas_to_weight = T::GasWeightMapping::gas_to_weight( + info.used_gas.standard.unique_saturated_into(), + true, + ); + if let Some(weight_info) = info.weight_info { + if let Some(proof_size_usage) = weight_info.proof_size_usage { + *gas_to_weight.proof_size_mut() = proof_size_usage; + } + } + Some(gas_to_weight) + }, 
pays_fee: Pays::No, }) } @@ -530,7 +564,17 @@ pub type BalanceOf = type NegativeImbalanceOf = ::AccountId>>::NegativeImbalance; -#[derive(Debug, Clone, Copy, Eq, PartialEq, Encode, Decode, TypeInfo)] +#[derive( + Debug, + Clone, + Copy, + Eq, + PartialEq, + Encode, + Decode, + TypeInfo, + MaxEncodedLen +)] pub struct CodeMetadata { pub size: u64, pub hash: H256, @@ -707,6 +751,13 @@ impl GasWeightMapping for FixedGasWeightMapping { .base_extrinsic, ); } + // Apply a gas to proof size ratio based on BlockGasLimit + let ratio = T::GasLimitPovSizeRatio::get(); + if ratio > 0 { + let proof_size = gas.saturating_div(ratio); + *weight.proof_size_mut() = proof_size; + } + weight } fn weight_to_gas(weight: Weight) -> u64 { diff --git a/frame/evm/src/mock.rs b/frame/evm/src/mock.rs index 10df7514b2..88d050a6ce 100644 --- a/frame/evm/src/mock.rs +++ b/frame/evm/src/mock.rs @@ -17,7 +17,6 @@ //! Test mock for unit tests and benchmarking -use fp_evm::{IsPrecompileResult, Precompile}; use frame_support::{ parameter_types, traits::{ConstU32, FindAuthor}, @@ -32,8 +31,8 @@ use sp_runtime::{ use sp_std::{boxed::Box, prelude::*, str::FromStr}; use crate::{ - EnsureAddressNever, EnsureAddressRoot, FeeCalculator, IdentityAddressMapping, PrecompileHandle, - PrecompileResult, PrecompileSet, + EnsureAddressNever, EnsureAddressRoot, FeeCalculator, IdentityAddressMapping, + IsPrecompileResult, Precompile, PrecompileHandle, PrecompileResult, PrecompileSet, }; type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; @@ -126,8 +125,12 @@ impl FindAuthor for FindAuthorTruncated { Some(H160::from_str("1234500000000000000000000000000000000000").unwrap()) } } +const BLOCK_GAS_LIMIT: u64 = 150_000_000; +const MAX_POV_SIZE: u64 = 5 * 1024 * 1024; + parameter_types! 
{ - pub BlockGasLimit: U256 = U256::max_value(); + pub BlockGasLimit: U256 = U256::from(BLOCK_GAS_LIMIT); + pub const GasLimitPovSizeRatio: u64 = BLOCK_GAS_LIMIT.saturating_div(MAX_POV_SIZE); pub WeightPerGas: Weight = Weight::from_parts(20_000, 0); pub MockPrecompiles: MockPrecompileSet = MockPrecompileSet; } @@ -152,6 +155,7 @@ impl crate::Config for Test { type OnChargeTransaction = (); type OnCreate = (); type FindAuthor = FindAuthorTruncated; + type GasLimitPovSizeRatio = GasLimitPovSizeRatio; type Timestamp = Timestamp; type WeightInfo = (); } diff --git a/frame/evm/src/res/proof_size_test_callee_contract_bytecode.txt b/frame/evm/src/res/proof_size_test_callee_contract_bytecode.txt new file mode 100644 index 0000000000..40f0a191bc --- /dev/null +++ b/frame/evm/src/res/proof_size_test_callee_contract_bytecode.txt @@ -0,0 +1 @@ +6080604052348015600f57600080fd5b50607480601d6000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c8063ac4c25b214602d575b600080fd5b60336035565b005b6000600190505056fea2646970667358221220eae4df57558ab19ae5b916be34b7789b8e52d806b4680224965e76ab9554d77d64736f6c63430008120033 \ No newline at end of file diff --git a/frame/evm/src/res/proof_size_test_contract_bytecode.txt b/frame/evm/src/res/proof_size_test_contract_bytecode.txt new file mode 100644 index 0000000000..006b2d018e --- /dev/null +++ b/frame/evm/src/res/proof_size_test_contract_bytecode.txt @@ -0,0 +1 @@ +608060405234801561001057600080fd5b506006600081905550610410806100286000396000f3fe608060405234801561001057600080fd5b50600436106100575760003560e01c806335f56c3b1461005c5780634f3080a914610078578063944ddc6214610082578063c6d6f6061461008c578063e27a0ecd146100a8575b600080fd5b61007660048036038101906100719190610265565b6100c6565b005b610080610103565b005b61008a610115565b005b6100a660048036038101906100a191906102d0565b610189565b005b6100b06101ec565b6040516100bd9190610316565b60405180910390f35b60008173ffffffffffffffffffffffffffffffffffffffff1631905060008273ffffffffffffffffffffffffffffffffffffffff16319050505050565b60046000819055506005600081905550565b6000600190505b6001156101865760008160001b604051602001610139919061035c565b6040516020818303038152906040528051906020012060001c905060008173ffffffffffffffffffffffffffffffffffffffff1631905060018361017d91906103a6565b9250505061011c565b50565b8073ffffffffffffffffffffffffffffffffffffffff1663ac4c25b26040518163ffffffff1660e01b8152600401600060405180830381600087803b1580156101d157600080fd5b505af11580156101e5573d6000803e3d6000fd5b5050505050565b6000806000549050600080549050809250505090565b600080fd5b600073ffffffffffffffffffffffffffffffffffffffff82169050919050565b600061023282610207565b9050919050565b61024281610227565b811461024d57600080fd5b50565b60008135905061025f81610239565b92915050565b60006020828403121561027b5761027a610202565b5b600061028984828501610250565b91505092915050565b600061029d82610227565b9050919050565b6102ad81610292565b81146102b857600080fd5b50565b6000813590506102ca816102a4565b92915050565b6000602082840312156102e6576102e5610202565b5b60006102f4848285016102bb565b91505092915050565b6000819050919050565b610310816102fd565b82525050565b600060208201905061032b6000830184610307565b92915050565b6000819050919050565b6000819050919050565b61035661035182610331565b61033b565b82525050565b60006103688284610345565b60208201915081905092915050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b60006103b1826102fd565b91506103bc836102fd565b92508282019050808211156103d4576103d3610377565b5b9291505056fea26469706673582212202182fa69ea8e39cf4dcbadb7f3ccba2544ba38c9be05a5087
cd41f59a174e1d164736f6c63430008120033 \ No newline at end of file diff --git a/frame/evm/src/runner/mod.rs b/frame/evm/src/runner/mod.rs index 179f1eae69..b23379690b 100644 --- a/frame/evm/src/runner/mod.rs +++ b/frame/evm/src/runner/mod.rs @@ -17,7 +17,7 @@ pub mod stack; -use crate::Config; +use crate::{Config, Weight}; use fp_evm::{CallInfo, CreateInfo}; use sp_core::{H160, H256, U256}; use sp_std::vec::Vec; @@ -25,7 +25,7 @@ use sp_std::vec::Vec; #[derive(Debug)] pub struct RunnerError> { pub error: E, - pub weight: frame_support::weights::Weight, + pub weight: Weight, } pub trait Runner { @@ -42,6 +42,8 @@ pub trait Runner { nonce: Option, access_list: Vec<(H160, Vec)>, is_transactional: bool, + weight_limit: Option, + transaction_len: Option, evm_config: &evm::Config, ) -> Result<(), RunnerError>; @@ -57,6 +59,8 @@ pub trait Runner { access_list: Vec<(H160, Vec)>, is_transactional: bool, validate: bool, + weight_limit: Option, + transaction_len: Option, config: &evm::Config, ) -> Result>; @@ -71,6 +75,8 @@ pub trait Runner { access_list: Vec<(H160, Vec)>, is_transactional: bool, validate: bool, + weight_limit: Option, + transaction_len: Option, config: &evm::Config, ) -> Result>; @@ -86,6 +92,8 @@ pub trait Runner { access_list: Vec<(H160, Vec)>, is_transactional: bool, validate: bool, + weight_limit: Option, + transaction_len: Option, config: &evm::Config, ) -> Result>; } diff --git a/frame/evm/src/runner/stack.rs b/frame/evm/src/runner/stack.rs index f21a8b1fbd..cdff3369ce 100644 --- a/frame/evm/src/runner/stack.rs +++ b/frame/evm/src/runner/stack.rs @@ -18,18 +18,22 @@ //! EVM stack-based runner. use crate::{ - runner::Runner as RunnerT, AccountCodes, AccountStorages, AddressMapping, BalanceOf, - BlockHashMapping, Config, Error, Event, FeeCalculator, OnChargeEVMTransaction, OnCreate, - Pallet, RunnerError, + runner::Runner as RunnerT, AccountCodes, AccountCodesMetadata, AccountStorages, AddressMapping, + BalanceOf, BlockHashMapping, Config, Error, Event, FeeCalculator, OnChargeEVMTransaction, + OnCreate, Pallet, RunnerError, Weight, }; use evm::{ backend::Backend as BackendT, executor::stack::{Accessed, StackExecutor, StackState as StackStateT, StackSubstateMetadata}, - ExitError, ExitReason, Transfer, + gasometer::{GasCost, StorageTarget}, + ExitError, ExitReason, Opcode, Transfer, }; use fp_evm::{ - CallInfo, CreateInfo, ExecutionInfo, IsPrecompileResult, Log, PrecompileSet, Vicinity, + AccessedStorage, CallInfo, CreateInfo, ExecutionInfoV2, IsPrecompileResult, Log, PrecompileSet, + Vicinity, WeightInfo, ACCOUNT_BASIC_PROOF_SIZE, ACCOUNT_CODES_METADATA_PROOF_SIZE, + ACCOUNT_STORAGE_PROOF_SIZE, IS_EMPTY_CHECK_PROOF_SIZE, WRITE_PROOF_SIZE, }; + use frame_support::traits::{Currency, ExistenceRequirement, Get, Time}; use sp_core::{H160, H256, U256}; use sp_runtime::traits::UniqueSaturatedInto; @@ -64,8 +68,10 @@ where config: &'config evm::Config, precompiles: &'precompiles T::PrecompilesType, is_transactional: bool, + weight_limit: Option, + proof_size_base_cost: Option, f: F, - ) -> Result, RunnerError>> + ) -> Result, RunnerError>> where F: FnOnce( &mut StackExecutor< @@ -99,6 +105,8 @@ where f, base_fee, weight, + weight_limit, + proof_size_base_cost, ); // Set IN_EVM to false @@ -121,8 +129,10 @@ where is_transactional: bool, f: F, base_fee: U256, - weight: crate::Weight, - ) -> Result, RunnerError>> + weight: Weight, + weight_limit: Option, + proof_size_base_cost: Option, + ) -> Result, RunnerError>> where F: FnOnce( &mut StackExecutor< @@ -134,21 +144,29 @@ where ) -> 
(ExitReason, R), R: Default, { + // Used to record the external costs in the evm through the StackState implementation + let maybe_weight_info = + WeightInfo::new_from_weight_limit(weight_limit, proof_size_base_cost).map_err( + |_| RunnerError { + error: Error::::Undefined, + weight, + }, + )?; // The precompile check is only used for transactional invocations. However, here we always // execute the check, because the check has side effects. - let is_precompile = match precompiles.is_precompile(source, gas_limit) { - IsPrecompileResult::Answer { - is_precompile, - extra_cost, - } => { + match precompiles.is_precompile(source, gas_limit) { + IsPrecompileResult::Answer { extra_cost, .. } => { gas_limit = gas_limit.saturating_sub(extra_cost); - is_precompile } IsPrecompileResult::OutOfGas => { - return Ok(ExecutionInfo { + return Ok(ExecutionInfoV2 { exit_reason: ExitError::OutOfGas.into(), value: Default::default(), - used_gas: gas_limit.into(), + used_gas: fp_evm::UsedGas { + standard: gas_limit.into(), + effective: gas_limit.into(), + }, + weight_info: maybe_weight_info, logs: Default::default(), }) } @@ -160,12 +178,7 @@ where // // EIP-3607: https://eips.ethereum.org/EIPS/eip-3607 // Do not allow transactions for which `tx.sender` has any code deployed. - // - // We extend the principle of this EIP to also prevent `tx.sender` to be the address - // of a precompile. While mainnet Ethereum currently only has stateless precompiles, - // projects using Frontier can have stateful precompiles that can manage funds or - // which calls other contracts that expects this precompile address to be trustworthy. - if is_transactional && (!>::get(source).is_empty() || is_precompile) { + if is_transactional && !>::get(source).is_empty() { return Err(RunnerError { error: Error::::TransactionMustComeFromEOA, weight, @@ -222,14 +235,25 @@ where }; let metadata = StackSubstateMetadata::new(gas_limit, config); - let state = SubstrateStackState::new(&vicinity, metadata); + let state = SubstrateStackState::new(&vicinity, metadata, maybe_weight_info); let mut executor = StackExecutor::new_with_precompiles(state, config, precompiles); let (reason, retv) = f(&mut executor); // Post execution. 
- let used_gas = U256::from(executor.used_gas()); - let actual_fee = executor.fee(total_fee_per_gas); + let used_gas = executor.used_gas(); + let effective_gas = match executor.state().weight_info() { + Some(weight_info) => U256::from(sp_std::cmp::max( + used_gas, + weight_info + .proof_size_usage + .unwrap_or_default() + .saturating_mul(T::GasLimitPovSizeRatio::get()), + )), + _ => used_gas.into(), + }; + let actual_fee = effective_gas.saturating_mul(total_fee_per_gas); + log::debug!( target: "evm", "Execution {:?} [source: {:?}, value: {}, gas_limit: {}, actual_fee: {}, is_transactional: {}]", @@ -274,13 +298,13 @@ where let state = executor.into_state(); - for address in state.substate.deletes { + for address in &state.substate.deletes { log::debug!( target: "evm", "Deleting account at {:?}", address ); - Pallet::::remove_account(&address) + Pallet::::remove_account(address) } for log in &state.substate.logs { @@ -302,10 +326,14 @@ where }); } - Ok(ExecutionInfo { + Ok(ExecutionInfoV2 { value: retv, exit_reason: reason, - used_gas, + used_gas: fp_evm::UsedGas { + standard: used_gas.into(), + effective: effective_gas, + }, + weight_info: state.weight_info(), logs: state.substate.logs, }) } @@ -328,6 +356,8 @@ where nonce: Option, access_list: Vec<(H160, Vec)>, is_transactional: bool, + weight_limit: Option, + proof_size_base_cost: Option, evm_config: &evm::Config, ) -> Result<(), RunnerError> { let (base_fee, mut weight) = T::FeeCalculator::min_gas_price(); @@ -354,6 +384,8 @@ where value, access_list, }, + weight_limit, + proof_size_base_cost, ) .validate_in_block_for(&source_account) .and_then(|v| v.with_base_fee()) @@ -374,6 +406,8 @@ where access_list: Vec<(H160, Vec)>, is_transactional: bool, validate: bool, + weight_limit: Option, + proof_size_base_cost: Option, config: &evm::Config, ) -> Result> { if validate { @@ -388,6 +422,8 @@ where nonce, access_list.clone(), is_transactional, + weight_limit, + proof_size_base_cost, config, )?; } @@ -401,6 +437,8 @@ where config, &precompiles, is_transactional, + weight_limit, + proof_size_base_cost, |executor| executor.transact_call(source, target, value, input, gas_limit, access_list), ) } @@ -416,6 +454,8 @@ where access_list: Vec<(H160, Vec)>, is_transactional: bool, validate: bool, + weight_limit: Option, + proof_size_base_cost: Option, config: &evm::Config, ) -> Result> { if validate { @@ -430,6 +470,8 @@ where nonce, access_list.clone(), is_transactional, + weight_limit, + proof_size_base_cost, config, )?; } @@ -443,6 +485,8 @@ where config, &precompiles, is_transactional, + weight_limit, + proof_size_base_cost, |executor| { let address = executor.create_address(evm::CreateScheme::Legacy { caller: source }); T::OnCreate::on_create(source, address); @@ -465,6 +509,8 @@ where access_list: Vec<(H160, Vec)>, is_transactional: bool, validate: bool, + weight_limit: Option, + proof_size_base_cost: Option, config: &evm::Config, ) -> Result> { if validate { @@ -479,6 +525,8 @@ where nonce, access_list.clone(), is_transactional, + weight_limit, + proof_size_base_cost, config, )?; } @@ -493,6 +541,8 @@ where config, &precompiles, is_transactional, + weight_limit, + proof_size_base_cost, |executor| { let address = executor.create_address(evm::CreateScheme::Create2 { caller: source, @@ -605,17 +655,29 @@ impl<'config> SubstrateStackSubstate<'config> { } } +#[derive(Default, Clone, Eq, PartialEq)] +pub struct Recorded { + account_codes: sp_std::vec::Vec, + account_storages: BTreeMap<(H160, H256), bool>, +} + /// Substrate backend for EVM. 
pub struct SubstrateStackState<'vicinity, 'config, T> { vicinity: &'vicinity Vicinity, substate: SubstrateStackSubstate<'config>, original_storage: BTreeMap<(H160, H256), H256>, + recorded: Recorded, + weight_info: Option, _marker: PhantomData, } impl<'vicinity, 'config, T: Config> SubstrateStackState<'vicinity, 'config, T> { /// Create a new backend with given vicinity. - pub fn new(vicinity: &'vicinity Vicinity, metadata: StackSubstateMetadata<'config>) -> Self { + pub fn new( + vicinity: &'vicinity Vicinity, + metadata: StackSubstateMetadata<'config>, + weight_info: Option, + ) -> Self { Self { vicinity, substate: SubstrateStackSubstate { @@ -626,11 +688,28 @@ impl<'vicinity, 'config, T: Config> SubstrateStackState<'vicinity, 'config, T> { }, _marker: PhantomData, original_storage: BTreeMap::new(), + recorded: Default::default(), + weight_info, } } + + pub fn weight_info(&self) -> Option { + self.weight_info + } + + pub fn recorded(&self) -> &Recorded { + &self.recorded + } + + pub fn info_mut(&mut self) -> (&mut Option, &mut Recorded) { + (&mut self.weight_info, &mut self.recorded) + } } -impl<'vicinity, 'config, T: Config> BackendT for SubstrateStackState<'vicinity, 'config, T> { +impl<'vicinity, 'config, T: Config> BackendT for SubstrateStackState<'vicinity, 'config, T> +where + BalanceOf: TryFrom + Into, +{ fn gas_price(&self) -> U256 { self.vicinity.gas_price } @@ -689,6 +768,61 @@ impl<'vicinity, 'config, T: Config> BackendT for SubstrateStackState<'vicinity, } } + fn record_external_operation(&mut self, op: evm::ExternalOperation) -> Result<(), ExitError> { + let size_limit: u64 = self + .metadata() + .gasometer() + .config() + .create_contract_limit + .unwrap_or_default() as u64; + + let (weight_info, recorded) = self.info_mut(); + + if let Some(weight_info) = weight_info { + match op { + evm::ExternalOperation::AccountBasicRead => { + weight_info.try_record_proof_size_or_fail(ACCOUNT_BASIC_PROOF_SIZE)? + } + evm::ExternalOperation::AddressCodeRead(address) => { + let maybe_record = !recorded.account_codes.contains(&address); + // Skip if the address has been already recorded this block + if maybe_record { + // First we record account emptiness check. + // Transfers to EOAs with standard 21_000 gas limit are able to + // pay for this pov size. + weight_info.try_record_proof_size_or_fail(IS_EMPTY_CHECK_PROOF_SIZE)?; + + if >::decode_len(address).unwrap_or(0) == 0 { + return Ok(()); + } + // Try to record fixed sized `AccountCodesMetadata` read + // Tentatively 16 + 20 + 40 + weight_info + .try_record_proof_size_or_fail(ACCOUNT_CODES_METADATA_PROOF_SIZE)?; + if let Some(meta) = >::get(address) { + weight_info.try_record_proof_size_or_fail(meta.size)?; + } else { + // If it does not exist, try to record `create_contract_limit` first. + weight_info.try_record_proof_size_or_fail(size_limit)?; + let meta = Pallet::::account_code_metadata(address); + let actual_size = meta.size; + // Refund if applies + weight_info.refund_proof_size(size_limit.saturating_sub(actual_size)); + } + recorded.account_codes.push(address); + } + } + evm::ExternalOperation::IsEmpty => { + weight_info.try_record_proof_size_or_fail(IS_EMPTY_CHECK_PROOF_SIZE)? + } + evm::ExternalOperation::Write => { + weight_info.try_record_proof_size_or_fail(WRITE_PROOF_SIZE)? 
+ } + }; + } + Ok(()) + } + fn code(&self, address: H160) -> Vec { >::get(address) } @@ -698,8 +832,6 @@ impl<'vicinity, 'config, T: Config> BackendT for SubstrateStackState<'vicinity, } fn original_storage(&self, address: H160, index: H256) -> Option { - // Not being cached means that it was never changed, which means we - // can fetch it from storage. Some( self.original_storage .get(&(address, index)) @@ -816,7 +948,6 @@ where fn transfer(&mut self, transfer: Transfer) -> Result<(), ExitError> { let source = T::AddressMapping::into_account_id(transfer.source); let target = T::AddressMapping::into_account_id(transfer.target); - T::Currency::transfer( &source, &target, @@ -862,6 +993,202 @@ where fn code_hash(&self, address: H160) -> H256 { >::account_code_metadata(address).hash } + + fn record_external_dynamic_opcode_cost( + &mut self, + opcode: Opcode, + _gas_cost: GasCost, + target: evm::gasometer::StorageTarget, + ) -> Result<(), ExitError> { + // If account code or storage slot is in the overlay it is already accounted for and early exit + let mut accessed_storage: Option = match target { + StorageTarget::Address(address) => { + if self.recorded().account_codes.contains(&address) { + return Ok(()); + } else { + Some(AccessedStorage::AccountCodes(address)) + } + } + StorageTarget::Slot(address, index) => { + if self + .recorded() + .account_storages + .contains_key(&(address, index)) + { + return Ok(()); + } else { + Some(AccessedStorage::AccountStorages((address, index))) + } + } + _ => None, + }; + + let size_limit: u64 = self + .metadata() + .gasometer() + .config() + .create_contract_limit + .unwrap_or_default() as u64; + + let (weight_info, recorded) = { + let (weight_info, recorded) = self.info_mut(); + if let Some(weight_info) = weight_info { + (weight_info, recorded) + } else { + return Ok(()); + } + }; + + // Record ref_time first + // TODO benchmark opcodes, until this is done we do used_gas to weight conversion for ref_time + + // Record proof_size + // Return if proof size recording is disabled + let proof_size_limit = if let Some(proof_size_limit) = weight_info.proof_size_limit { + proof_size_limit + } else { + return Ok(()); + }; + + let mut maybe_record_and_refund = |with_empty_check: bool| -> Result<(), ExitError> { + let address = if let Some(AccessedStorage::AccountCodes(address)) = accessed_storage { + address + } else { + // This must be unreachable, a valid target must be set. + // TODO decide how do we want to gracefully handle. + return Err(ExitError::OutOfGas); + }; + // First try to record fixed sized `AccountCodesMetadata` read + // Tentatively 20 + 8 + 32 + let mut base_cost = ACCOUNT_CODES_METADATA_PROOF_SIZE; + if with_empty_check { + base_cost = base_cost.saturating_add(IS_EMPTY_CHECK_PROOF_SIZE); + } + weight_info.try_record_proof_size_or_fail(base_cost)?; + if let Some(meta) = >::get(address) { + weight_info.try_record_proof_size_or_fail(meta.size)?; + } else { + // If it does not exist, try to record `create_contract_limit` first. + weight_info.try_record_proof_size_or_fail(size_limit)?; + let meta = Pallet::::account_code_metadata(address); + let actual_size = meta.size; + // Refund if applies + weight_info.refund_proof_size(size_limit.saturating_sub(actual_size)); + } + recorded.account_codes.push(address); + // Already recorded, return + Ok(()) + }; + + // Proof size is fixed length for writes (a 32-byte hash in a merkle trie), and + // the full key/value for reads. For read and writes over the same storage, the full value + // is included. 
+ // For cold reads involving code (call, callcode, staticcall and delegatecall): + // - We depend on https://github.com/paritytech/frontier/pull/893 + // - Try to get the cached size or compute it on the fly + // - We record the actual size after caching, refunding the difference between it and the initially deducted + // contract size limit. + let opcode_proof_size = match opcode { + // Basic account fixed length + Opcode::BALANCE => { + accessed_storage = None; + U256::from(ACCOUNT_BASIC_PROOF_SIZE) + } + Opcode::EXTCODESIZE | Opcode::EXTCODECOPY | Opcode::EXTCODEHASH => { + return maybe_record_and_refund(false) + } + Opcode::CALLCODE | Opcode::CALL | Opcode::DELEGATECALL | Opcode::STATICCALL => { + return maybe_record_and_refund(true) + } + // (H160, H256) double map blake2 128 concat key size (68) + value 32 + Opcode::SLOAD => U256::from(ACCOUNT_STORAGE_PROOF_SIZE), + Opcode::SSTORE => { + let (address, index) = + if let Some(AccessedStorage::AccountStorages((address, index))) = + accessed_storage + { + (address, index) + } else { + // This must be unreachable, a valid target must be set. + // TODO decide how do we want to gracefully handle. + return Err(ExitError::OutOfGas); + }; + let mut cost = WRITE_PROOF_SIZE; + let maybe_record = !recorded.account_storages.contains_key(&(address, index)); + // If the slot is yet to be accessed we charge for it, as the evm reads + // it prior to the opcode execution. + // Skip if the address and index has been already recorded this block. + if maybe_record { + cost = cost.saturating_add(ACCOUNT_STORAGE_PROOF_SIZE); + } + U256::from(cost) + } + // Fixed trie 32 byte hash + Opcode::CREATE | Opcode::CREATE2 => U256::from(WRITE_PROOF_SIZE), + // When calling SUICIDE a target account will receive the self destructing + // address's balance. 
We need to account for both: + // - Target basic account read + // - 5 bytes of `decode_len` + Opcode::SUICIDE => { + accessed_storage = None; + U256::from(IS_EMPTY_CHECK_PROOF_SIZE) + } + // Rest of dynamic opcodes that do not involve proof size recording, do nothing + _ => return Ok(()), + }; + + if opcode_proof_size > U256::from(u64::MAX) { + weight_info.try_record_proof_size_or_fail(proof_size_limit)?; + return Err(ExitError::OutOfGas); + } + + // Cache the storage access + match accessed_storage { + Some(AccessedStorage::AccountStorages((address, index))) => { + recorded.account_storages.insert((address, index), true); + } + Some(AccessedStorage::AccountCodes(address)) => { + recorded.account_codes.push(address); + } + _ => {} + } + + // Record cost + self.record_external_cost(None, Some(opcode_proof_size.low_u64()))?; + Ok(()) + } + + fn record_external_cost( + &mut self, + ref_time: Option, + proof_size: Option, + ) -> Result<(), ExitError> { + let weight_info = if let (Some(weight_info), _) = self.info_mut() { + weight_info + } else { + return Ok(()); + }; + // Record ref_time first + // TODO benchmark opcodes, until this is done we do used_gas to weight conversion for ref_time + if let Some(amount) = ref_time { + weight_info.try_record_ref_time_or_fail(amount)?; + } + if let Some(amount) = proof_size { + weight_info.try_record_proof_size_or_fail(amount)?; + } + Ok(()) + } + + fn refund_external_cost(&mut self, ref_time: Option, proof_size: Option) { + if let Some(mut weight_info) = self.weight_info { + if let Some(amount) = ref_time { + weight_info.refund_ref_time(amount); + } + if let Some(amount) = proof_size { + weight_info.refund_proof_size(amount); + } + } + } } #[cfg(feature = "forbid-evm-reentrancy")] @@ -910,7 +1237,7 @@ mod tests { ); assert_matches!( res, - Ok(ExecutionInfo { + Ok(ExecutionInfoV2 { exit_reason: ExitReason::Error(ExitError::CallTooDeep), .. 
}) diff --git a/frame/evm/src/tests.rs b/frame/evm/src/tests.rs index bb09d665db..bb1850708c 100644 --- a/frame/evm/src/tests.rs +++ b/frame/evm/src/tests.rs @@ -26,6 +26,604 @@ use frame_support::{ }; use std::{collections::BTreeMap, str::FromStr}; +mod proof_size_test { + use super::*; + use fp_evm::{ + CreateInfo, ACCOUNT_BASIC_PROOF_SIZE, ACCOUNT_CODES_METADATA_PROOF_SIZE, + ACCOUNT_STORAGE_PROOF_SIZE, IS_EMPTY_CHECK_PROOF_SIZE, WRITE_PROOF_SIZE, + }; + use frame_support::traits::StorageInfoTrait; + // pragma solidity ^0.8.2; + // contract Callee { + // // ac4c25b2 + // function void() public { + // uint256 foo = 1; + // } + // } + pub const PROOF_SIZE_TEST_CALLEE_CONTRACT_BYTECODE: &str = + include_str!("./res/proof_size_test_callee_contract_bytecode.txt"); + // pragma solidity ^0.8.2; + // contract ProofSizeTest { + // uint256 foo; + // constructor() { + // foo = 6; + // } + // // 35f56c3b + // function test_balance(address who) public { + // // cold + // uint256 a = address(who).balance; + // // warm + // uint256 b = address(who).balance; + // } + // // e27a0ecd + // function test_sload() public returns (uint256) { + // // cold + // uint256 a = foo; + // // warm + // uint256 b = foo; + // return b; + // } + // // 4f3080a9 + // function test_sstore() public { + // // cold + // foo = 4; + // // warm + // foo = 5; + // } + // // c6d6f606 + // function test_call(Callee _callee) public { + // _callee.void(); + // } + // // 944ddc62 + // function test_oog() public { + // uint256 i = 1; + // while(true) { + // address who = address(uint160(uint256(keccak256(abi.encodePacked(bytes32(i)))))); + // uint256 a = address(who).balance; + // i = i + 1; + // } + // } + // } + pub const PROOF_SIZE_TEST_CONTRACT_BYTECODE: &str = + include_str!("./res/proof_size_test_contract_bytecode.txt"); + + fn create_proof_size_test_callee_contract( + gas_limit: u64, + weight_limit: Option, + ) -> Result>> { + ::Runner::create( + H160::default(), + hex::decode(PROOF_SIZE_TEST_CALLEE_CONTRACT_BYTECODE.trim_end()).unwrap(), + U256::zero(), + gas_limit, + Some(FixedGasPrice::min_gas_price().0), + None, + None, + Vec::new(), + true, // transactional + true, // must be validated + weight_limit, + Some(0), + &::config().clone(), + ) + } + + fn create_proof_size_test_contract( + gas_limit: u64, + weight_limit: Option, + ) -> Result>> { + ::Runner::create( + H160::default(), + hex::decode(PROOF_SIZE_TEST_CONTRACT_BYTECODE.trim_end()).unwrap(), + U256::zero(), + gas_limit, + Some(FixedGasPrice::min_gas_price().0), + None, + None, + Vec::new(), + true, // non-transactional + true, // must be validated + weight_limit, + Some(0), + &::config().clone(), + ) + } + + #[test] + fn account_basic_proof_size_constant_matches() { + assert_eq!( + ACCOUNT_BASIC_PROOF_SIZE, + frame_system::Account::::storage_info() + .get(0) + .expect("item") + .max_size + .expect("size") as u64 + ); + } + + #[test] + fn account_storage_proof_size_constant_matches() { + assert_eq!( + ACCOUNT_STORAGE_PROOF_SIZE, + AccountStorages::::storage_info() + .get(0) + .expect("item") + .max_size + .expect("size") as u64 + ); + } + + #[test] + fn account_codes_metadata_proof_size_constant_matches() { + assert_eq!( + ACCOUNT_CODES_METADATA_PROOF_SIZE, + AccountCodesMetadata::::storage_info() + .get(0) + .expect("item") + .max_size + .expect("size") as u64 + ); + } + + #[test] + fn proof_size_create_accounting_works() { + new_test_ext().execute_with(|| { + let gas_limit: u64 = 1_000_000; + let weight_limit = FixedGasWeightMapping::::gas_to_weight(gas_limit, true); 
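			// For intuition (an illustrative aside, assuming the fixed gas-to-weight
			// mapping used by the mock runtime): the proof size component of
			// `weight_limit` is tied to the gas limit through the runtime's
			// `GasLimitPovSizeRatio`. A hypothetical helper expressing that relation:
			fn approx_proof_size_budget(gas_limit: u64, gas_limit_pov_size_ratio: u64) -> u64 {
				// Each byte of recordable proof size is paid for by `ratio` units of gas.
				gas_limit.saturating_div(gas_limit_pov_size_ratio)
			}
			// So the 1_000_000 gas limit above also bounds how much PoV the create
			// below may record before it reports out of gas.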
+ + let result = create_proof_size_test_callee_contract(gas_limit, Some(weight_limit)) + .expect("create succeeds"); + + // Creating a new contract does not involve reading the code from storage. + // We account for a fixed hash proof size write, an empty check and . + let write_cost = WRITE_PROOF_SIZE; + let is_empty_check = IS_EMPTY_CHECK_PROOF_SIZE; + let nonce_increases = ACCOUNT_BASIC_PROOF_SIZE * 2; + let expected_proof_size = write_cost + is_empty_check + nonce_increases; + + let actual_proof_size = result + .weight_info + .expect("weight info") + .proof_size_usage + .expect("proof size usage"); + + assert_eq!(expected_proof_size, actual_proof_size); + }); + } + + #[test] + fn proof_size_subcall_accounting_works() { + new_test_ext().execute_with(|| { + // Create callee contract A + let gas_limit: u64 = 1_000_000; + let weight_limit = FixedGasWeightMapping::::gas_to_weight(gas_limit, true); + let result = + create_proof_size_test_callee_contract(gas_limit, None).expect("create succeeds"); + + let subcall_contract_address = result.value; + + // Create proof size test contract B + let result = create_proof_size_test_contract(gas_limit, None).expect("create succeeds"); + + let call_contract_address = result.value; + + // Call B, that calls A, with weight limit + // selector for ProofSizeTest::test_call function.. + let mut call_data: String = "c6d6f606000000000000000000000000".to_owned(); + // ..encode the callee address argument + call_data.push_str(&format!("{:x}", subcall_contract_address)); + + let result = ::Runner::call( + H160::default(), + call_contract_address, + hex::decode(&call_data).unwrap(), + U256::zero(), + gas_limit, + Some(FixedGasPrice::min_gas_price().0), + None, + None, + Vec::new(), + true, // transactional + true, // must be validated + Some(weight_limit), + Some(0), + &::config().clone(), + ) + .expect("call succeeds"); + + // Expected proof size + let reading_main_contract_len = AccountCodes::::get(call_contract_address).len(); + let reading_contract_len = AccountCodes::::get(subcall_contract_address).len(); + let read_account_metadata = ACCOUNT_CODES_METADATA_PROOF_SIZE as usize; + let is_empty_check = (IS_EMPTY_CHECK_PROOF_SIZE * 2) as usize; + let increase_nonce = (ACCOUNT_BASIC_PROOF_SIZE * 3) as usize; + let expected_proof_size = ((read_account_metadata * 2) + + reading_contract_len + + reading_main_contract_len + + is_empty_check + increase_nonce) as u64; + + let actual_proof_size = result + .weight_info + .expect("weight info") + .proof_size_usage + .expect("proof size usage"); + + assert_eq!(expected_proof_size, actual_proof_size); + }); + } + + #[test] + fn proof_size_balance_accounting_works() { + new_test_ext().execute_with(|| { + let gas_limit: u64 = 1_000_000; + let weight_limit = FixedGasWeightMapping::::gas_to_weight(gas_limit, true); + + // Create proof size test contract + let result = create_proof_size_test_contract(gas_limit, None).expect("create succeeds"); + + let call_contract_address = result.value; + + // selector for ProofSizeTest::balance function.. 
+ let mut call_data: String = "35f56c3b000000000000000000000000".to_owned(); + // ..encode bobs address + call_data.push_str(&format!("{:x}", H160::random())); + + let result = ::Runner::call( + H160::default(), + call_contract_address, + hex::decode(&call_data).unwrap(), + U256::zero(), + gas_limit, + Some(FixedGasPrice::min_gas_price().0), + None, + None, + Vec::new(), + true, // transactional + true, // must be validated + Some(weight_limit), + Some(0), + &::config().clone(), + ) + .expect("call succeeds"); + + // - Three account reads. + // - Main contract code read. + // - One metadata read. + let basic_account_size = (ACCOUNT_BASIC_PROOF_SIZE * 3) as usize; + let read_account_metadata = ACCOUNT_CODES_METADATA_PROOF_SIZE as usize; + let is_empty_check = IS_EMPTY_CHECK_PROOF_SIZE as usize; + let increase_nonce = ACCOUNT_BASIC_PROOF_SIZE as usize; + let reading_main_contract_len = AccountCodes::::get(call_contract_address).len(); + let expected_proof_size = (basic_account_size + + read_account_metadata + + reading_main_contract_len + + is_empty_check + increase_nonce) as u64; + + let actual_proof_size = result + .weight_info + .expect("weight info") + .proof_size_usage + .expect("proof size usage"); + + assert_eq!(expected_proof_size, actual_proof_size); + }); + } + + #[test] + fn proof_size_sload_accounting_works() { + new_test_ext().execute_with(|| { + let gas_limit: u64 = 1_000_000; + let weight_limit = FixedGasWeightMapping::::gas_to_weight(gas_limit, true); + + // Create proof size test contract + let result = create_proof_size_test_contract(gas_limit, None).expect("create succeeds"); + + let call_contract_address = result.value; + + // selector for ProofSizeTest::test_sload function.. + let call_data: String = "e27a0ecd".to_owned(); + let result = ::Runner::call( + H160::default(), + call_contract_address, + hex::decode(&call_data).unwrap(), + U256::zero(), + gas_limit, + Some(FixedGasPrice::min_gas_price().0), + None, + None, + Vec::new(), + true, // transactional + true, // must be validated + Some(weight_limit), + Some(0), + &::config().clone(), + ) + .expect("call succeeds"); + + let reading_main_contract_len = + AccountCodes::::get(call_contract_address).len() as u64; + let expected_proof_size = reading_main_contract_len + + ACCOUNT_STORAGE_PROOF_SIZE + + ACCOUNT_CODES_METADATA_PROOF_SIZE + + IS_EMPTY_CHECK_PROOF_SIZE + + (ACCOUNT_BASIC_PROOF_SIZE * 2); + + let actual_proof_size = result + .weight_info + .expect("weight info") + .proof_size_usage + .expect("proof size usage"); + + assert_eq!(expected_proof_size, actual_proof_size); + }); + } + + #[test] + fn proof_size_sstore_accounting_works() { + new_test_ext().execute_with(|| { + let gas_limit: u64 = 1_000_000; + let weight_limit = FixedGasWeightMapping::::gas_to_weight(gas_limit, true); + + // Create proof size test contract + let result = create_proof_size_test_contract(gas_limit, None).expect("create succeeds"); + + let call_contract_address = result.value; + + // selector for ProofSizeTest::test_sstore function.. 
+ let call_data: String = "4f3080a9".to_owned(); + let result = ::Runner::call( + H160::default(), + call_contract_address, + hex::decode(&call_data).unwrap(), + U256::zero(), + gas_limit, + Some(FixedGasPrice::min_gas_price().0), + None, + None, + Vec::new(), + true, // transactional + true, // must be validated + Some(weight_limit), + Some(0), + &::config().clone(), + ) + .expect("call succeeds"); + + let reading_main_contract_len = + AccountCodes::::get(call_contract_address).len() as u64; + let expected_proof_size = reading_main_contract_len + + WRITE_PROOF_SIZE + + ACCOUNT_CODES_METADATA_PROOF_SIZE + + ACCOUNT_STORAGE_PROOF_SIZE + + IS_EMPTY_CHECK_PROOF_SIZE + + (ACCOUNT_BASIC_PROOF_SIZE * 2); + + let actual_proof_size = result + .weight_info + .expect("weight info") + .proof_size_usage + .expect("proof size usage"); + + assert_eq!(expected_proof_size, actual_proof_size); + }); + } + + #[test] + fn proof_size_oog_works() { + new_test_ext().execute_with(|| { + let gas_limit: u64 = 1_000_000; + let mut weight_limit = FixedGasWeightMapping::::gas_to_weight(gas_limit, true); + + // Artifically set a lower proof size limit so we OOG this instead gas. + *weight_limit.proof_size_mut() = weight_limit.proof_size() / 2; + + // Create proof size test contract + let result = create_proof_size_test_contract(gas_limit, None).expect("create succeeds"); + + let call_contract_address = result.value; + + // selector for ProofSizeTest::test_oog function.. + let call_data: String = "944ddc62".to_owned(); + let result = ::Runner::call( + H160::default(), + call_contract_address, + hex::decode(&call_data).unwrap(), + U256::zero(), + gas_limit, + Some(FixedGasPrice::min_gas_price().0), + None, + None, + Vec::new(), + true, // transactional + true, // must be validated + Some(weight_limit), + Some(0), + &::config().clone(), + ) + .expect("call succeeds"); + + // Find how many random balance reads can we do with the available proof size. + let reading_main_contract_len = + AccountCodes::::get(call_contract_address).len() as u64; + let overhead = reading_main_contract_len + + ACCOUNT_CODES_METADATA_PROOF_SIZE + + IS_EMPTY_CHECK_PROOF_SIZE; + let available_proof_size = weight_limit.proof_size() - overhead; + let number_balance_reads = + available_proof_size.saturating_div(ACCOUNT_BASIC_PROOF_SIZE); + // The actual proof size consumed by those balance reads. 
+ let expected_proof_size = + overhead + (number_balance_reads * ACCOUNT_BASIC_PROOF_SIZE) as u64; + + let actual_proof_size = result + .weight_info + .expect("weight info") + .proof_size_usage + .expect("proof size usage"); + + assert_eq!(expected_proof_size, actual_proof_size); + }); + } + + #[test] + fn uncached_account_code_proof_size_accounting_works() { + new_test_ext().execute_with(|| { + // Create callee contract A + let gas_limit: u64 = 1_000_000; + let weight_limit = FixedGasWeightMapping::::gas_to_weight(gas_limit, true); + let result = + create_proof_size_test_callee_contract(gas_limit, None).expect("create succeeds"); + + let subcall_contract_address = result.value; + + // Expect callee contract code hash and size to be cached + let _ = >::get(subcall_contract_address) + .expect("contract code hash and size are cached"); + + // Remove callee cache + >::remove(subcall_contract_address); + + // Create proof size test contract B + let result = create_proof_size_test_contract(gas_limit, None).expect("create succeeds"); + + let call_contract_address = result.value; + + // Call B, that calls A, with weight limit + // selector for ProofSizeTest::test_call function.. + let mut call_data: String = "c6d6f606000000000000000000000000".to_owned(); + // ..encode the callee address argument + call_data.push_str(&format!("{:x}", subcall_contract_address)); + let result = ::Runner::call( + H160::default(), + call_contract_address, + hex::decode(&call_data).unwrap(), + U256::zero(), + gas_limit, + Some(FixedGasPrice::min_gas_price().0), + None, + None, + Vec::new(), + true, // transactional + true, // must be validated + Some(weight_limit), + Some(0), + &::config().clone(), + ) + .expect("call succeeds"); + + // Expected proof size + let read_account_metadata = ACCOUNT_CODES_METADATA_PROOF_SIZE as usize; + let is_empty_check = (IS_EMPTY_CHECK_PROOF_SIZE * 2) as usize; + let increase_nonce = (ACCOUNT_BASIC_PROOF_SIZE * 3) as usize; + let reading_main_contract_len = AccountCodes::::get(call_contract_address).len(); + let reading_callee_contract_len = + AccountCodes::::get(subcall_contract_address).len(); + // In order to do the subcall, we need to check metadata 3 times - + // one for each contract + one for the call opcode -, load two bytecodes - caller and callee. + let expected_proof_size = ((read_account_metadata * 2) + + reading_callee_contract_len + + reading_main_contract_len + + is_empty_check + increase_nonce) as u64; + + let actual_proof_size = result + .weight_info + .expect("weight info") + .proof_size_usage + .expect("proof size usage"); + + assert_eq!(expected_proof_size, actual_proof_size); + }); + } + + #[test] + fn proof_size_breaks_standard_transfer() { + new_test_ext().execute_with(|| { + // In this test we do a simple transfer to an address with an stored code which is + // greater in size (and thus load cost) than the transfer flat fee of 21_000. + + // We assert that providing 21_000 gas limit will not work, because the pov size limit + // will OOG. 
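			// For intuition (illustrative numbers, assuming the template runtime
			// constants introduced elsewhere in this patch): with
			//
			//     ratio = 75_000_000 gas / (5 * 1024 * 1024) bytes of PoV ~= 14
			//
			// a 21_000 gas limit affords only about 21_000 / 14 = 1_500 bytes of
			// proof size, while loading code stored at `create_contract_limit` costs
			// far more than that, so proof size recording reports OutOfGas even
			// though 21_000 gas covers the plain transfer. The next test shows the
			// other direction: with a generous gas limit the charge is refunded down
			// to the PoV-dominant cost, `proof_size * ratio`.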
+ let fake_contract_address = H160::random(); + let config = ::config().clone(); + let fake_contract_code = vec![0; config.create_contract_limit.expect("a value")]; + AccountCodes::::insert(fake_contract_address, fake_contract_code); + + let gas_limit: u64 = 21_000; + let weight_limit = FixedGasWeightMapping::::gas_to_weight(gas_limit, true); + + let result = ::Runner::call( + H160::default(), + fake_contract_address, + Vec::new(), + U256::from(777), + gas_limit, + Some(FixedGasPrice::min_gas_price().0), + None, + None, + Vec::new(), + true, // transactional + true, // must be validated + Some(weight_limit), + Some(0), + &config, + ) + .expect("call succeeds"); + + assert_eq!( + result.exit_reason, + crate::ExitReason::Error(crate::ExitError::OutOfGas) + ); + }); + } + + #[test] + fn proof_size_based_refunding_works() { + new_test_ext().execute_with(|| { + // In this test we do a simple transfer to an address with an stored code which is + // greater in size (and thus load cost) than the transfer flat fee of 21_000. + + // Assert that if we provide enough gas limit, the refund will be based on the pov + // size consumption, not the 21_000 gas. + let fake_contract_address = H160::random(); + let config = ::config().clone(); + let fake_contract_code = vec![0; config.create_contract_limit.expect("a value")]; + AccountCodes::::insert(fake_contract_address, fake_contract_code); + + let gas_limit: u64 = 700_000; + let weight_limit = FixedGasWeightMapping::::gas_to_weight(gas_limit, true); + + let result = ::Runner::call( + H160::default(), + fake_contract_address, + Vec::new(), + U256::from(777), + gas_limit, + Some(FixedGasPrice::min_gas_price().0), + None, + None, + Vec::new(), + true, // transactional + true, // must be validated + Some(weight_limit), + Some(0), + &config, + ) + .expect("call succeeds"); + + let ratio = <::GasLimitPovSizeRatio as Get>::get(); + let used_gas = result.used_gas; + let actual_proof_size = result + .weight_info + .expect("weight info") + .proof_size_usage + .expect("proof size usage"); + + assert_eq!(used_gas.standard, U256::from(21_000)); + assert_eq!(used_gas.effective, U256::from(actual_proof_size * ratio)); + }); + } +} + type Balances = pallet_balances::Pallet; type EVM = Pallet; @@ -461,6 +1059,8 @@ fn runner_non_transactional_calls_with_non_balance_accounts_is_ok_without_gas_pr Vec::new(), false, // non-transactional true, // must be validated + None, + None, &::config().clone(), ) .expect("Non transactional call succeeds"); @@ -495,6 +1095,8 @@ fn runner_non_transactional_calls_with_non_balance_accounts_is_err_with_gas_pric Vec::new(), false, // non-transactional true, // must be validated + None, + None, &::config().clone(), ); assert!(res.is_err()); @@ -517,6 +1119,8 @@ fn runner_transactional_call_with_zero_gas_price_fails() { Vec::new(), true, // transactional true, // must be validated + None, + None, &::config().clone(), ); assert!(res.is_err()); @@ -539,6 +1143,8 @@ fn runner_max_fee_per_gas_gte_max_priority_fee_per_gas() { Vec::new(), true, // transactional true, // must be validated + None, + None, &::config().clone(), ); assert!(res.is_err()); @@ -554,6 +1160,8 @@ fn runner_max_fee_per_gas_gte_max_priority_fee_per_gas() { Vec::new(), false, // non-transactional true, // must be validated + None, + None, &::config().clone(), ); assert!(res.is_err()); @@ -577,6 +1185,8 @@ fn eip3607_transaction_from_contract() { Vec::new(), true, // transactional false, // not sure be validated + None, + None, &::config().clone(), ) { Err(RunnerError { @@ -600,52 
+1210,8 @@ fn eip3607_transaction_from_contract() { Vec::new(), false, // non-transactional true, // must be validated - &::config().clone(), - ) - .is_ok()); - }); -} - -#[test] -fn eip3607_transaction_from_precompile() { - new_test_ext().execute_with(|| { - // external transaction - match ::Runner::call( - // Precompile address. - H160::from_str("0000000000000000000000000000000000000001").unwrap(), - H160::from_str("1000000000000000000000000000000000000001").unwrap(), - Vec::new(), - U256::from(1u32), - 1000000, - None, None, None, - Vec::new(), - true, // transactional - false, // not sure be validated - &::config().clone(), - ) { - Err(RunnerError { - error: Error::TransactionMustComeFromEOA, - .. - }) => (), - _ => panic!("Should have failed"), - } - - // internal call - assert!(::Runner::call( - // Contract address. - H160::from_str("0000000000000000000000000000000000000001").unwrap(), - H160::from_str("1000000000000000000000000000000000000001").unwrap(), - Vec::new(), - U256::from(1u32), - 1000000, - None, - None, - None, - Vec::new(), - false, // non-transactional - true, // must be validated &::config().clone(), ) .is_ok()); diff --git a/frame/evm/test-vector-support/src/lib.rs b/frame/evm/test-vector-support/src/lib.rs index bfa41719b4..3566840969 100644 --- a/frame/evm/test-vector-support/src/lib.rs +++ b/frame/evm/test-vector-support/src/lib.rs @@ -80,6 +80,12 @@ impl PrecompileHandle for MockHandle { Ok(()) } + fn record_external_cost(&mut self, _: Option, _: Option) -> Result<(), ExitError> { + Ok(()) + } + + fn refund_external_cost(&mut self, _: Option, _: Option) {} + fn log(&mut self, _: H160, _: Vec, _: Vec) -> Result<(), ExitError> { unimplemented!() } diff --git a/primitives/evm/src/lib.rs b/primitives/evm/src/lib.rs index a0fcce9cd6..8f618c4b20 100644 --- a/primitives/evm/src/lib.rs +++ b/primitives/evm/src/lib.rs @@ -25,21 +25,20 @@ use frame_support::weights::{constants::WEIGHT_REF_TIME_PER_MILLIS, Weight}; use scale_codec::{Decode, Encode}; #[cfg(feature = "std")] use serde::{Deserialize, Serialize}; -use sp_core::{H160, U256}; +use sp_core::{H160, H256, U256}; use sp_runtime::Perbill; use sp_std::vec::Vec; pub use evm::{ backend::{Basic as Account, Log}, - executor::stack::IsPrecompileResult, - Config, ExitReason, + Config, ExitReason, Opcode, }; pub use self::{ precompile::{ - Context, ExitError, ExitRevert, ExitSucceed, LinearCostPrecompile, Precompile, - PrecompileFailure, PrecompileHandle, PrecompileOutput, PrecompileResult, PrecompileSet, - Transfer, + Context, ExitError, ExitRevert, ExitSucceed, IsPrecompileResult, LinearCostPrecompile, + Precompile, PrecompileFailure, PrecompileHandle, PrecompileOutput, PrecompileResult, + PrecompileSet, Transfer, }, validation::{ CheckEvmTransaction, CheckEvmTransactionConfig, CheckEvmTransactionInput, @@ -57,17 +56,124 @@ pub struct Vicinity { pub origin: H160, } +/// `System::Account` 16(hash) + 20 (key) + 52 (AccountInfo::max_encoded_len) +pub const ACCOUNT_BASIC_PROOF_SIZE: u64 = 88; +/// `AccountCodesMetadata` read, temptatively 16 (hash) + 20 (key) + 40 (CodeMetadata). +pub const ACCOUNT_CODES_METADATA_PROOF_SIZE: u64 = 76; +/// 16 (hash1) + 20 (key1) + 16 (hash2) + 32 (key2) + 32 (value) +pub const ACCOUNT_STORAGE_PROOF_SIZE: u64 = 116; +/// Fixed trie 32 byte hash. +pub const WRITE_PROOF_SIZE: u64 = 32; +/// Account basic proof size + 5 bytes max of `decode_len` call. 
+pub const IS_EMPTY_CHECK_PROOF_SIZE: u64 = 93; + +pub enum AccessedStorage { + AccountCodes(H160), + AccountStorages((H160, H256)), +} + +#[derive(Clone, Copy, Eq, PartialEq, Encode, Decode)] +#[cfg_attr(feature = "std", derive(Debug, Serialize, Deserialize))] +pub struct WeightInfo { + pub ref_time_limit: Option, + pub proof_size_limit: Option, + pub ref_time_usage: Option, + pub proof_size_usage: Option, +} + +impl WeightInfo { + pub fn new_from_weight_limit( + weight_limit: Option, + transaction_len: Option, + ) -> Result, &'static str> { + Ok(match (weight_limit, transaction_len) { + (None, _) => None, + (Some(weight_limit), Some(transaction_len)) + if weight_limit.proof_size() >= transaction_len => + { + Some(WeightInfo { + ref_time_limit: Some(weight_limit.ref_time()), + proof_size_limit: Some(weight_limit.proof_size()), + ref_time_usage: Some(0u64), + proof_size_usage: Some(transaction_len), + }) + } + (Some(weight_limit), None) => Some(WeightInfo { + ref_time_limit: Some(weight_limit.ref_time()), + proof_size_limit: None, + ref_time_usage: Some(0u64), + proof_size_usage: None, + }), + _ => return Err("must provide Some valid weight limit or None"), + }) + } + fn try_consume(&self, cost: u64, limit: u64, usage: u64) -> Result { + let usage = usage.checked_add(cost).ok_or(ExitError::OutOfGas)?; + if usage > limit { + return Err(ExitError::OutOfGas); + } + Ok(usage) + } + pub fn try_record_ref_time_or_fail(&mut self, cost: u64) -> Result<(), ExitError> { + if let (Some(ref_time_usage), Some(ref_time_limit)) = + (self.ref_time_usage, self.ref_time_limit) + { + let ref_time_usage = self.try_consume(cost, ref_time_limit, ref_time_usage)?; + if ref_time_usage > ref_time_limit { + return Err(ExitError::OutOfGas); + } + self.ref_time_usage = Some(ref_time_usage); + } + Ok(()) + } + pub fn try_record_proof_size_or_fail(&mut self, cost: u64) -> Result<(), ExitError> { + if let (Some(proof_size_usage), Some(proof_size_limit)) = + (self.proof_size_usage, self.proof_size_limit) + { + let proof_size_usage = self.try_consume(cost, proof_size_limit, proof_size_usage)?; + if proof_size_usage > proof_size_limit { + return Err(ExitError::OutOfGas); + } + self.proof_size_usage = Some(proof_size_usage); + } + Ok(()) + } + pub fn refund_proof_size(&mut self, amount: u64) { + if let Some(proof_size_usage) = self.proof_size_usage { + let proof_size_usage = proof_size_usage.saturating_sub(amount); + self.proof_size_usage = Some(proof_size_usage); + } + } + pub fn refund_ref_time(&mut self, amount: u64) { + if let Some(ref_time_usage) = self.ref_time_usage { + let ref_time_usage = ref_time_usage.saturating_sub(amount); + self.ref_time_usage = Some(ref_time_usage); + } + } +} + #[derive(Clone, Eq, PartialEq, Encode, Decode)] #[cfg_attr(feature = "std", derive(Debug, Serialize, Deserialize))] -pub struct ExecutionInfo { +pub struct UsedGas { + /// The used_gas as returned by the evm gasometer on exit. + pub standard: U256, + /// The result of applying a gas ratio to the most used + /// external metric during the evm execution. 
+ pub effective: U256, +} + +#[derive(Clone, Eq, PartialEq, Encode, Decode)] +#[cfg_attr(feature = "std", derive(Debug, Serialize, Deserialize))] +pub struct ExecutionInfoV2 { pub exit_reason: ExitReason, pub value: T, - pub used_gas: U256, + pub used_gas: UsedGas, + pub weight_info: Option, pub logs: Vec, } -pub type CallInfo = ExecutionInfo>; -pub type CreateInfo = ExecutionInfo; +pub type CallInfo = ExecutionInfoV2>; +pub type CreateInfo = ExecutionInfoV2; #[derive(Clone, Eq, PartialEq, Encode, Decode)] #[cfg_attr(feature = "std", derive(Debug, Serialize, Deserialize))] @@ -76,6 +182,15 @@ pub enum CallOrCreateInfo { Create(CreateInfo), } +#[derive(Clone, Eq, PartialEq, Encode, Decode)] +#[cfg_attr(feature = "std", derive(Debug, Serialize, Deserialize))] +pub struct ExecutionInfo { + pub exit_reason: ExitReason, + pub value: T, + pub used_gas: U256, + pub logs: Vec, +} + /// Account definition used for genesis block construction. #[cfg(feature = "std")] #[derive(Clone, Eq, PartialEq, Encode, Decode, Debug, Serialize, Deserialize)] diff --git a/primitives/evm/src/precompile.rs b/primitives/evm/src/precompile.rs index 4996445cb6..aab7299d27 100644 --- a/primitives/evm/src/precompile.rs +++ b/primitives/evm/src/precompile.rs @@ -16,7 +16,9 @@ // limitations under the License. pub use evm::{ - executor::stack::{PrecompileFailure, PrecompileHandle, PrecompileOutput, PrecompileSet}, + executor::stack::{ + IsPrecompileResult, PrecompileFailure, PrecompileHandle, PrecompileOutput, PrecompileSet, + }, Context, ExitError, ExitRevert, ExitSucceed, Transfer, }; use sp_std::vec::Vec; diff --git a/primitives/evm/src/validation.rs b/primitives/evm/src/validation.rs index 3627f1d64d..a9ea6963d1 100644 --- a/primitives/evm/src/validation.rs +++ b/primitives/evm/src/validation.rs @@ -17,7 +17,7 @@ #![allow(clippy::comparison_chain)] pub use evm::backend::Basic as Account; -use frame_support::sp_runtime::traits::UniqueSaturatedInto; +use frame_support::{sp_runtime::traits::UniqueSaturatedInto, weights::Weight}; use sp_core::{H160, H256, U256}; use sp_std::vec::Vec; @@ -48,6 +48,8 @@ pub struct CheckEvmTransactionConfig<'config> { pub struct CheckEvmTransaction<'config, E: From> { pub config: CheckEvmTransactionConfig<'config>, pub transaction: CheckEvmTransactionInput, + pub weight_limit: Option, + pub proof_size_base_cost: Option, _marker: sp_std::marker::PhantomData, } @@ -68,10 +70,14 @@ impl<'config, E: From> CheckEvmTransaction<'config, pub fn new( config: CheckEvmTransactionConfig<'config>, transaction: CheckEvmTransactionInput, + weight_limit: Option, + proof_size_base_cost: Option, ) -> Self { CheckEvmTransaction { config, transaction, + weight_limit, + proof_size_base_cost, _marker: Default::default(), } } @@ -178,6 +184,17 @@ impl<'config, E: From> CheckEvmTransaction<'config, pub fn validate_common(&self) -> Result<&Self, E> { if self.config.is_transactional { + // Try to subtract the proof_size_base_cost from the Weight proof_size limit or fail. + // Validate the weight limit can afford recording the proof size cost. + if let (Some(weight_limit), Some(proof_size_base_cost)) = + (self.weight_limit, self.proof_size_base_cost) + { + let _ = weight_limit + .proof_size() + .checked_sub(proof_size_base_cost) + .ok_or(InvalidEvmTransactionError::GasLimitTooLow)?; + } + // We must ensure a transaction can pay the cost of its data bytes. // If it can't it should not be included in a block. 
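			// As an illustration of that intrinsic cost (standard Ethereum gas
			// schedule, not specific to this change): the pre-execution charge is
			// roughly
			//
			//     21_000 + 4 * zero_data_bytes + 16 * non_zero_data_bytes
			//
			// post-Istanbul, which is what the gasometer below computes; a gas limit
			// below that amount is rejected as `GasLimitTooLow`.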
let mut gasometer = evm::gasometer::Gasometer::new( @@ -257,6 +274,8 @@ mod tests { pub max_fee_per_gas: Option, pub max_priority_fee_per_gas: Option, pub value: U256, + pub weight_limit: Option, + pub proof_size_base_cost: Option, } impl Default for TestCase { @@ -273,6 +292,8 @@ mod tests { max_fee_per_gas: Some(U256::from(1_000_000_000u128)), max_priority_fee_per_gas: Some(U256::from(1_000_000_000u128)), value: U256::from(1u8), + weight_limit: None, + proof_size_base_cost: None, } } } @@ -290,6 +311,8 @@ mod tests { max_fee_per_gas, max_priority_fee_per_gas, value, + weight_limit, + proof_size_base_cost, } = input; CheckEvmTransaction::::new( CheckEvmTransactionConfig { @@ -311,6 +334,8 @@ mod tests { value, access_list: vec![], }, + weight_limit, + proof_size_base_cost, ) } @@ -332,6 +357,16 @@ mod tests { test_env(input) } + fn transaction_gas_limit_low_proof_size<'config>( + is_transactional: bool, + ) -> CheckEvmTransaction<'config, TestError> { + let mut input = TestCase::default(); + input.weight_limit = Some(Weight::from_parts(1, 1)); + input.proof_size_base_cost = Some(2); + input.is_transactional = is_transactional; + test_env(input) + } + fn transaction_gas_limit_high<'config>() -> CheckEvmTransaction<'config, TestError> { let mut input = TestCase::default(); input.blockchain_gas_limit = U256::from(1u8); @@ -500,6 +535,42 @@ mod tests { assert!(res.is_ok()); } + #[test] + // Gas limit too low for proof size recording transactional fails in pool and in block. + fn validate_in_pool_and_block_transactional_fails_gas_limit_too_low_proof_size() { + let who = Account { + balance: U256::from(1_000_000u128), + nonce: U256::zero(), + }; + let is_transactional = true; + let test = transaction_gas_limit_low_proof_size(is_transactional); + // Pool + let res = test.validate_in_pool_for(&who); + assert!(res.is_err()); + assert_eq!(res.unwrap_err(), TestError::GasLimitTooLow); + // Block + let res = test.validate_in_block_for(&who); + assert!(res.is_err()); + assert_eq!(res.unwrap_err(), TestError::GasLimitTooLow); + } + + #[test] + // Gas limit too low non-transactional succeeds in pool and in block. + fn validate_in_pool_and_block_non_transactional_succeeds_gas_limit_too_low_proof_size() { + let who = Account { + balance: U256::from(1_000_000u128), + nonce: U256::zero(), + }; + let is_transactional = false; + let test = transaction_gas_limit_low_proof_size(is_transactional); + // Pool + let res = test.validate_in_pool_for(&who); + assert!(res.is_ok()); + // Block + let res = test.validate_in_block_for(&who); + assert!(res.is_ok()); + } + #[test] // Gas limit too high fails in pool and in block. fn validate_in_pool_for_fails_gas_limit_too_high() { diff --git a/primitives/rpc/src/lib.rs b/primitives/rpc/src/lib.rs index e00d447dd1..1ea4db3460 100644 --- a/primitives/rpc/src/lib.rs +++ b/primitives/rpc/src/lib.rs @@ -79,7 +79,7 @@ impl RuntimeStorageOverride for () { sp_api::decl_runtime_apis! { /// API necessary for Ethereum-compatibility layer. - #[api_version(4)] + #[api_version(5)] pub trait EthereumRuntimeRPCApi { /// Returns runtime defined pallet_evm::ChainId. fn chain_id() -> u64; @@ -104,7 +104,7 @@ sp_api::decl_runtime_apis! { gas_price: Option, nonce: Option, estimate: bool, - ) -> Result; + ) -> Result>, sp_runtime::DispatchError>; #[changed_in(4)] fn call( from: H160, @@ -116,7 +116,8 @@ sp_api::decl_runtime_apis! 
{ max_priority_fee_per_gas: Option, nonce: Option, estimate: bool, - ) -> Result; + ) -> Result>, sp_runtime::DispatchError>; + #[changed_in(5)] fn call( from: H160, to: H160, @@ -128,7 +129,19 @@ sp_api::decl_runtime_apis! { nonce: Option, estimate: bool, access_list: Option)>>, - ) -> Result; + ) -> Result>, sp_runtime::DispatchError>; + fn call( + from: H160, + to: H160, + data: Vec, + value: U256, + gas_limit: U256, + max_fee_per_gas: Option, + max_priority_fee_per_gas: Option, + nonce: Option, + estimate: bool, + access_list: Option)>>, + ) -> Result>, sp_runtime::DispatchError>; /// Returns a frame_ethereum::create response. #[changed_in(2)] fn create( @@ -139,7 +152,7 @@ sp_api::decl_runtime_apis! { gas_price: Option, nonce: Option, estimate: bool, - ) -> Result; + ) -> Result, sp_runtime::DispatchError>; #[changed_in(4)] fn create( from: H160, @@ -150,7 +163,19 @@ sp_api::decl_runtime_apis! { max_priority_fee_per_gas: Option, nonce: Option, estimate: bool, - ) -> Result; + ) -> Result, sp_runtime::DispatchError>; + #[changed_in(5)] + fn create( + from: H160, + data: Vec, + value: U256, + gas_limit: U256, + max_fee_per_gas: Option, + max_priority_fee_per_gas: Option, + nonce: Option, + estimate: bool, + access_list: Option)>>, + ) -> Result, sp_runtime::DispatchError>; fn create( from: H160, data: Vec, @@ -161,7 +186,7 @@ sp_api::decl_runtime_apis! { nonce: Option, estimate: bool, access_list: Option)>>, - ) -> Result; + ) -> Result, sp_runtime::DispatchError>; /// Return the current block. Legacy. #[changed_in(2)] fn current_block() -> Option; diff --git a/template/runtime/src/lib.rs b/template/runtime/src/lib.rs index 59981a59a2..947c00df27 100644 --- a/template/runtime/src/lib.rs +++ b/template/runtime/src/lib.rs @@ -311,9 +311,11 @@ impl> FindAuthor for FindAuthorTruncated { } const BLOCK_GAS_LIMIT: u64 = 75_000_000; +const MAX_POV_SIZE: u64 = 5 * 1024 * 1024; parameter_types! { pub BlockGasLimit: U256 = U256::from(BLOCK_GAS_LIMIT); + pub const GasLimitPovSizeRatio: u64 = BLOCK_GAS_LIMIT.saturating_div(MAX_POV_SIZE); pub PrecompilesValue: FrontierPrecompiles = FrontierPrecompiles::<_>::new(); pub WeightPerGas: Weight = Weight::from_parts(weight_per_gas(BLOCK_GAS_LIMIT, NORMAL_DISPATCH_RATIO, WEIGHT_MILLISECS_PER_BLOCK), 0); } @@ -336,6 +338,7 @@ impl pallet_evm::Config for Runtime { type OnChargeTransaction = (); type OnCreate = (); type FindAuthor = FindAuthorTruncated; + type GasLimitPovSizeRatio = GasLimitPovSizeRatio; type Timestamp = Timestamp; type WeightInfo = pallet_evm::weights::SubstrateWeight; } @@ -680,6 +683,9 @@ impl_runtime_apis! { access_list.unwrap_or_default(), is_transactional, validate, + // TODO we probably want to support external cost recording in non-transactional calls + None, + None, evm_config, ).map_err(|err| err.error.into()) } @@ -717,6 +723,9 @@ impl_runtime_apis! 
{ access_list.unwrap_or_default(), is_transactional, validate, + // TODO we probably want to support external cost recording in non-transactional calls + None, + None, evm_config, ).map_err(|err| err.error.into()) } From ab3d3557c12d3571d0ef356aa9f75732872cd9f5 Mon Sep 17 00:00:00 2001 From: tgmichel Date: Thu, 22 Jun 2023 02:21:42 +0200 Subject: [PATCH 02/12] `eth_getLogs` return expected error/message for unknown hash (#1079) * `eth_getLogs` return expected error/message for unknown hash * prettier --- client/rpc/src/eth/filter.rs | 2 +- ts-tests/tests/test-log-filtering.ts | 10 ++++++++++ 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/client/rpc/src/eth/filter.rs b/client/rpc/src/eth/filter.rs index 593d4c2b71..fe3f1e2502 100644 --- a/client/rpc/src/eth/filter.rs +++ b/client/rpc/src/eth/filter.rs @@ -402,7 +402,7 @@ where .map_err(|err| internal_err(format!("{:?}", err)))? { Some(hash) => hash, - _ => return Ok(Vec::new()), + _ => return Err(crate::err(-32000, "unknown block", None)), }; let schema = fc_storage::onchain_storage_schema(client.as_ref(), substrate_hash); diff --git a/ts-tests/tests/test-log-filtering.ts b/ts-tests/tests/test-log-filtering.ts index d44c2beab4..b1bf7d95e9 100644 --- a/ts-tests/tests/test-log-filtering.ts +++ b/ts-tests/tests/test-log-filtering.ts @@ -83,4 +83,14 @@ describeWithFrontier("Frontier RPC (Log filtering)", (context) => { expect(request.result.length).to.be.eq(0); } }); + + step("EthApi::getLogs - should return `unknown block`.", async function () { + let request = await customRequest(context.web3, "eth_getLogs", [ + { + blockHash: "0x1234000000000000000000000000000000000000000000000000000000000000", + }, + ]); + expect(request.error.message).to.be.equal("unknown block"); + expect(request.error.code).to.be.equal(-32000); + }); }); From 9f49f7e4e7911311da07f9da2ac87aad79523d5a Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Fri, 23 Jun 2023 12:50:32 +0200 Subject: [PATCH 03/12] Move record_external_operation to StackState trait (#1085) * Move record_external_operation to StackState trait * Fix lints * Update evm pin --- Cargo.lock | 8 +-- frame/evm/src/runner/stack.rs | 110 +++++++++++++++++----------------- 2 files changed, 59 insertions(+), 59 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index bccd7d130d..fef979bbe4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1983,7 +1983,7 @@ checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" [[package]] name = "evm" version = "0.39.1" -source = "git+https://github.com/rust-blockchain/evm?branch=master#e85c34f96e3237c09955193b41154030b78119c5" +source = "git+https://github.com/rust-blockchain/evm?branch=master#b7b82c7e1fc57b7449d6dfa6826600de37cc1e65" dependencies = [ "auto_impl", "environmental", @@ -2003,7 +2003,7 @@ dependencies = [ [[package]] name = "evm-core" version = "0.39.0" -source = "git+https://github.com/rust-blockchain/evm?branch=master#e85c34f96e3237c09955193b41154030b78119c5" +source = "git+https://github.com/rust-blockchain/evm?branch=master#b7b82c7e1fc57b7449d6dfa6826600de37cc1e65" dependencies = [ "parity-scale-codec", "primitive-types", @@ -2014,7 +2014,7 @@ dependencies = [ [[package]] name = "evm-gasometer" version = "0.39.0" -source = "git+https://github.com/rust-blockchain/evm?branch=master#e85c34f96e3237c09955193b41154030b78119c5" +source = "git+https://github.com/rust-blockchain/evm?branch=master#b7b82c7e1fc57b7449d6dfa6826600de37cc1e65" dependencies = [ "environmental", "evm-core", @@ -2025,7 +2025,7 @@ dependencies = [ [[package]] name = 
"evm-runtime" version = "0.39.0" -source = "git+https://github.com/rust-blockchain/evm?branch=master#e85c34f96e3237c09955193b41154030b78119c5" +source = "git+https://github.com/rust-blockchain/evm?branch=master#b7b82c7e1fc57b7449d6dfa6826600de37cc1e65" dependencies = [ "auto_impl", "environmental", diff --git a/frame/evm/src/runner/stack.rs b/frame/evm/src/runner/stack.rs index cdff3369ce..0ac8caf0c0 100644 --- a/frame/evm/src/runner/stack.rs +++ b/frame/evm/src/runner/stack.rs @@ -768,61 +768,6 @@ where } } - fn record_external_operation(&mut self, op: evm::ExternalOperation) -> Result<(), ExitError> { - let size_limit: u64 = self - .metadata() - .gasometer() - .config() - .create_contract_limit - .unwrap_or_default() as u64; - - let (weight_info, recorded) = self.info_mut(); - - if let Some(weight_info) = weight_info { - match op { - evm::ExternalOperation::AccountBasicRead => { - weight_info.try_record_proof_size_or_fail(ACCOUNT_BASIC_PROOF_SIZE)? - } - evm::ExternalOperation::AddressCodeRead(address) => { - let maybe_record = !recorded.account_codes.contains(&address); - // Skip if the address has been already recorded this block - if maybe_record { - // First we record account emptiness check. - // Transfers to EOAs with standard 21_000 gas limit are able to - // pay for this pov size. - weight_info.try_record_proof_size_or_fail(IS_EMPTY_CHECK_PROOF_SIZE)?; - - if >::decode_len(address).unwrap_or(0) == 0 { - return Ok(()); - } - // Try to record fixed sized `AccountCodesMetadata` read - // Tentatively 16 + 20 + 40 - weight_info - .try_record_proof_size_or_fail(ACCOUNT_CODES_METADATA_PROOF_SIZE)?; - if let Some(meta) = >::get(address) { - weight_info.try_record_proof_size_or_fail(meta.size)?; - } else { - // If it does not exist, try to record `create_contract_limit` first. - weight_info.try_record_proof_size_or_fail(size_limit)?; - let meta = Pallet::::account_code_metadata(address); - let actual_size = meta.size; - // Refund if applies - weight_info.refund_proof_size(size_limit.saturating_sub(actual_size)); - } - recorded.account_codes.push(address); - } - } - evm::ExternalOperation::IsEmpty => { - weight_info.try_record_proof_size_or_fail(IS_EMPTY_CHECK_PROOF_SIZE)? - } - evm::ExternalOperation::Write => { - weight_info.try_record_proof_size_or_fail(WRITE_PROOF_SIZE)? - } - }; - } - Ok(()) - } - fn code(&self, address: H160) -> Vec { >::get(address) } @@ -994,6 +939,61 @@ where >::account_code_metadata(address).hash } + fn record_external_operation(&mut self, op: evm::ExternalOperation) -> Result<(), ExitError> { + let size_limit: u64 = self + .metadata() + .gasometer() + .config() + .create_contract_limit + .unwrap_or_default() as u64; + + let (weight_info, recorded) = self.info_mut(); + + if let Some(weight_info) = weight_info { + match op { + evm::ExternalOperation::AccountBasicRead => { + weight_info.try_record_proof_size_or_fail(ACCOUNT_BASIC_PROOF_SIZE)? + } + evm::ExternalOperation::AddressCodeRead(address) => { + let maybe_record = !recorded.account_codes.contains(&address); + // Skip if the address has been already recorded this block + if maybe_record { + // First we record account emptiness check. + // Transfers to EOAs with standard 21_000 gas limit are able to + // pay for this pov size. 
+ weight_info.try_record_proof_size_or_fail(IS_EMPTY_CHECK_PROOF_SIZE)?; + + if >::decode_len(address).unwrap_or(0) == 0 { + return Ok(()); + } + // Try to record fixed sized `AccountCodesMetadata` read + // Tentatively 16 + 20 + 40 + weight_info + .try_record_proof_size_or_fail(ACCOUNT_CODES_METADATA_PROOF_SIZE)?; + if let Some(meta) = >::get(address) { + weight_info.try_record_proof_size_or_fail(meta.size)?; + } else { + // If it does not exist, try to record `create_contract_limit` first. + weight_info.try_record_proof_size_or_fail(size_limit)?; + let meta = Pallet::::account_code_metadata(address); + let actual_size = meta.size; + // Refund if applies + weight_info.refund_proof_size(size_limit.saturating_sub(actual_size)); + } + recorded.account_codes.push(address); + } + } + evm::ExternalOperation::IsEmpty => { + weight_info.try_record_proof_size_or_fail(IS_EMPTY_CHECK_PROOF_SIZE)? + } + evm::ExternalOperation::Write => { + weight_info.try_record_proof_size_or_fail(WRITE_PROOF_SIZE)? + } + }; + } + Ok(()) + } + fn record_external_dynamic_opcode_cost( &mut self, opcode: Opcode, From 4cde307601cba43e7436815ba7de36cc69ea944f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 23 Jun 2023 12:51:47 +0200 Subject: [PATCH 04/12] Bump @openzeppelin/contracts (#1081) Bumps [@openzeppelin/contracts](https://github.com/OpenZeppelin/openzeppelin-contracts) from 4.8.3 to 4.9.2. - [Release notes](https://github.com/OpenZeppelin/openzeppelin-contracts/releases) - [Changelog](https://github.com/OpenZeppelin/openzeppelin-contracts/blob/v4.9.2/CHANGELOG.md) - [Commits](https://github.com/OpenZeppelin/openzeppelin-contracts/compare/v4.8.3...v4.9.2) --- updated-dependencies: - dependency-name: "@openzeppelin/contracts" dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- template/examples/contract-erc20/truffle/package-lock.json | 6 +++--- template/examples/contract-erc20/truffle/package.json | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/template/examples/contract-erc20/truffle/package-lock.json b/template/examples/contract-erc20/truffle/package-lock.json index 6baed80830..1dcf0d2e76 100644 --- a/template/examples/contract-erc20/truffle/package-lock.json +++ b/template/examples/contract-erc20/truffle/package-lock.json @@ -430,9 +430,9 @@ "optional": true }, "@openzeppelin/contracts": { - "version": "4.8.3", - "resolved": "https://registry.npmjs.org/@openzeppelin/contracts/-/contracts-4.8.3.tgz", - "integrity": "sha512-bQHV8R9Me8IaJoJ2vPG4rXcL7seB7YVuskr4f+f5RyOStSZetwzkWtoqDMl5erkBJy0lDRUnIR2WIkPiC0GJlg==" + "version": "4.9.2", + "resolved": "https://registry.npmjs.org/@openzeppelin/contracts/-/contracts-4.9.2.tgz", + "integrity": "sha512-mO+y6JaqXjWeMh9glYVzVu8HYPGknAAnWyxTRhGeckOruyXQMNnlcW6w/Dx9ftLeIQk6N+ZJFuVmTwF7lEIFrg==" }, "@protobufjs/aspromise": { "version": "1.1.2", diff --git a/template/examples/contract-erc20/truffle/package.json b/template/examples/contract-erc20/truffle/package.json index fb1b3a8f2d..311b9e0ab7 100644 --- a/template/examples/contract-erc20/truffle/package.json +++ b/template/examples/contract-erc20/truffle/package.json @@ -2,7 +2,7 @@ "name": "substrate-evm-contracts", "version": "0.0.0", "dependencies": { - "@openzeppelin/contracts": "^4.8.3", + "@openzeppelin/contracts": "^4.9.2", "truffle": "^5.6.7" } } From 036e6afb856b02bb642df97854eaa47b4f16d33d Mon Sep 17 00:00:00 2001 From: Vedhavyas Singareddi Date: Fri, 23 Jun 2023 16:23:23 +0530 Subject: [PATCH 05/12] track only unique block hashes when writing to DB (#1087) Seems like import notifications is sending the block hash multiple times and everytime its called, even the same hash gets written to DB event though the DB has already seen it. 
This change should ensure we only write unique fork hashes --- client/db/src/kv/mod.rs | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/client/db/src/kv/mod.rs b/client/db/src/kv/mod.rs index 74d3c10ed3..03b00f01c0 100644 --- a/client/db/src/kv/mod.rs +++ b/client/db/src/kv/mod.rs @@ -308,13 +308,15 @@ impl MappingDb { let substrate_hashes = match self.block_hash(&commitment.ethereum_block_hash) { Ok(Some(mut data)) => { - data.push(commitment.block_hash); - log::warn!( - target: "fc-db", - "Possible equivocation at ethereum block hash {} {:?}", - &commitment.ethereum_block_hash, - &data - ); + if !data.contains(&commitment.block_hash) { + data.push(commitment.block_hash); + log::warn!( + target: "fc-db", + "Possible equivocation at ethereum block hash {} {:?}", + &commitment.ethereum_block_hash, + &data + ); + } data } _ => vec![commitment.block_hash], From 2cfa1d618f6a8afde967ac9d892cdcf23421c595 Mon Sep 17 00:00:00 2001 From: Nisheeth Barthwal Date: Fri, 23 Jun 2023 12:57:49 +0200 Subject: [PATCH 06/12] add sql indexer behavior tests, remove restriction for normal sync (#1077) * add tests, remove restriction for normal sync * refactor test name --- client/mapping-sync/src/sql/mod.rs | 688 ++++++++++++++++++++++++++++- 1 file changed, 675 insertions(+), 13 deletions(-) diff --git a/client/mapping-sync/src/sql/mod.rs b/client/mapping-sync/src/sql/mod.rs index 213d7a0f61..343229f9a6 100644 --- a/client/mapping-sync/src/sql/mod.rs +++ b/client/mapping-sync/src/sql/mod.rs @@ -86,7 +86,6 @@ where tokio::task::spawn(async move { while let Some(cmd) = rx.recv().await { log::debug!(target: "frontier-sql", "💬 Recv Worker Command {cmd:?}"); - println!("💬 Recv Worker Command {cmd:?}"); match cmd { WorkerCommand::ResumeSync => { // Attempt to resume from last indexed block. If there is no data in the db, sync genesis. @@ -181,17 +180,12 @@ where indexer_backend: Arc>, import_notifications: sc_client_api::ImportNotifications, worker_config: SyncWorkerConfig, - sync_strategy: SyncStrategy, + _sync_strategy: SyncStrategy, sync_oracle: Arc, pubsub_notification_sinks: Arc< EthereumBlockNotificationSinks>, >, ) { - // work in progress for `SyncStrategy::Normal` to also index non-best blocks. 
- if sync_strategy == SyncStrategy::Normal { - panic!("'SyncStrategy::Normal' is not supported") - } - let tx = Self::spawn_worker( client.clone(), substrate_backend.clone(), @@ -202,7 +196,6 @@ where // Resume sync from the last indexed block until we reach an already indexed parent tx.send(WorkerCommand::ResumeSync).await.ok(); - // check missing blocks every interval let tx2 = tx.clone(); tokio::task::spawn(async move { @@ -478,7 +471,11 @@ async fn index_genesis_block( mod test { use super::*; - use std::{collections::BTreeMap, path::Path, sync::Arc}; + use std::{ + collections::BTreeMap, + path::Path, + sync::{Arc, Mutex}, + }; use futures::executor; use scale_codec::Encode; @@ -1170,7 +1167,7 @@ mod test { .hash(sp_runtime::traits::Zero::zero()) .unwrap() .expect("genesis hash"); - let mut block_hashes: Vec = vec![]; + let mut best_block_hashes: Vec = vec![]; for _block_number in 1..=5 { let builder = client .new_block_at(parent_hash, ethereum_digest(), false) @@ -1178,12 +1175,12 @@ mod test { let block = builder.build().unwrap().block; let block_hash = block.header.hash(); executor::block_on(client.import(BlockOrigin::Own, block)).unwrap(); - block_hashes.insert(0, block_hash.clone()); + best_block_hashes.insert(0, block_hash.clone()); parent_hash = block_hash; } // Mark the block as canon and indexed - let block_resume_at = block_hashes[0]; + let block_resume_at = best_block_hashes[0]; sqlx::query("INSERT INTO blocks(substrate_block_hash, ethereum_block_hash, ethereum_storage_schema, block_number, is_canon) VALUES (?, ?, ?, 5, 1)") .bind(block_resume_at.as_bytes()) .bind(H256::zero().as_bytes()) @@ -1235,7 +1232,672 @@ mod test { .iter() .map(|row| H256::from_slice(&row.get::, _>(0)[..])) .collect::>(); - let expected_imported_blocks = block_hashes.clone(); + let expected_imported_blocks = best_block_hashes.clone(); + assert_eq!(expected_imported_blocks, actual_imported_blocks); + } + + struct TestSyncOracle { + sync_status: Arc>, + } + impl sp_consensus::SyncOracle for TestSyncOracle { + fn is_major_syncing(&self) -> bool { + *self.sync_status.lock().expect("failed getting lock") + } + fn is_offline(&self) -> bool { + false + } + } + + struct TestSyncOracleWrapper { + oracle: Arc, + sync_status: Arc>, + } + impl TestSyncOracleWrapper { + fn new() -> Self { + let sync_status = Arc::new(Mutex::new(false)); + TestSyncOracleWrapper { + oracle: Arc::new(TestSyncOracle { + sync_status: sync_status.clone(), + }), + sync_status, + } + } + fn set_sync_status(&mut self, value: bool) { + *self.sync_status.lock().expect("failed getting lock") = value; + } + } + + #[tokio::test] + async fn sync_strategy_normal_indexes_best_blocks_if_not_major_sync() { + let tmp = tempdir().expect("create a temporary directory"); + let builder = TestClientBuilder::new().add_extra_storage( + PALLET_ETHEREUM_SCHEMA.to_vec(), + Encode::encode(&EthereumStorageSchema::V3), + ); + let backend = builder.backend(); + let (client, _) = + builder.build_with_native_executor::(None); + let mut client = Arc::new(client); + let mut overrides_map = BTreeMap::new(); + overrides_map.insert( + EthereumStorageSchema::V3, + Box::new(SchemaV3Override::new(client.clone())) as Box>, + ); + let overrides = Arc::new(OverrideHandle { + schemas: overrides_map, + fallback: Box::new(SchemaV3Override::new(client.clone())), + }); + let indexer_backend = fc_db::sql::Backend::new( + fc_db::sql::BackendConfig::Sqlite(fc_db::sql::SqliteBackendConfig { + path: Path::new("sqlite:///") + .join(tmp.path()) + .join("test.db3") + .to_str() + 
.unwrap(), + create_if_missing: true, + cache_size: 204800, + thread_count: 4, + }), + 100, + None, + overrides.clone(), + ) + .await + .expect("indexer pool to be created"); + + // Pool + let pool = indexer_backend.pool().clone(); + + // Spawn indexer task + let pubsub_notification_sinks: crate::EthereumBlockNotificationSinks< + crate::EthereumBlockNotification, + > = Default::default(); + let pubsub_notification_sinks = Arc::new(pubsub_notification_sinks); + let mut sync_oracle_wrapper = TestSyncOracleWrapper::new(); + let sync_oracle = sync_oracle_wrapper.oracle.clone(); + let client_inner = client.clone(); + tokio::task::spawn(async move { + crate::sql::SyncWorker::run( + client_inner.clone(), + backend.clone(), + Arc::new(indexer_backend), + client_inner.import_notification_stream(), + SyncWorkerConfig { + read_notification_timeout: Duration::from_secs(10), + check_indexed_blocks_interval: Duration::from_secs(60), + }, + SyncStrategy::Normal, + Arc::new(sync_oracle), + pubsub_notification_sinks.clone(), + ) + .await + }); + // Enough time for startup + futures_timer::Delay::new(std::time::Duration::from_millis(200)).await; + + // Import 3 blocks as part of normal operation, storing them oldest first. + sync_oracle_wrapper.set_sync_status(false); + let mut parent_hash = client + .hash(sp_runtime::traits::Zero::zero()) + .unwrap() + .expect("genesis hash"); + let mut best_block_hashes: Vec = vec![]; + for _block_number in 1..=3 { + let builder = client + .new_block_at(parent_hash, ethereum_digest(), false) + .unwrap(); + let block = builder.build().unwrap().block; + let block_hash = block.header.hash(); + + executor::block_on(client.import(BlockOrigin::Own, block)).unwrap(); + best_block_hashes.push(block_hash.clone()); + parent_hash = block_hash; + } + + // Enough time for indexing + futures_timer::Delay::new(std::time::Duration::from_millis(3000)).await; + + // Test the chain is correctly indexed. 
+ let actual_imported_blocks = + sqlx::query("SELECT substrate_block_hash, is_canon, block_number FROM blocks") + .fetch_all(&pool) + .await + .expect("test query result") + .iter() + .map(|row| H256::from_slice(&row.get::, _>(0)[..])) + .collect::>(); + let expected_imported_blocks = best_block_hashes.clone(); + assert_eq!(expected_imported_blocks, actual_imported_blocks); + } + + #[tokio::test] + async fn sync_strategy_normal_ignores_non_best_block_if_not_major_sync() { + let tmp = tempdir().expect("create a temporary directory"); + let builder = TestClientBuilder::new().add_extra_storage( + PALLET_ETHEREUM_SCHEMA.to_vec(), + Encode::encode(&EthereumStorageSchema::V3), + ); + let backend = builder.backend(); + let (client, _) = + builder.build_with_native_executor::(None); + let mut client = Arc::new(client); + let mut overrides_map = BTreeMap::new(); + overrides_map.insert( + EthereumStorageSchema::V3, + Box::new(SchemaV3Override::new(client.clone())) as Box>, + ); + let overrides = Arc::new(OverrideHandle { + schemas: overrides_map, + fallback: Box::new(SchemaV3Override::new(client.clone())), + }); + let indexer_backend = fc_db::sql::Backend::new( + fc_db::sql::BackendConfig::Sqlite(fc_db::sql::SqliteBackendConfig { + path: Path::new("sqlite:///") + .join(tmp.path()) + .join("test.db3") + .to_str() + .unwrap(), + create_if_missing: true, + cache_size: 204800, + thread_count: 4, + }), + 100, + None, + overrides.clone(), + ) + .await + .expect("indexer pool to be created"); + + // Pool + let pool = indexer_backend.pool().clone(); + + // Spawn indexer task + let pubsub_notification_sinks: crate::EthereumBlockNotificationSinks< + crate::EthereumBlockNotification, + > = Default::default(); + let pubsub_notification_sinks = Arc::new(pubsub_notification_sinks); + let mut sync_oracle_wrapper = TestSyncOracleWrapper::new(); + let sync_oracle = sync_oracle_wrapper.oracle.clone(); + let client_inner = client.clone(); + tokio::task::spawn(async move { + crate::sql::SyncWorker::run( + client_inner.clone(), + backend.clone(), + Arc::new(indexer_backend), + client_inner.import_notification_stream(), + SyncWorkerConfig { + read_notification_timeout: Duration::from_secs(10), + check_indexed_blocks_interval: Duration::from_secs(60), + }, + SyncStrategy::Normal, + Arc::new(sync_oracle), + pubsub_notification_sinks.clone(), + ) + .await + }); + // Enough time for startup + futures_timer::Delay::new(std::time::Duration::from_millis(200)).await; + + // Import 3 blocks as part of normal operation, storing them oldest first. + sync_oracle_wrapper.set_sync_status(false); + let mut parent_hash = client + .hash(sp_runtime::traits::Zero::zero()) + .unwrap() + .expect("genesis hash"); + let mut best_block_hashes: Vec = vec![]; + for _block_number in 1..=3 { + let builder = client + .new_block_at(parent_hash, ethereum_digest(), false) + .unwrap(); + let block = builder.build().unwrap().block; + let block_hash = block.header.hash(); + + executor::block_on(client.import(BlockOrigin::Own, block)).unwrap(); + best_block_hashes.push(block_hash.clone()); + parent_hash = block_hash; + } + + // create non-best block + let builder = client + .new_block_at(best_block_hashes[0], ethereum_digest(), false) + .unwrap(); + let block = builder.build().unwrap().block; + + executor::block_on(client.import(BlockOrigin::Own, block)).unwrap(); + + // Enough time for indexing + futures_timer::Delay::new(std::time::Duration::from_millis(3000)).await; + + // Test the chain is correctly indexed. 
+ let actual_imported_blocks = + sqlx::query("SELECT substrate_block_hash, is_canon, block_number FROM blocks") + .fetch_all(&pool) + .await + .expect("test query result") + .iter() + .map(|row| H256::from_slice(&row.get::, _>(0)[..])) + .collect::>(); + let expected_imported_blocks = best_block_hashes.clone(); + assert_eq!(expected_imported_blocks, actual_imported_blocks); + } + + #[tokio::test] + async fn sync_strategy_parachain_indexes_best_blocks_if_not_major_sync() { + let tmp = tempdir().expect("create a temporary directory"); + let builder = TestClientBuilder::new().add_extra_storage( + PALLET_ETHEREUM_SCHEMA.to_vec(), + Encode::encode(&EthereumStorageSchema::V3), + ); + let backend = builder.backend(); + let (client, _) = + builder.build_with_native_executor::(None); + let mut client = Arc::new(client); + let mut overrides_map = BTreeMap::new(); + overrides_map.insert( + EthereumStorageSchema::V3, + Box::new(SchemaV3Override::new(client.clone())) as Box>, + ); + let overrides = Arc::new(OverrideHandle { + schemas: overrides_map, + fallback: Box::new(SchemaV3Override::new(client.clone())), + }); + let indexer_backend = fc_db::sql::Backend::new( + fc_db::sql::BackendConfig::Sqlite(fc_db::sql::SqliteBackendConfig { + path: Path::new("sqlite:///") + .join(tmp.path()) + .join("test.db3") + .to_str() + .unwrap(), + create_if_missing: true, + cache_size: 204800, + thread_count: 4, + }), + 100, + None, + overrides.clone(), + ) + .await + .expect("indexer pool to be created"); + + // Pool + let pool = indexer_backend.pool().clone(); + + // Spawn indexer task + let pubsub_notification_sinks: crate::EthereumBlockNotificationSinks< + crate::EthereumBlockNotification, + > = Default::default(); + let pubsub_notification_sinks = Arc::new(pubsub_notification_sinks); + let mut sync_oracle_wrapper = TestSyncOracleWrapper::new(); + let sync_oracle = sync_oracle_wrapper.oracle.clone(); + let client_inner = client.clone(); + tokio::task::spawn(async move { + crate::sql::SyncWorker::run( + client_inner.clone(), + backend.clone(), + Arc::new(indexer_backend), + client_inner.import_notification_stream(), + SyncWorkerConfig { + read_notification_timeout: Duration::from_secs(10), + check_indexed_blocks_interval: Duration::from_secs(60), + }, + SyncStrategy::Parachain, + Arc::new(sync_oracle), + pubsub_notification_sinks.clone(), + ) + .await + }); + // Enough time for startup + futures_timer::Delay::new(std::time::Duration::from_millis(200)).await; + + // Import 3 blocks as part of normal operation, storing them oldest first. + sync_oracle_wrapper.set_sync_status(false); + let mut parent_hash = client + .hash(sp_runtime::traits::Zero::zero()) + .unwrap() + .expect("genesis hash"); + let mut best_block_hashes: Vec = vec![]; + for _block_number in 1..=3 { + let builder = client + .new_block_at(parent_hash, ethereum_digest(), false) + .unwrap(); + let block = builder.build().unwrap().block; + let block_hash = block.header.hash(); + + executor::block_on(client.import(BlockOrigin::Own, block)).unwrap(); + best_block_hashes.push(block_hash.clone()); + parent_hash = block_hash; + } + + // Enough time for indexing + futures_timer::Delay::new(std::time::Duration::from_millis(3000)).await; + + // Test the chain is correctly indexed. 
+ let actual_imported_blocks = + sqlx::query("SELECT substrate_block_hash, is_canon, block_number FROM blocks") + .fetch_all(&pool) + .await + .expect("test query result") + .iter() + .map(|row| H256::from_slice(&row.get::, _>(0)[..])) + .collect::>(); + let expected_imported_blocks = best_block_hashes.clone(); + assert_eq!(expected_imported_blocks, actual_imported_blocks); + } + + #[tokio::test] + async fn sync_strategy_parachain_ignores_non_best_blocks_if_not_major_sync() { + let tmp = tempdir().expect("create a temporary directory"); + let builder = TestClientBuilder::new().add_extra_storage( + PALLET_ETHEREUM_SCHEMA.to_vec(), + Encode::encode(&EthereumStorageSchema::V3), + ); + let backend = builder.backend(); + let (client, _) = + builder.build_with_native_executor::(None); + let mut client = Arc::new(client); + let mut overrides_map = BTreeMap::new(); + overrides_map.insert( + EthereumStorageSchema::V3, + Box::new(SchemaV3Override::new(client.clone())) as Box>, + ); + let overrides = Arc::new(OverrideHandle { + schemas: overrides_map, + fallback: Box::new(SchemaV3Override::new(client.clone())), + }); + let indexer_backend = fc_db::sql::Backend::new( + fc_db::sql::BackendConfig::Sqlite(fc_db::sql::SqliteBackendConfig { + path: Path::new("sqlite:///") + .join(tmp.path()) + .join("test.db3") + .to_str() + .unwrap(), + create_if_missing: true, + cache_size: 204800, + thread_count: 4, + }), + 100, + None, + overrides.clone(), + ) + .await + .expect("indexer pool to be created"); + + // Pool + let pool = indexer_backend.pool().clone(); + + // Spawn indexer task + let pubsub_notification_sinks: crate::EthereumBlockNotificationSinks< + crate::EthereumBlockNotification, + > = Default::default(); + let pubsub_notification_sinks = Arc::new(pubsub_notification_sinks); + let mut sync_oracle_wrapper = TestSyncOracleWrapper::new(); + let sync_oracle = sync_oracle_wrapper.oracle.clone(); + let client_inner = client.clone(); + tokio::task::spawn(async move { + crate::sql::SyncWorker::run( + client_inner.clone(), + backend.clone(), + Arc::new(indexer_backend), + client_inner.import_notification_stream(), + SyncWorkerConfig { + read_notification_timeout: Duration::from_secs(10), + check_indexed_blocks_interval: Duration::from_secs(60), + }, + SyncStrategy::Parachain, + Arc::new(sync_oracle), + pubsub_notification_sinks.clone(), + ) + .await + }); + // Enough time for startup + futures_timer::Delay::new(std::time::Duration::from_millis(200)).await; + + // Import 3 blocks as part of normal operation, storing them oldest first. + sync_oracle_wrapper.set_sync_status(false); + let mut parent_hash = client + .hash(sp_runtime::traits::Zero::zero()) + .unwrap() + .expect("genesis hash"); + let mut best_block_hashes: Vec = vec![]; + for _block_number in 1..=3 { + let builder = client + .new_block_at(parent_hash, ethereum_digest(), false) + .unwrap(); + let block = builder.build().unwrap().block; + let block_hash = block.header.hash(); + + executor::block_on(client.import(BlockOrigin::Own, block)).unwrap(); + best_block_hashes.push(block_hash.clone()); + parent_hash = block_hash; + } + + // create non-best block + let builder = client + .new_block_at(best_block_hashes[0], ethereum_digest(), false) + .unwrap(); + let block = builder.build().unwrap().block; + + executor::block_on(client.import(BlockOrigin::Own, block)).unwrap(); + + // Enough time for indexing + futures_timer::Delay::new(std::time::Duration::from_millis(3000)).await; + + // Test the chain is correctly indexed. 
+ let actual_imported_blocks = + sqlx::query("SELECT substrate_block_hash, is_canon, block_number FROM blocks") + .fetch_all(&pool) + .await + .expect("test query result") + .iter() + .map(|row| H256::from_slice(&row.get::, _>(0)[..])) + .collect::>(); + let expected_imported_blocks = best_block_hashes.clone(); + assert_eq!(expected_imported_blocks, actual_imported_blocks); + } + + #[tokio::test] + async fn sync_strategy_normal_ignores_best_blocks_if_major_sync() { + let tmp = tempdir().expect("create a temporary directory"); + let builder = TestClientBuilder::new().add_extra_storage( + PALLET_ETHEREUM_SCHEMA.to_vec(), + Encode::encode(&EthereumStorageSchema::V3), + ); + let backend = builder.backend(); + let (client, _) = + builder.build_with_native_executor::(None); + let mut client = Arc::new(client); + let mut overrides_map = BTreeMap::new(); + overrides_map.insert( + EthereumStorageSchema::V3, + Box::new(SchemaV3Override::new(client.clone())) as Box>, + ); + let overrides = Arc::new(OverrideHandle { + schemas: overrides_map, + fallback: Box::new(SchemaV3Override::new(client.clone())), + }); + let indexer_backend = fc_db::sql::Backend::new( + fc_db::sql::BackendConfig::Sqlite(fc_db::sql::SqliteBackendConfig { + path: Path::new("sqlite:///") + .join(tmp.path()) + .join("test.db3") + .to_str() + .unwrap(), + create_if_missing: true, + cache_size: 204800, + thread_count: 4, + }), + 100, + None, + overrides.clone(), + ) + .await + .expect("indexer pool to be created"); + + // Pool + let pool = indexer_backend.pool().clone(); + + // Spawn indexer task + let pubsub_notification_sinks: crate::EthereumBlockNotificationSinks< + crate::EthereumBlockNotification, + > = Default::default(); + let pubsub_notification_sinks = Arc::new(pubsub_notification_sinks); + let mut sync_oracle_wrapper = TestSyncOracleWrapper::new(); + let sync_oracle = sync_oracle_wrapper.oracle.clone(); + let client_inner = client.clone(); + tokio::task::spawn(async move { + crate::sql::SyncWorker::run( + client_inner.clone(), + backend.clone(), + Arc::new(indexer_backend), + client_inner.import_notification_stream(), + SyncWorkerConfig { + read_notification_timeout: Duration::from_secs(10), + check_indexed_blocks_interval: Duration::from_secs(60), + }, + SyncStrategy::Normal, + Arc::new(sync_oracle), + pubsub_notification_sinks.clone(), + ) + .await + }); + // Enough time for startup + futures_timer::Delay::new(std::time::Duration::from_millis(200)).await; + + // Import 3 blocks as part of initial network sync, storing them oldest first. + sync_oracle_wrapper.set_sync_status(true); + let mut parent_hash = client + .hash(sp_runtime::traits::Zero::zero()) + .unwrap() + .expect("genesis hash"); + let mut best_block_hashes: Vec = vec![]; + for _block_number in 1..=3 { + let builder = client + .new_block_at(parent_hash, ethereum_digest(), false) + .unwrap(); + let block = builder.build().unwrap().block; + let block_hash = block.header.hash(); + + executor::block_on(client.import(BlockOrigin::NetworkInitialSync, block)).unwrap(); + best_block_hashes.push(block_hash.clone()); + parent_hash = block_hash; + } + + // Enough time for indexing + futures_timer::Delay::new(std::time::Duration::from_millis(3000)).await; + + // Test the chain is correctly indexed. 
+ let actual_imported_blocks = + sqlx::query("SELECT substrate_block_hash, is_canon, block_number FROM blocks") + .fetch_all(&pool) + .await + .expect("test query result") + .iter() + .map(|row| H256::from_slice(&row.get::, _>(0)[..])) + .collect::>(); + let expected_imported_blocks = Vec::::new(); + assert_eq!(expected_imported_blocks, actual_imported_blocks); + } + + #[tokio::test] + async fn sync_strategy_parachain_ignores_best_blocks_if_major_sync() { + let tmp = tempdir().expect("create a temporary directory"); + let builder = TestClientBuilder::new().add_extra_storage( + PALLET_ETHEREUM_SCHEMA.to_vec(), + Encode::encode(&EthereumStorageSchema::V3), + ); + let backend = builder.backend(); + let (client, _) = + builder.build_with_native_executor::(None); + let mut client = Arc::new(client); + let mut overrides_map = BTreeMap::new(); + overrides_map.insert( + EthereumStorageSchema::V3, + Box::new(SchemaV3Override::new(client.clone())) as Box>, + ); + let overrides = Arc::new(OverrideHandle { + schemas: overrides_map, + fallback: Box::new(SchemaV3Override::new(client.clone())), + }); + let indexer_backend = fc_db::sql::Backend::new( + fc_db::sql::BackendConfig::Sqlite(fc_db::sql::SqliteBackendConfig { + path: Path::new("sqlite:///") + .join(tmp.path()) + .join("test.db3") + .to_str() + .unwrap(), + create_if_missing: true, + cache_size: 204800, + thread_count: 4, + }), + 100, + None, + overrides.clone(), + ) + .await + .expect("indexer pool to be created"); + + // Pool + let pool = indexer_backend.pool().clone(); + + // Spawn indexer task + let pubsub_notification_sinks: crate::EthereumBlockNotificationSinks< + crate::EthereumBlockNotification, + > = Default::default(); + let pubsub_notification_sinks = Arc::new(pubsub_notification_sinks); + let mut sync_oracle_wrapper = TestSyncOracleWrapper::new(); + let sync_oracle = sync_oracle_wrapper.oracle.clone(); + let client_inner = client.clone(); + tokio::task::spawn(async move { + crate::sql::SyncWorker::run( + client_inner.clone(), + backend.clone(), + Arc::new(indexer_backend), + client_inner.import_notification_stream(), + SyncWorkerConfig { + read_notification_timeout: Duration::from_secs(10), + check_indexed_blocks_interval: Duration::from_secs(60), + }, + SyncStrategy::Parachain, + Arc::new(sync_oracle), + pubsub_notification_sinks.clone(), + ) + .await + }); + // Enough time for startup + futures_timer::Delay::new(std::time::Duration::from_millis(200)).await; + + // Import 3 blocks as part of initial network sync, storing them oldest first. + sync_oracle_wrapper.set_sync_status(true); + let mut parent_hash = client + .hash(sp_runtime::traits::Zero::zero()) + .unwrap() + .expect("genesis hash"); + let mut best_block_hashes: Vec = vec![]; + for _block_number in 1..=3 { + let builder = client + .new_block_at(parent_hash, ethereum_digest(), false) + .unwrap(); + let block = builder.build().unwrap().block; + let block_hash = block.header.hash(); + + executor::block_on(client.import(BlockOrigin::NetworkInitialSync, block)).unwrap(); + best_block_hashes.push(block_hash.clone()); + parent_hash = block_hash; + } + + // Enough time for indexing + futures_timer::Delay::new(std::time::Duration::from_millis(3000)).await; + + // Test the chain is correctly indexed. 
+ let actual_imported_blocks = + sqlx::query("SELECT substrate_block_hash, is_canon, block_number FROM blocks") + .fetch_all(&pool) + .await + .expect("test query result") + .iter() + .map(|row| H256::from_slice(&row.get::, _>(0)[..])) + .collect::>(); + let expected_imported_blocks = Vec::::new(); assert_eq!(expected_imported_blocks, actual_imported_blocks); } } From 67df5c8ad4ecb2ecc60ea1a054662902dff78502 Mon Sep 17 00:00:00 2001 From: tgmichel Date: Fri, 23 Jun 2023 13:39:39 +0200 Subject: [PATCH 07/12] Add `pending` support for `eth_getBlockByNumber` (#1048) * Add `pending` support for `eth_getBlockByNumber` * header not needed * cleanup * prettier * update some fields to be optional on pending * update test * cleanup --- client/rpc-core/src/types/block.rs | 4 +- client/rpc/src/eth/block.rs | 112 ++++++++++++++++++++--------- client/rpc/src/eth/mod.rs | 21 ++++-- client/rpc/src/eth_pubsub.rs | 2 +- primitives/rpc/src/lib.rs | 4 ++ template/runtime/src/lib.rs | 17 ++++- ts-tests/tests/test-block.ts | 54 +++++++++++++- 7 files changed, 170 insertions(+), 44 deletions(-) diff --git a/client/rpc-core/src/types/block.rs b/client/rpc-core/src/types/block.rs index 0ab435e253..1ce10681c8 100644 --- a/client/rpc-core/src/types/block.rs +++ b/client/rpc-core/src/types/block.rs @@ -52,7 +52,7 @@ pub struct Block { #[serde(flatten)] pub header: Header, /// Total difficulty - pub total_difficulty: U256, + pub total_difficulty: Option, /// Uncles' hashes pub uncles: Vec, /// Transactions @@ -78,7 +78,7 @@ pub struct Header { /// Authors address pub author: H160, /// Alias of `author` - pub miner: H160, + pub miner: Option, /// State root hash pub state_root: H256, /// Transactions root hash diff --git a/client/rpc/src/eth/block.rs b/client/rpc/src/eth/block.rs index 1ffce180a2..c9a1f88017 100644 --- a/client/rpc/src/eth/block.rs +++ b/client/rpc/src/eth/block.rs @@ -23,6 +23,7 @@ use jsonrpsee::core::RpcResult; // Substrate use sc_client_api::backend::{Backend, StorageProvider}; use sc_transaction_pool::ChainApi; +use sc_transaction_pool_api::InPoolTransaction; use sp_api::ProvideRuntimeApi; use sp_blockchain::HeaderBackend; use sp_core::hashing::keccak_256; @@ -43,6 +44,7 @@ where C::Api: EthereumRuntimeRPCApi, C: HeaderBackend + StorageProvider + 'static, BE: Backend, + A: ChainApi + 'static, { pub async fn block_by_hash(&self, hash: H256, full: bool) -> RpcResult> { let client = Arc::clone(&self.client); @@ -78,6 +80,7 @@ where Some(hash), full, base_fee, + false, ); let substrate_hash = H256::from_slice(substrate_hash.as_ref()); @@ -103,54 +106,99 @@ where let client = Arc::clone(&self.client); let block_data_cache = Arc::clone(&self.block_data_cache); let backend = Arc::clone(&self.backend); + let graph = Arc::clone(&self.graph); - let id = match frontier_backend_client::native_block_id::( + match frontier_backend_client::native_block_id::( client.as_ref(), backend.as_ref(), Some(number), ) .await? 
{ - Some(id) => id, - None => return Ok(None), - }; - let substrate_hash = client - .expect_block_hash_from_id(&id) - .map_err(|_| internal_err(format!("Expect block number from id: {}", id)))?; + Some(id) => { + let substrate_hash = client + .expect_block_hash_from_id(&id) + .map_err(|_| internal_err(format!("Expect block number from id: {}", id)))?; - let schema = fc_storage::onchain_storage_schema(client.as_ref(), substrate_hash); + let schema = fc_storage::onchain_storage_schema(client.as_ref(), substrate_hash); - let block = block_data_cache.current_block(schema, substrate_hash).await; - let statuses = block_data_cache - .current_transaction_statuses(schema, substrate_hash) - .await; + let block = block_data_cache.current_block(schema, substrate_hash).await; + let statuses = block_data_cache + .current_transaction_statuses(schema, substrate_hash) + .await; - let base_fee = client.runtime_api().gas_price(substrate_hash).ok(); + let base_fee = client.runtime_api().gas_price(substrate_hash).ok(); - match (block, statuses) { - (Some(block), Some(statuses)) => { - let hash = H256::from(keccak_256(&rlp::encode(&block.header))); + match (block, statuses) { + (Some(block), Some(statuses)) => { + let hash = H256::from(keccak_256(&rlp::encode(&block.header))); + let mut rich_block = rich_block_build( + block, + statuses.into_iter().map(Option::Some).collect(), + Some(hash), + full, + base_fee, + false, + ); - let mut rich_block = rich_block_build( - block, - statuses.into_iter().map(Option::Some).collect(), - Some(hash), - full, - base_fee, - ); + let substrate_hash = H256::from_slice(substrate_hash.as_ref()); + if let Some(parent_hash) = self + .forced_parent_hashes + .as_ref() + .and_then(|parent_hashes| parent_hashes.get(&substrate_hash).cloned()) + { + rich_block.inner.header.parent_hash = parent_hash + } - let substrate_hash = H256::from_slice(substrate_hash.as_ref()); - if let Some(parent_hash) = self - .forced_parent_hashes - .as_ref() - .and_then(|parent_hashes| parent_hashes.get(&substrate_hash).cloned()) - { - rich_block.inner.header.parent_hash = parent_hash + Ok(Some(rich_block)) + } + _ => Ok(None), } + } + None if number == BlockNumber::Pending => { + let api = client.runtime_api(); + let best_hash = client.info().best_hash; - Ok(Some(rich_block)) + // Get current in-pool transactions + let mut xts: Vec<::Extrinsic> = Vec::new(); + // ready validated pool + xts.extend( + graph + .validated_pool() + .ready() + .map(|in_pool_tx| in_pool_tx.data().clone()) + .collect::::Extrinsic>>(), + ); + + // future validated pool + xts.extend( + graph + .validated_pool() + .futures() + .iter() + .map(|(_hash, extrinsic)| extrinsic.clone()) + .collect::::Extrinsic>>(), + ); + + let (block, statuses) = api + .pending_block(best_hash, xts) + .map_err(|_| internal_err(format!("Runtime access error at {}", best_hash)))?; + + let base_fee = api.gas_price(best_hash).ok(); + + match (block, statuses) { + (Some(block), Some(statuses)) => Ok(Some(rich_block_build( + block, + statuses.into_iter().map(Option::Some).collect(), + None, + full, + base_fee, + true, + ))), + _ => Ok(None), + } } - _ => Ok(None), + None => Ok(None), } } diff --git a/client/rpc/src/eth/mod.rs b/client/rpc/src/eth/mod.rs index db28d177e7..ed7a1491a1 100644 --- a/client/rpc/src/eth/mod.rs +++ b/client/rpc/src/eth/mod.rs @@ -404,17 +404,26 @@ fn rich_block_build( hash: Option, full_transactions: bool, base_fee: Option, + is_pending: bool, ) -> RichBlock { + let (hash, miner, nonce, total_difficulty) = if !is_pending { + ( + 
Some(hash.unwrap_or_else(|| H256::from(keccak_256(&rlp::encode(&block.header))))), + Some(block.header.beneficiary), + Some(block.header.nonce), + Some(U256::zero()), + ) + } else { + (None, None, None, None) + }; Rich { inner: Block { header: Header { - hash: Some( - hash.unwrap_or_else(|| H256::from(keccak_256(&rlp::encode(&block.header)))), - ), + hash, parent_hash: block.header.parent_hash, uncles_hash: block.header.ommers_hash, author: block.header.beneficiary, - miner: block.header.beneficiary, + miner, state_root: block.header.state_root, transactions_root: block.header.transactions_root, receipts_root: block.header.receipts_root, @@ -425,10 +434,10 @@ fn rich_block_build( logs_bloom: block.header.logs_bloom, timestamp: U256::from(block.header.timestamp / 1000), difficulty: block.header.difficulty, - nonce: Some(block.header.nonce), + nonce, size: Some(U256::from(rlp::encode(&block.header).len() as u32)), }, - total_difficulty: U256::zero(), + total_difficulty, uncles: vec![], transactions: { if full_transactions { diff --git a/client/rpc/src/eth_pubsub.rs b/client/rpc/src/eth_pubsub.rs index 8f08934c39..9466983b95 100644 --- a/client/rpc/src/eth_pubsub.rs +++ b/client/rpc/src/eth_pubsub.rs @@ -107,7 +107,7 @@ impl EthSubscriptionResult { parent_hash: block.header.parent_hash, uncles_hash: block.header.ommers_hash, author: block.header.beneficiary, - miner: block.header.beneficiary, + miner: Some(block.header.beneficiary), state_root: block.header.state_root, transactions_root: block.header.transactions_root, receipts_root: block.header.receipts_root, diff --git a/primitives/rpc/src/lib.rs b/primitives/rpc/src/lib.rs index 1ea4db3460..f32b32408a 100644 --- a/primitives/rpc/src/lib.rs +++ b/primitives/rpc/src/lib.rs @@ -232,6 +232,10 @@ sp_api::decl_runtime_apis! { /// Used to determine if gas limit multiplier for non-transactional calls (eth_call/estimateGas) /// is supported. fn gas_limit_multiplier_support(); + /// Return the pending block. + fn pending_block( + xts: Vec<::Extrinsic>, + ) -> (Option, Option>); } #[api_version(2)] diff --git a/template/runtime/src/lib.rs b/template/runtime/src/lib.rs index 947c00df27..fbcba0a165 100644 --- a/template/runtime/src/lib.rs +++ b/template/runtime/src/lib.rs @@ -35,7 +35,7 @@ use frame_support::weights::constants::ParityDbWeight as RuntimeDbWeight; use frame_support::weights::constants::RocksDbWeight as RuntimeDbWeight; use frame_support::{ construct_runtime, parameter_types, - traits::{ConstU32, ConstU8, FindAuthor, OnTimestampSet}, + traits::{ConstU32, ConstU8, FindAuthor, OnFinalize, OnTimestampSet}, weights::{constants::WEIGHT_REF_TIME_PER_MILLIS, ConstantMultiplier, IdentityFee, Weight}, }; use pallet_grandpa::{ @@ -768,6 +768,21 @@ impl_runtime_apis! 
{ } fn gas_limit_multiplier_support() {} + + fn pending_block( + xts: Vec<::Extrinsic>, + ) -> (Option, Option>) { + for ext in xts.into_iter() { + let _ = Executive::apply_extrinsic(ext); + } + + Ethereum::on_finalize(System::block_number() + 1); + + ( + pallet_ethereum::CurrentBlock::::get(), + pallet_ethereum::CurrentTransactionStatuses::::get() + ) + } } impl fp_rpc::ConvertTransactionRuntimeApi for Runtime { diff --git a/ts-tests/tests/test-block.ts b/ts-tests/tests/test-block.ts index 8fd331874c..3ec80a545b 100644 --- a/ts-tests/tests/test-block.ts +++ b/ts-tests/tests/test-block.ts @@ -1,8 +1,8 @@ import { expect } from "chai"; import { step } from "mocha-steps"; -import { BLOCK_TIMESTAMP, ETH_BLOCK_GAS_LIMIT } from "./config"; -import { createAndFinalizeBlock, describeWithFrontier } from "./util"; +import { BLOCK_TIMESTAMP, ETH_BLOCK_GAS_LIMIT, GENESIS_ACCOUNT, GENESIS_ACCOUNT_PRIVATE_KEY } from "./config"; +import { createAndFinalizeBlock, describeWithFrontier, customRequest } from "./util"; describeWithFrontier("Frontier RPC (Block)", (context) => { let previousBlock; @@ -145,3 +145,53 @@ describeWithFrontier("Frontier RPC (Block)", (context) => { expect(block.parentHash).to.equal(previousBlock.hash); }); }); + +describeWithFrontier("Frontier RPC (Pending Block)", (context) => { + const TEST_ACCOUNT = "0x1111111111111111111111111111111111111111"; + + it("should return pending block", async function () { + var nonce = 0; + let sendTransaction = async () => { + const tx = await context.web3.eth.accounts.signTransaction( + { + from: GENESIS_ACCOUNT, + to: TEST_ACCOUNT, + value: "0x200", // Must be higher than ExistentialDeposit + gasPrice: "0x3B9ACA00", + gas: "0x100000", + nonce: nonce, + }, + GENESIS_ACCOUNT_PRIVATE_KEY + ); + nonce = nonce + 1; + return (await customRequest(context.web3, "eth_sendRawTransaction", [tx.rawTransaction])).result; + }; + + // block 1 send 5 transactions + const expectedXtsNumber = 5; + for (var _ of Array(expectedXtsNumber)) { + await sendTransaction(); + } + + // test still invalid future transactions can be safely applied (they are applied, just not overlayed) + nonce = nonce + 100; + await sendTransaction(); + + // do not seal, get pendign block + let pending_transactions = []; + { + const pending = (await customRequest(context.web3, "eth_getBlockByNumber", ["pending", false])).result; + expect(pending.hash).to.be.null; + expect(pending.miner).to.be.null; + expect(pending.nonce).to.be.null; + expect(pending.totalDifficulty).to.be.null; + pending_transactions = pending.transactions; + expect(pending_transactions.length).to.be.eq(expectedXtsNumber); + } + + // seal and compare latest blocks transactions with the previously pending + await createAndFinalizeBlock(context.web3); + const latest_block = await context.web3.eth.getBlock("latest", false); + expect(pending_transactions).to.be.deep.eq(latest_block.transactions); + }); +}); From 433d8f7da8f7565f42c163e51f8943a82d17075c Mon Sep 17 00:00:00 2001 From: Vedhavyas Singareddi Date: Fri, 23 Jun 2023 17:16:52 +0530 Subject: [PATCH 08/12] make rocksdb and paritydb optional. (#1088) Since `sc-cli` and `sc-service` brings rocksdb as default, workspace was adjusted to not use default features. Rest of the changes are ensuring necessary code is made optional. 
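The feature gating described above comes down to cfg-gated construction paths. A minimal standalone sketch, assuming an illustrative `BackendKind` enum and a string selector (neither is part of the Frontier API), looks like this:

enum BackendKind {
    #[cfg(feature = "rocksdb")]
    RocksDb,
    #[cfg(feature = "parity-db")]
    ParityDb,
}

fn select_backend(name: &str) -> Result<BackendKind, String> {
    // Only backends compiled in via cargo features are constructible; anything
    // else falls through to an error, roughly mirroring the cfg-gated match arms
    // added to `upgrade_db` and `open_database` below.
    match name {
        #[cfg(feature = "rocksdb")]
        "rocksdb" => Ok(BackendKind::RocksDb),
        #[cfg(feature = "parity-db")]
        "paritydb" => Ok(BackendKind::ParityDb),
        other => Err(format!("database backend `{}` not compiled in", other)),
    }
}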
--- Cargo.toml | 4 ++-- client/cli/Cargo.toml | 3 ++- client/db/Cargo.toml | 12 +++++++++--- client/db/src/kv/mod.rs | 2 ++ client/db/src/kv/upgrade.rs | 34 +++++++++++++++++++++------------- client/db/src/kv/utils.rs | 10 +++++----- template/node/Cargo.toml | 2 +- 7 files changed, 42 insertions(+), 25 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index e71fc735cb..db5a09fd8c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -61,7 +61,7 @@ sqlx = "0.7.0-alpha.3" sc-basic-authorship = { version = "0.10.0-dev", git = "https://github.com/paritytech/substrate", branch = "master" } sc-block-builder = { version = "0.10.0-dev", git = "https://github.com/paritytech/substrate", branch = "master" } sc-chain-spec = { version = "4.0.0-dev", git = "https://github.com/paritytech/substrate", branch = "master" } -sc-cli = { version = "0.10.0-dev", git = "https://github.com/paritytech/substrate", branch = "master" } +sc-cli = { version = "0.10.0-dev", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } sc-client-api = { version = "4.0.0-dev", git = "https://github.com/paritytech/substrate", branch = "master" } sc-client-db = { version = "0.10.0-dev", git = "https://github.com/paritytech/substrate", branch = "master" } sc-consensus = { version = "0.10.0-dev", git = "https://github.com/paritytech/substrate", branch = "master" } @@ -75,7 +75,7 @@ sc-network-common = { version = "0.10.0-dev", git = "https://github.com/parityte sc-network-sync = { version = "0.10.0-dev", git = "https://github.com/paritytech/substrate", branch = "master" } sc-rpc = { version = "4.0.0-dev", git = "https://github.com/paritytech/substrate", branch = "master" } sc-rpc-api = { version = "0.10.0-dev", git = "https://github.com/paritytech/substrate", branch = "master" } -sc-service = { version = "0.10.0-dev", git = "https://github.com/paritytech/substrate", branch = "master" } +sc-service = { version = "0.10.0-dev", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } sc-telemetry = { version = "4.0.0-dev", git = "https://github.com/paritytech/substrate", branch = "master" } sc-transaction-pool = { version = "4.0.0-dev", git = "https://github.com/paritytech/substrate", branch = "master" } sc-transaction-pool-api = { version = "4.0.0-dev", git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/client/cli/Cargo.toml b/client/cli/Cargo.toml index a5a0689d72..69ea420852 100644 --- a/client/cli/Cargo.toml +++ b/client/cli/Cargo.toml @@ -31,9 +31,10 @@ scale-codec = { package = "parity-scale-codec", workspace = true } tempfile = "3.3.0" # Substrate sc-block-builder = { workspace = true } -sc-client-db = { workspace = true } +sc-client-db = { workspace = true, features = ["rocksdb"] } sp-consensus = { workspace = true } sp-io = { workspace = true } substrate-test-runtime-client = { workspace = true } # Frontier +fc-db = { workspace = true, features = ["rocksdb"] } frontier-template-runtime = { workspace = true, features = ["default"] } diff --git a/client/db/Cargo.toml b/client/db/Cargo.toml index 14d62a9ab1..92d489e089 100644 --- a/client/db/Cargo.toml +++ b/client/db/Cargo.toml @@ -19,12 +19,12 @@ log = "0.4.17" parity-db = { workspace = true, optional = true } parking_lot = "0.12.1" scale-codec = { package = "parity-scale-codec", workspace = true } -smallvec = "1.10" +smallvec = { version = "1.10", optional = true } sqlx = { workspace = true, features = ["runtime-tokio-native-tls", "sqlite"] } tokio = { version = "1.19", features = 
["macros", "sync"] } # Substrate sc-client-api = { workspace = true } -sc-client-db = { workspace = true, features = ["rocksdb"] } +sc-client-db = { workspace = true } sp-api = { workspace = true } sp-blockchain = { workspace = true } sp-core = { workspace = true } @@ -38,7 +38,13 @@ fp-rpc = { workspace = true, features = ["default"] } fp-storage = { workspace = true, features = ["default"] } [features] -default = ["kvdb-rocksdb", "parity-db"] +default = ["parity-db"] +parity-db = ["dep:parity-db"] +rocksdb = [ + "kvdb-rocksdb", + "sc-client-db/rocksdb", + "smallvec", +] [dev-dependencies] maplit = "1.0.2" diff --git a/client/db/src/kv/mod.rs b/client/db/src/kv/mod.rs index 03b00f01c0..92f377f5d7 100644 --- a/client/db/src/kv/mod.rs +++ b/client/db/src/kv/mod.rs @@ -114,10 +114,12 @@ impl Backend { client, &DatabaseSettings { source: match database { + #[cfg(feature = "rocksdb")] DatabaseSource::RocksDb { .. } => DatabaseSource::RocksDb { path: frontier_database_dir(db_config_dir, "db"), cache_size: 0, }, + #[cfg(feature = "parity-db")] DatabaseSource::ParityDb { .. } => DatabaseSource::ParityDb { path: frontier_database_dir(db_config_dir, "paritydb"), }, diff --git a/client/db/src/kv/upgrade.rs b/client/db/src/kv/upgrade.rs index 20ab19befd..6243f53481 100644 --- a/client/db/src/kv/upgrade.rs +++ b/client/db/src/kv/upgrade.rs @@ -100,13 +100,11 @@ pub(crate) fn upgrade_db>( match db_version { 0 => return Err(UpgradeError::UnsupportedVersion(db_version)), 1 => { - let summary = match source { - DatabaseSource::ParityDb { .. } => { - migrate_1_to_2_parity_db::(client, db_path)? - } - DatabaseSource::RocksDb { .. } => { - migrate_1_to_2_rocks_db::(client, db_path)? - } + let summary: UpgradeVersion1To2Summary = match source { + #[cfg(feature = "parity-db")] + DatabaseSource::ParityDb { .. } => migrate_1_to_2_parity_db::(client, db_path)?, + #[cfg(feature = "rocksdb")] + DatabaseSource::RocksDb { .. } => migrate_1_to_2_rocks_db::(client, db_path)?, _ => panic!("DatabaseSource required for upgrade ParityDb | RocksDb"), }; if !summary.error.is_empty() { @@ -165,6 +163,7 @@ fn version_file_path(path: &Path) -> PathBuf { /// Migration from version1 to version2: /// - The format of the Ethereum<>Substrate block mapping changed to support equivocation. /// - Migrating schema from One-to-one to One-to-many (EthHash: Vec) relationship. 
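To make the new one-to-many shape concrete, a small sketch (illustrative only; the actual column and key layout is defined by the migration code below, and `scale_codec` is the renamed `parity-scale-codec` crate used across this repo):

use scale_codec::Decode;
use sp_core::H256;

// Post-migration, the value stored under an Ethereum block hash decodes to a
// SCALE-encoded Vec<H256> of Substrate block hashes (one entry per block that
// maps to the same Ethereum hash), where it previously held a single H256.
fn decode_mapping_value(raw: &[u8]) -> Result<Vec<H256>, scale_codec::Error> {
    Vec::<H256>::decode(&mut &raw[..])
}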
+#[cfg(feature = "rocksdb")] pub(crate) fn migrate_1_to_2_rocks_db>( client: Arc, db_path: &Path, @@ -246,6 +245,7 @@ pub(crate) fn migrate_1_to_2_rocks_db>( Ok(res) } +#[cfg(feature = "parity-db")] pub(crate) fn migrate_1_to_2_parity_db>( client: Arc, db_path: &Path, @@ -333,6 +333,7 @@ mod tests { sync::Arc, }; + use crate::kv::DatabaseSettings; use scale_codec::Encode; use sp_blockchain::HeaderBackend; use sp_core::H256; @@ -352,23 +353,29 @@ mod tests { Ok(Arc::new(crate::kv::Backend::::new(client, setting)?)) } + #[cfg_attr(not(any(feature = "rocksdb", feature = "parity-db")), ignore)] #[test] fn upgrade_1_to_2_works() { - let tmp_1 = tempdir().expect("create a temporary directory"); - let tmp_2 = tempdir().expect("create a temporary directory"); - - let settings = vec![ + let settings: Vec = vec![ // Rocks db + #[cfg(feature = "rocksdb")] crate::kv::DatabaseSettings { source: sc_client_db::DatabaseSource::RocksDb { - path: tmp_1.path().to_owned(), + path: tempdir() + .expect("create a temporary directory") + .path() + .to_owned(), cache_size: 0, }, }, // Parity db + #[cfg(feature = "parity-db")] crate::kv::DatabaseSettings { source: sc_client_db::DatabaseSource::ParityDb { - path: tmp_2.path().to_owned(), + path: tempdir() + .expect("create a temporary directory") + .path() + .to_owned(), }, }, ]; @@ -495,6 +502,7 @@ mod tests { } } + #[cfg(feature = "rocksdb")] #[test] fn create_db_with_current_version_works() { let tmp = tempdir().expect("create a temporary directory"); diff --git a/client/db/src/kv/utils.rs b/client/db/src/kv/utils.rs index a6c4da2dfb..ff241b9cfb 100644 --- a/client/db/src/kv/utils.rs +++ b/client/db/src/kv/utils.rs @@ -29,9 +29,9 @@ pub fn open_database>( config: &DatabaseSettings, ) -> Result>, String> { let db: Arc> = match &config.source { - DatabaseSource::ParityDb { path } => { - open_parity_db::(client, path, &config.source)? - } + #[cfg(feature = "parity-db")] + DatabaseSource::ParityDb { path } => open_parity_db::(client, path, &config.source)?, + #[cfg(feature = "rocksdb")] DatabaseSource::RocksDb { path, .. } => { open_kvdb_rocksdb::(client, path, true, &config.source)? 
} @@ -51,7 +51,7 @@ pub fn open_database>( Ok(db) } -#[cfg(feature = "kvdb-rocksdb")] +#[cfg(feature = "rocksdb")] fn open_kvdb_rocksdb>( client: Arc, path: &Path, @@ -75,7 +75,7 @@ fn open_kvdb_rocksdb>( return Ok(sp_database::as_database(db)); } -#[cfg(not(feature = "kvdb-rocksdb"))] +#[cfg(not(feature = "rocksdb"))] fn open_kvdb_rocksdb>( _client: Arc, _path: &Path, diff --git a/template/node/Cargo.toml b/template/node/Cargo.toml index 95d238afc1..d6e8d53589 100644 --- a/template/node/Cargo.toml +++ b/template/node/Cargo.toml @@ -38,7 +38,7 @@ sc-network-common = { workspace = true } sc-network-sync = { workspace = true } sc-rpc = { workspace = true } sc-rpc-api = { workspace = true } -sc-service = { workspace = true } +sc-service = { workspace = true, features = ["default"] } sc-telemetry = { workspace = true } sc-transaction-pool = { workspace = true } sc-transaction-pool-api = { workspace = true } From 406c6c765961e820085a0444efbcb8b3004d3b8a Mon Sep 17 00:00:00 2001 From: bear Date: Sun, 25 Jun 2023 14:49:50 +0800 Subject: [PATCH 09/12] Add `txpool` RPC and impl `new_pending_transaction_filter` (#1073) * Add new filter type * Add txpool client side * Fix node compile * Fix rpc * Dev filter_changes * Add feature and format * Use sp_core::hashing * Fix clippy * Format code * Add pending transaction polling ts tests * Add txpool ts-test * Fix clippy CI * Fix review * Remove `TxPoolRuntimeApi` * Clean trait bound * Clean trait bound * Remove feature `txpool` --- Cargo.lock | 1 + client/rpc-core/src/lib.rs | 2 + client/rpc-core/src/txpool.rs | 37 ++++++ client/rpc-core/src/types/filter.rs | 3 +- client/rpc-core/src/types/mod.rs | 2 + client/rpc-core/src/types/txpool.rs | 170 +++++++++++++++++++++++++++ client/rpc/Cargo.toml | 1 + client/rpc/src/eth/filter.rs | 77 +++++++++--- client/rpc/src/lib.rs | 6 +- client/rpc/src/txpool.rs | 176 ++++++++++++++++++++++++++++ primitives/rpc/src/lib.rs | 6 + template/node/src/eth.rs | 6 +- template/node/src/rpc/eth.rs | 11 +- ts-tests/tests/test-filter-api.ts | 39 ++++-- ts-tests/tests/txpool.ts | 59 ++++++++++ 15 files changed, 565 insertions(+), 31 deletions(-) create mode 100644 client/rpc-core/src/txpool.rs create mode 100644 client/rpc-core/src/types/txpool.rs create mode 100644 client/rpc/src/txpool.rs create mode 100644 ts-tests/tests/txpool.ts diff --git a/Cargo.lock b/Cargo.lock index fef979bbe4..003e986fa6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2224,6 +2224,7 @@ dependencies = [ "sc-transaction-pool", "sc-transaction-pool-api", "sc-utils", + "serde", "sp-api", "sp-block-builder", "sp-blockchain", diff --git a/client/rpc-core/src/lib.rs b/client/rpc-core/src/lib.rs index 701c095e2f..c5728a8e7e 100644 --- a/client/rpc-core/src/lib.rs +++ b/client/rpc-core/src/lib.rs @@ -23,11 +23,13 @@ pub mod types; mod eth; mod eth_pubsub; mod net; +mod txpool; mod web3; pub use self::{ eth::{EthApiServer, EthFilterApiServer}, eth_pubsub::EthPubSubApiServer, net::NetApiServer, + txpool::TxPoolApiServer, web3::Web3ApiServer, }; diff --git a/client/rpc-core/src/txpool.rs b/client/rpc-core/src/txpool.rs new file mode 100644 index 0000000000..3cea401121 --- /dev/null +++ b/client/rpc-core/src/txpool.rs @@ -0,0 +1,37 @@ +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 +// This file is part of Frontier. +// +// Copyright (c) 2015-2022 Parity Technologies (UK) Ltd. 
+// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! tx pool rpc interface + +use ethereum_types::U256; +use jsonrpsee::{core::RpcResult, proc_macros::rpc}; +// Frontier +use crate::types::*; + +/// TxPool rpc interface +#[rpc(server)] +pub trait TxPoolApi { + #[method(name = "txpool_content")] + fn content(&self) -> RpcResult>>; + + #[method(name = "txpool_inspect")] + fn inspect(&self) -> RpcResult>>; + + #[method(name = "txpool_status")] + fn status(&self) -> RpcResult>; +} diff --git a/client/rpc-core/src/types/filter.rs b/client/rpc-core/src/types/filter.rs index 6fa1adfc46..4d7dd09a9a 100644 --- a/client/rpc-core/src/types/filter.rs +++ b/client/rpc-core/src/types/filter.rs @@ -17,7 +17,7 @@ // along with this program. If not, see . use std::{ - collections::BTreeMap, + collections::{BTreeMap, HashSet}, sync::{Arc, Mutex}, }; @@ -460,6 +460,7 @@ pub struct FilterPoolItem { pub last_poll: BlockNumber, pub filter_type: FilterType, pub at_block: u64, + pub pending_transaction_hashes: HashSet, } /// On-memory stored filters created through the `eth_newFilter` RPC. diff --git a/client/rpc-core/src/types/mod.rs b/client/rpc-core/src/types/mod.rs index 4384415428..98b6e5b417 100644 --- a/client/rpc-core/src/types/mod.rs +++ b/client/rpc-core/src/types/mod.rs @@ -31,6 +31,7 @@ mod receipt; mod sync; mod transaction; mod transaction_request; +mod txpool; mod work; pub mod pubsub; @@ -55,5 +56,6 @@ pub use self::{ }, transaction::{LocalTransactionStatus, RichRawTransaction, Transaction}, transaction_request::{TransactionMessage, TransactionRequest}, + txpool::{Get, Summary, TransactionMap, TxPoolResult, TxPoolTransaction}, work::Work, }; diff --git a/client/rpc-core/src/types/txpool.rs b/client/rpc-core/src/types/txpool.rs new file mode 100644 index 0000000000..ed70f504ec --- /dev/null +++ b/client/rpc-core/src/types/txpool.rs @@ -0,0 +1,170 @@ +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 +// This file is part of Frontier. +// +// Copyright (c) 2015-2022 Parity Technologies (UK) Ltd. +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use std::collections::HashMap; + +use ethereum::{TransactionAction, TransactionV2 as EthereumTransaction}; +use ethereum_types::{H160, H256, U256}; +use serde::{Serialize, Serializer}; +// Frontier +use crate::types::Bytes; + +pub type TransactionMap = HashMap>; + +pub trait Get { + fn get(hash: H256, from_address: H160, txn: &EthereumTransaction) -> Self; +} + +#[derive(Debug, Serialize)] +pub struct TxPoolResult { + pub pending: T, + pub queued: T, +} + +#[derive(Clone, Debug)] +pub struct Summary { + pub to: Option, + pub value: U256, + pub gas: U256, + pub gas_price: U256, +} + +impl Serialize for Summary { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + let res = format!( + "0x{:x}: {} wei + {} gas x {} wei", + self.to.unwrap_or_default(), + self.value, + self.gas, + self.gas_price + ); + serializer.serialize_str(&res) + } +} + +impl Get for Summary { + fn get(_hash: H256, _from_address: H160, txn: &EthereumTransaction) -> Self { + let (action, value, gas_price, gas_limit) = match txn { + EthereumTransaction::Legacy(t) => (t.action, t.value, t.gas_price, t.gas_limit), + EthereumTransaction::EIP2930(t) => (t.action, t.value, t.gas_price, t.gas_limit), + EthereumTransaction::EIP1559(t) => (t.action, t.value, t.max_fee_per_gas, t.gas_limit), + }; + Self { + to: match action { + TransactionAction::Call(to) => Some(to), + _ => None, + }, + value, + gas_price, + gas: gas_limit, + } + } +} + +#[derive(Debug, Default, Clone, PartialEq, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct TxPoolTransaction { + /// Hash + pub hash: H256, + /// Nonce + pub nonce: U256, + /// Block hash + #[serde(serialize_with = "block_hash_serialize")] + pub block_hash: Option, + /// Block number + pub block_number: Option, + /// Sender + pub from: H160, + /// Recipient + #[serde(serialize_with = "to_serialize")] + pub to: Option, + /// Transfered value + pub value: U256, + /// Gas Price + pub gas_price: U256, + /// Gas + pub gas: U256, + /// Data + pub input: Bytes, + /// Transaction Index + pub transaction_index: Option, +} + +fn block_hash_serialize(hash: &Option, serializer: S) -> Result +where + S: Serializer, +{ + serializer.serialize_str(&format!("0x{:x}", hash.unwrap_or_default())) +} + +fn to_serialize(hash: &Option, serializer: S) -> Result +where + S: Serializer, +{ + serializer.serialize_str(&format!("0x{:x}", hash.unwrap_or_default())) +} + +impl Get for TxPoolTransaction { + fn get(hash: H256, from_address: H160, txn: &EthereumTransaction) -> Self { + let (nonce, action, value, gas_price, gas_limit, input) = match txn { + EthereumTransaction::Legacy(t) => ( + t.nonce, + t.action, + t.value, + t.gas_price, + t.gas_limit, + t.input.clone(), + ), + EthereumTransaction::EIP2930(t) => ( + t.nonce, + t.action, + t.value, + t.gas_price, + t.gas_limit, + t.input.clone(), + ), + EthereumTransaction::EIP1559(t) => ( + t.nonce, + t.action, + t.value, + t.max_fee_per_gas, + t.gas_limit, + t.input.clone(), + ), + }; + Self { + hash, + nonce, + block_hash: None, + block_number: None, + from: from_address, + to: match action { + TransactionAction::Call(to) => Some(to), + _ => None, + }, + value, + gas_price, + gas: gas_limit, + input: Bytes(input), + transaction_index: None, + } + } +} diff --git a/client/rpc/Cargo.toml b/client/rpc/Cargo.toml index 820bdbb4ed..000f065a44 100644 --- a/client/rpc/Cargo.toml +++ b/client/rpc/Cargo.toml @@ -24,6 +24,7 @@ prometheus = { version = "0.13.1", default-features = false } rand = "0.8" rlp = { workspace = true } scale-codec = { 
package = "parity-scale-codec", workspace = true } +serde = { workspace = true } tokio = { version = "1.24", features = ["sync"] } # Substrate diff --git a/client/rpc/src/eth/filter.rs b/client/rpc/src/eth/filter.rs index fe3f1e2502..c0dd08ab8f 100644 --- a/client/rpc/src/eth/filter.rs +++ b/client/rpc/src/eth/filter.rs @@ -16,13 +16,14 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use std::{marker::PhantomData, sync::Arc, time}; +use std::{collections::HashSet, marker::PhantomData, sync::Arc, time}; use ethereum::BlockV2 as EthereumBlock; use ethereum_types::{H256, U256}; use jsonrpsee::core::{async_trait, RpcResult}; // Substrate use sc_client_api::backend::{Backend, StorageProvider}; +use sc_transaction_pool::ChainApi; use sp_api::ProvideRuntimeApi; use sp_blockchain::HeaderBackend; use sp_core::hashing::keccak_256; @@ -31,14 +32,14 @@ use sp_runtime::{ traits::{Block as BlockT, NumberFor, One, Saturating, UniqueSaturatedInto}, }; // Frontier +use crate::{eth::cache::EthBlockDataCacheTask, frontier_backend_client, internal_err, TxPool}; use fc_rpc_core::{types::*, EthFilterApiServer}; use fp_rpc::{EthereumRuntimeRPCApi, TransactionStatus}; -use crate::{eth::cache::EthBlockDataCacheTask, frontier_backend_client, internal_err}; - -pub struct EthFilter { +pub struct EthFilter { client: Arc, backend: Arc + Send + Sync>, + tx_pool: TxPool, filter_pool: FilterPool, max_stored_filters: usize, max_past_logs: u32, @@ -46,10 +47,11 @@ pub struct EthFilter { _marker: PhantomData, } -impl EthFilter { +impl EthFilter { pub fn new( client: Arc, backend: Arc + Send + Sync>, + tx_pool: TxPool, filter_pool: FilterPool, max_stored_filters: usize, max_past_logs: u32, @@ -58,6 +60,7 @@ impl EthFilter { Self { client, backend, + tx_pool, filter_pool, max_stored_filters, max_past_logs, @@ -67,10 +70,12 @@ impl EthFilter { } } -impl EthFilter +impl EthFilter where + A: ChainApi + 'static, B: BlockT, - C: HeaderBackend, + C: HeaderBackend + ProvideRuntimeApi + 'static, + C::Api: EthereumRuntimeRPCApi, { fn create_filter(&self, filter_type: FilterType) -> RpcResult { let block_number = @@ -90,6 +95,16 @@ where Some((k, _)) => *k, None => U256::zero(), }; + let pending_transaction_hashes = if let FilterType::PendingTransaction = filter_type { + self.tx_pool + .tx_pool_response()? + .ready + .into_iter() + .map(|tx| tx.hash()) + .collect() + } else { + HashSet::new() + }; // Assume `max_stored_filters` is always < U256::max. 
let key = last_key.checked_add(U256::one()).unwrap(); locked.insert( @@ -98,6 +113,7 @@ where last_poll: BlockNumber::Num(block_number), filter_type, at_block: block_number, + pending_transaction_hashes, }, ); Ok(key) @@ -109,12 +125,12 @@ where } #[async_trait] -impl EthFilterApiServer for EthFilter +impl EthFilterApiServer for EthFilter where + A: ChainApi + 'static, B: BlockT, - C: ProvideRuntimeApi, + C: HeaderBackend + ProvideRuntimeApi + StorageProvider + 'static, C::Api: EthereumRuntimeRPCApi, - C: HeaderBackend + StorageProvider + 'static, BE: Backend + 'static, { fn new_filter(&self, filter: Filter) -> RpcResult { @@ -126,7 +142,7 @@ where } fn new_pending_transaction_filter(&self) -> RpcResult { - Err(internal_err("Method not available.")) + self.create_filter(FilterType::PendingTransaction) } async fn filter_changes(&self, index: Index) -> RpcResult { @@ -143,6 +159,9 @@ where last: u64, next: u64, }, + PendingTransaction { + new_hashes: Vec, + }, Log { filter: Filter, from_number: NumberFor, @@ -171,11 +190,40 @@ where last_poll: BlockNumber::Num(next), filter_type: pool_item.filter_type.clone(), at_block: pool_item.at_block, + pending_transaction_hashes: HashSet::new(), }, ); FuturePath::::Block { last, next } } + FilterType::PendingTransaction => { + let previous_hashes = pool_item.pending_transaction_hashes; + let current_hashes: HashSet = self + .tx_pool + .tx_pool_response()? + .ready + .into_iter() + .map(|tx| tx.hash()) + .collect(); + + // Update filter `last_poll`. + locked.insert( + key, + FilterPoolItem { + last_poll: BlockNumber::Num(block_number + 1), + filter_type: pool_item.filter_type.clone(), + at_block: pool_item.at_block, + pending_transaction_hashes: current_hashes.clone(), + }, + ); + + let mew_hashes = current_hashes + .difference(&previous_hashes) + .collect::>(); + FuturePath::PendingTransaction { + new_hashes: mew_hashes.into_iter().copied().collect(), + } + } // For each event since last poll, get a vector of ethereum logs. FilterType::Log(filter) => { // Update filter `last_poll`. @@ -185,6 +233,7 @@ where last_poll: BlockNumber::Num(block_number + 1), filter_type: pool_item.filter_type.clone(), at_block: pool_item.at_block, + pending_transaction_hashes: HashSet::new(), }, ); @@ -222,8 +271,6 @@ where current_number, } } - // Should never reach here. 
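The polling behaviour added here is effectively a set difference over transaction hashes: each call to `filter_changes` returns only the hashes that entered the ready pool since the previous poll. A standalone sketch, with illustrative names that are not part of the crate's API:

use std::collections::HashSet;
use sp_core::H256;

// `previous` holds the hashes recorded at the last poll, `current` the hashes
// presently in the ready pool; only the newly observed hashes are reported.
fn newly_pending(previous: &HashSet<H256>, current: &HashSet<H256>) -> Vec<H256> {
    current.difference(previous).copied().collect()
}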
- _ => FuturePath::Error(internal_err("Method not available.")), } } else { FuturePath::Error(internal_err(format!("Filter id {:?} does not exist.", key))) @@ -257,6 +304,7 @@ where } Ok(FilterChanges::Hashes(ethereum_hashes)) } + FuturePath::PendingTransaction { new_hashes } => Ok(FilterChanges::Hashes(new_hashes)), FuturePath::Log { filter, from_number, @@ -472,9 +520,8 @@ async fn filter_range_logs_indexed( ) -> RpcResult<()> where B: BlockT, - C: ProvideRuntimeApi, + C: HeaderBackend + ProvideRuntimeApi + StorageProvider + 'static, C::Api: EthereumRuntimeRPCApi, - C: HeaderBackend + StorageProvider + 'static, BE: Backend + 'static, { use std::time::Instant; diff --git a/client/rpc/src/lib.rs b/client/rpc/src/lib.rs index 2cc91cc820..c9e78b379c 100644 --- a/client/rpc/src/lib.rs +++ b/client/rpc/src/lib.rs @@ -30,6 +30,7 @@ mod eth; mod eth_pubsub; mod net; mod signer; +mod txpool; mod web3; pub use self::{ @@ -37,11 +38,14 @@ pub use self::{ eth_pubsub::{EthPubSub, EthereumSubIdProvider}, net::Net, signer::{EthDevSigner, EthSigner}, + txpool::TxPool, web3::Web3, }; + pub use ethereum::TransactionV2 as EthereumTransaction; pub use fc_rpc_core::{ - EthApiServer, EthFilterApiServer, EthPubSubApiServer, NetApiServer, Web3ApiServer, + EthApiServer, EthFilterApiServer, EthPubSubApiServer, NetApiServer, TxPoolApiServer, + Web3ApiServer, }; pub use fc_storage::{ OverrideHandle, RuntimeApiStorageOverride, SchemaV1Override, SchemaV2Override, diff --git a/client/rpc/src/txpool.rs b/client/rpc/src/txpool.rs new file mode 100644 index 0000000000..ab8e7c54a3 --- /dev/null +++ b/client/rpc/src/txpool.rs @@ -0,0 +1,176 @@ +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 +// This file is part of Frontier. +// +// Copyright (c) 2020-2022 Parity Technologies (UK) Ltd. +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use std::{collections::HashMap, marker::PhantomData, sync::Arc}; + +use ethereum::TransactionV2; +use ethereum_types::{H160, H256, U256}; +use jsonrpsee::core::RpcResult; +use serde::Serialize; +// substrate +use sc_transaction_pool::{ChainApi, Pool}; +use sc_transaction_pool_api::InPoolTransaction; +use sp_api::ProvideRuntimeApi; +use sp_blockchain::HeaderBackend; +use sp_core::hashing::keccak_256; +use sp_runtime::traits::Block as BlockT; +// Frontier +use crate::{internal_err, public_key}; +use fc_rpc_core::{ + types::{Get, Summary, TransactionMap, TxPoolResult, TxPoolTransaction}, + TxPoolApiServer, +}; +use fp_rpc::{EthereumRuntimeRPCApi, TxPoolResponse}; + +pub struct TxPool { + client: Arc, + graph: Arc>, + _marker: PhantomData, +} + +impl Clone for TxPool { + fn clone(&self) -> Self { + Self { + client: self.client.clone(), + graph: self.graph.clone(), + _marker: PhantomData, + } + } +} + +impl TxPool +where + A: ChainApi + 'static, + B: BlockT + Send + Sync + 'static, + C: Send + Sync + 'static, + C: HeaderBackend + ProvideRuntimeApi, + C::Api: EthereumRuntimeRPCApi, +{ + /// Use the transaction graph interface to get the extrinsics currently in the ready and future + /// queues. + fn map_build(&self) -> RpcResult>> + where + T: Get + Serialize, + { + // Get the pending and queued ethereum transactions. + let ethereum_txns = self.tx_pool_response()?; + + // Build the T response. + let mut pending = TransactionMap::::new(); + for txn in ethereum_txns.ready.iter() { + let hash = txn.hash(); + let nonce = match txn { + TransactionV2::Legacy(t) => t.nonce, + TransactionV2::EIP2930(t) => t.nonce, + TransactionV2::EIP1559(t) => t.nonce, + }; + let from_address = match public_key(txn) { + Ok(pk) => H160::from(H256::from_slice(keccak_256(&pk).as_slice())), + Err(_e) => H160::default(), + }; + pending + .entry(from_address) + .or_insert_with(HashMap::new) + .insert(nonce, T::get(hash, from_address, txn)); + } + let mut queued = TransactionMap::::new(); + for txn in ethereum_txns.future.iter() { + let hash = txn.hash(); + let nonce = match txn { + TransactionV2::Legacy(t) => t.nonce, + TransactionV2::EIP2930(t) => t.nonce, + TransactionV2::EIP1559(t) => t.nonce, + }; + let from_address = match public_key(txn) { + Ok(pk) => H160::from(H256::from_slice(keccak_256(&pk).as_slice())), + Err(_e) => H160::default(), + }; + queued + .entry(from_address) + .or_insert_with(HashMap::new) + .insert(nonce, T::get(hash, from_address, txn)); + } + Ok(TxPoolResult { pending, queued }) + } + + pub(crate) fn tx_pool_response(&self) -> RpcResult { + // Collect transactions in the ready validated pool. + let txs_ready = self + .graph + .validated_pool() + .ready() + .map(|in_pool_tx| in_pool_tx.data().clone()) + .collect(); + + // Collect transactions in the future validated pool. + let txs_future = self + .graph + .validated_pool() + .futures() + .iter() + .map(|(_hash, extrinsic)| extrinsic.clone()) + .collect(); + + // Use the runtime to match the (here) opaque extrinsics against ethereum transactions. 
+ let best_block = self.client.info().best_hash; + let api = self.client.runtime_api(); + let ready = api + .extrinsic_filter(best_block, txs_ready) + .map_err(|err| internal_err(format!("fetch ready transactions failed: {:?}", err)))?; + let future = api + .extrinsic_filter(best_block, txs_future) + .map_err(|err| internal_err(format!("fetch future transactions failed: {:?}", err)))?; + + Ok(TxPoolResponse { ready, future }) + } +} + +impl TxPool { + pub fn new(client: Arc, graph: Arc>) -> Self { + Self { + client, + graph, + _marker: PhantomData, + } + } +} + +impl TxPoolApiServer for TxPool +where + A: ChainApi + 'static, + B: BlockT + Send + Sync + 'static, + C: Send + Sync + 'static, + C: HeaderBackend + ProvideRuntimeApi, + C::Api: EthereumRuntimeRPCApi, +{ + fn content(&self) -> RpcResult>> { + self.map_build::() + } + + fn inspect(&self) -> RpcResult>> { + self.map_build::() + } + + fn status(&self) -> RpcResult> { + let status = self.graph.validated_pool().status(); + Ok(TxPoolResult { + pending: U256::from(status.ready), + queued: U256::from(status.future), + }) + } +} diff --git a/primitives/rpc/src/lib.rs b/primitives/rpc/src/lib.rs index f32b32408a..045402fdf5 100644 --- a/primitives/rpc/src/lib.rs +++ b/primitives/rpc/src/lib.rs @@ -40,6 +40,12 @@ pub struct TransactionStatus { pub logs_bloom: Bloom, } +#[derive(Eq, PartialEq, Clone, Encode, Decode, sp_runtime::RuntimeDebug)] +pub struct TxPoolResponse { + pub ready: Vec, + pub future: Vec, +} + pub trait RuntimeStorageOverride: Send + Sync { fn is_enabled() -> bool; diff --git a/template/node/src/eth.rs b/template/node/src/eth.rs index 8f28a4b637..765ba6c3ae 100644 --- a/template/node/src/eth.rs +++ b/template/node/src/eth.rs @@ -123,8 +123,8 @@ pub fn new_frontier_partial( /// A set of APIs that ethereum-compatible runtimes must implement. 
pub trait EthCompatRuntimeApiCollection: sp_api::ApiExt - + fp_rpc::EthereumRuntimeRPCApi + fp_rpc::ConvertTransactionRuntimeApi + + fp_rpc::EthereumRuntimeRPCApi where >::StateBackend: sp_api::StateBackend, { @@ -133,8 +133,8 @@ where impl EthCompatRuntimeApiCollection for Api where Api: sp_api::ApiExt - + fp_rpc::EthereumRuntimeRPCApi - + fp_rpc::ConvertTransactionRuntimeApi, + + fp_rpc::ConvertTransactionRuntimeApi + + fp_rpc::EthereumRuntimeRPCApi, >::StateBackend: sp_api::StateBackend, { } diff --git a/template/node/src/rpc/eth.rs b/template/node/src/rpc/eth.rs index 67db9a8161..2117cc6ba7 100644 --- a/template/node/src/rpc/eth.rs +++ b/template/node/src/rpc/eth.rs @@ -17,7 +17,7 @@ use sp_blockchain::{Error as BlockChainError, HeaderBackend, HeaderMetadata}; use sp_core::H256; use sp_runtime::traits::Block as BlockT; // Frontier -pub use fc_rpc::{EthBlockDataCacheTask, EthConfig, OverrideHandle, StorageOverride}; +pub use fc_rpc::{EthBlockDataCacheTask, EthConfig, OverrideHandle, StorageOverride, TxPool}; pub use fc_rpc_core::types::{FeeHistoryCache, FeeHistoryCacheLimit, FilterPool}; pub use fc_storage::overrides_handle; use fp_rpc::{ConvertTransaction, ConvertTransactionRuntimeApi, EthereumRuntimeRPCApi}; @@ -99,7 +99,7 @@ pub fn create_eth>( where B: BlockT, C: CallApiAt + ProvideRuntimeApi, - C::Api: BlockBuilderApi + EthereumRuntimeRPCApi + ConvertTransactionRuntimeApi, + C::Api: BlockBuilderApi + ConvertTransactionRuntimeApi + EthereumRuntimeRPCApi, C: BlockchainEvents + 'static, C: HeaderBackend + HeaderMetadata + StorageProvider, BE: Backend + 'static, @@ -109,7 +109,7 @@ where { use fc_rpc::{ Eth, EthApiServer, EthDevSigner, EthFilter, EthFilterApiServer, EthPubSub, - EthPubSubApiServer, EthSigner, Net, NetApiServer, Web3, Web3ApiServer, + EthPubSubApiServer, EthSigner, Net, NetApiServer, TxPoolApiServer, Web3, Web3ApiServer, }; let EthDeps { @@ -141,7 +141,7 @@ where Eth::new( client.clone(), pool.clone(), - graph, + graph.clone(), converter, sync.clone(), signers, @@ -158,11 +158,13 @@ where .into_rpc(), )?; + let tx_pool = TxPool::new(client.clone(), graph); if let Some(filter_pool) = filter_pool { io.merge( EthFilter::new( client.clone(), frontier_backend, + tx_pool.clone(), filter_pool, 500_usize, // max stored filters max_past_logs, @@ -195,6 +197,7 @@ where )?; io.merge(Web3::new(client).into_rpc())?; + io.merge(tx_pool.into_rpc())?; Ok(io) } diff --git a/ts-tests/tests/test-filter-api.ts b/ts-tests/tests/test-filter-api.ts index f321a83621..f39387bd81 100644 --- a/ts-tests/tests/test-filter-api.ts +++ b/ts-tests/tests/test-filter-api.ts @@ -8,6 +8,7 @@ describeWithFrontier("Frontier RPC (EthFilterApi)", (context) => { const TEST_CONTRACT_BYTECODE = 
"0x608060405234801561001057600080fd5b50610041337fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff61004660201b60201c565b610291565b600073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff1614156100e9576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252601f8152602001807f45524332303a206d696e7420746f20746865207a65726f20616464726573730081525060200191505060405180910390fd5b6101028160025461020960201b610c7c1790919060201c565b60028190555061015d816000808573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000205461020960201b610c7c1790919060201c565b6000808473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020819055508173ffffffffffffffffffffffffffffffffffffffff16600073ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef836040518082815260200191505060405180910390a35050565b600080828401905083811015610287576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252601b8152602001807f536166654d6174683a206164646974696f6e206f766572666c6f77000000000081525060200191505060405180910390fd5b8091505092915050565b610e3a806102a06000396000f3fe608060405234801561001057600080fd5b50600436106100885760003560e01c806370a082311161005b57806370a08231146101fd578063a457c2d714610255578063a9059cbb146102bb578063dd62ed3e1461032157610088565b8063095ea7b31461008d57806318160ddd146100f357806323b872dd146101115780633950935114610197575b600080fd5b6100d9600480360360408110156100a357600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff16906020019092919080359060200190929190505050610399565b604051808215151515815260200191505060405180910390f35b6100fb6103b7565b6040518082815260200191505060405180910390f35b61017d6004803603606081101561012757600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190803573ffffffffffffffffffffffffffffffffffffffff169060200190929190803590602001909291905050506103c1565b604051808215151515815260200191505060405180910390f35b6101e3600480360360408110156101ad57600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff1690602001909291908035906020019092919050505061049a565b604051808215151515815260200191505060405180910390f35b61023f6004803603602081101561021357600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff16906020019092919050505061054d565b6040518082815260200191505060405180910390f35b6102a16004803603604081101561026b57600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff16906020019092919080359060200190929190505050610595565b604051808215151515815260200191505060405180910390f35b610307600480360360408110156102d157600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff16906020019092919080359060200190929190505050610662565b604051808215151515815260200191505060405180910390f35b6103836004803603604081101561033757600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050610680565b6040518082815260200191505060405180910390f35b60006103ad6103a6610707565b848461070f565b6001905092915050565b6000600254905090565b60006103ce848484610906565b61048f846103da610707565b61048a85604051806060016040528060288152602001610d7060289139600160008b73ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000206000610440610707565b73ffffffffffffffffffffffffffff
ffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054610bbc9092919063ffffffff16565b61070f565b600190509392505050565b60006105436104a7610707565b8461053e85600160006104b8610707565b73ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008973ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054610c7c90919063ffffffff16565b61070f565b6001905092915050565b60008060008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020549050919050565b60006106586105a2610707565b8461065385604051806060016040528060258152602001610de160259139600160006105cc610707565b73ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008a73ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054610bbc9092919063ffffffff16565b61070f565b6001905092915050565b600061067661066f610707565b8484610906565b6001905092915050565b6000600160008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054905092915050565b600033905090565b600073ffffffffffffffffffffffffffffffffffffffff168373ffffffffffffffffffffffffffffffffffffffff161415610795576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401808060200182810382526024815260200180610dbd6024913960400191505060405180910390fd5b600073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff16141561081b576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401808060200182810382526022815260200180610d286022913960400191505060405180910390fd5b80600160008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020819055508173ffffffffffffffffffffffffffffffffffffffff168373ffffffffffffffffffffffffffffffffffffffff167f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925836040518082815260200191505060405180910390a3505050565b600073ffffffffffffffffffffffffffffffffffffffff168373ffffffffffffffffffffffffffffffffffffffff16141561098c576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401808060200182810382526025815260200180610d986025913960400191505060405180910390fd5b600073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff161415610a12576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401808060200182810382526023815260200180610d056023913960400191505060405180910390fd5b610a7d81604051806060016040528060268152602001610d4a602691396000808773ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054610bbc9092919063ffffffff16565b6000808573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002081905550610b10816000808573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054610c7c90919063ffffffff16565b6000808473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168
152602001908152602001600020819055508173ffffffffffffffffffffffffffffffffffffffff168373ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef836040518082815260200191505060405180910390a3505050565b6000838311158290610c69576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825283818151815260200191508051906020019080838360005b83811015610c2e578082015181840152602081019050610c13565b50505050905090810190601f168015610c5b5780820380516001836020036101000a031916815260200191505b509250505060405180910390fd5b5060008385039050809150509392505050565b600080828401905083811015610cfa576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252601b8152602001807f536166654d6174683a206164646974696f6e206f766572666c6f77000000000081525060200191505060405180910390fd5b809150509291505056fe45524332303a207472616e7366657220746f20746865207a65726f206164647265737345524332303a20617070726f766520746f20746865207a65726f206164647265737345524332303a207472616e7366657220616d6f756e7420657863656564732062616c616e636545524332303a207472616e7366657220616d6f756e74206578636565647320616c6c6f77616e636545524332303a207472616e736665722066726f6d20746865207a65726f206164647265737345524332303a20617070726f76652066726f6d20746865207a65726f206164647265737345524332303a2064656372656173656420616c6c6f77616e63652062656c6f77207a65726fa265627a7a72315820c7a5ffabf642bda14700b2de42f8c57b36621af020441df825de45fd2b3e1c5c64736f6c63430005100032"; + var nonce = 0; async function sendTransaction(context) { const tx = await context.web3.eth.accounts.signTransaction( { @@ -16,10 +17,11 @@ describeWithFrontier("Frontier RPC (EthFilterApi)", (context) => { value: "0x00", gasPrice: "0x3B9ACA00", gas: "0x100000", + nonce: nonce, }, GENESIS_ACCOUNT_PRIVATE_KEY ); - + nonce = nonce + 1; await customRequest(context.web3, "eth_sendRawTransaction", [tx.rawTransaction]); return tx; } @@ -54,10 +56,8 @@ describeWithFrontier("Frontier RPC (EthFilterApi)", (context) => { }); step("should return unsupported error for Pending Transaction filter creation", async function () { - let r = await customRequest(context.web3, "eth_newPendingTransactionFilter", []); - expect(r.error).to.include({ - message: "Method not available.", - }); + let createFilter = await customRequest(context.web3, "eth_newPendingTransactionFilter", []); + expect(createFilter.result).to.be.eq("0x4"); }); step("should return responses for Block filter polling.", async function () { @@ -87,6 +87,31 @@ describeWithFrontier("Frontier RPC (EthFilterApi)", (context) => { expect(poll.result[1]).to.be.eq(block_b.hash); }); + step("should return responses for pending transaction polling.", async function () { + let poll = await customRequest(context.web3, "eth_getFilterChanges", ["0x4"]); + expect(poll.result.length).to.be.eq(0); + + // fist polling + let tx = await sendTransaction(context); + poll = await customRequest(context.web3, "eth_getFilterChanges", ["0x4"]); + expect(poll.result.length).to.be.eq(1); + expect(poll.result).contains(tx.transactionHash); + + // second polling + let tx1 = await sendTransaction(context); + let tx2 = await sendTransaction(context); + poll = await customRequest(context.web3, "eth_getFilterChanges", ["0x4"]); + expect(poll.result.length).to.be.eq(2); + expect(poll.result).contains(tx1.transactionHash); + expect(poll.result).contains(tx2.transactionHash); + + await createAndFinalizeBlock(context.web3); + + // the last polling after finalized block + poll = await 
customRequest(context.web3, "eth_getFilterChanges", ["0x4"]); + expect(poll.result.length).to.be.eq(0); + }); + step("should return responses for Log filter polling.", async function () { // Create contract. let tx = await sendTransaction(context); @@ -157,7 +182,7 @@ describeWithFrontier("Frontier RPC (EthFilterApi)", (context) => { // Should return error if does not exist. let r = await customRequest(context.web3, "eth_uninstallFilter", [filterId]); expect(r.error).to.include({ - message: "Filter id 6 does not exist.", + message: "Filter id 7 does not exist.", }); }); @@ -174,7 +199,7 @@ describeWithFrontier("Frontier RPC (EthFilterApi)", (context) => { let r = await customRequest(context.web3, "eth_getFilterChanges", [filterId]); expect(r.error).to.include({ - message: "Filter id 6 does not exist.", + message: "Filter id 7 does not exist.", }); }); diff --git a/ts-tests/tests/txpool.ts b/ts-tests/tests/txpool.ts new file mode 100644 index 0000000000..102368ec0a --- /dev/null +++ b/ts-tests/tests/txpool.ts @@ -0,0 +1,59 @@ +import { expect } from "chai"; +import { step } from "mocha-steps"; + +import { GENESIS_ACCOUNT, GENESIS_ACCOUNT_PRIVATE_KEY } from "./config"; +import { describeWithFrontier, customRequest } from "./util"; + +describeWithFrontier("Frontier RPC (TxPoolApi)", (context) => { + const TEST_CONTRACT_BYTECODE = + "0x608060405234801561001057600080fd5b50610041337fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff61004660201b60201c565b610291565b600073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff1614156100e9576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252601f8152602001807f45524332303a206d696e7420746f20746865207a65726f20616464726573730081525060200191505060405180910390fd5b6101028160025461020960201b610c7c1790919060201c565b60028190555061015d816000808573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000205461020960201b610c7c1790919060201c565b6000808473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020819055508173ffffffffffffffffffffffffffffffffffffffff16600073ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef836040518082815260200191505060405180910390a35050565b600080828401905083811015610287576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252601b8152602001807f536166654d6174683a206164646974696f6e206f766572666c6f77000000000081525060200191505060405180910390fd5b8091505092915050565b610e3a806102a06000396000f3fe608060405234801561001057600080fd5b50600436106100885760003560e01c806370a082311161005b57806370a08231146101fd578063a457c2d714610255578063a9059cbb146102bb578063dd62ed3e1461032157610088565b8063095ea7b31461008d57806318160ddd146100f357806323b872dd146101115780633950935114610197575b600080fd5b6100d9600480360360408110156100a357600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff16906020019092919080359060200190929190505050610399565b604051808215151515815260200191505060405180910390f35b6100fb6103b7565b6040518082815260200191505060405180910390f35b61017d6004803603606081101561012757600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190803573ffffffffffffffffffffffffffffffffffffffff169060200190929190803590602001909291905050506103c1565b604051808215151515815260200191505060405180910390f35b6101e3600480360360408110156101ad57600080f
d5b81019080803573ffffffffffffffffffffffffffffffffffffffff1690602001909291908035906020019092919050505061049a565b604051808215151515815260200191505060405180910390f35b61023f6004803603602081101561021357600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff16906020019092919050505061054d565b6040518082815260200191505060405180910390f35b6102a16004803603604081101561026b57600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff16906020019092919080359060200190929190505050610595565b604051808215151515815260200191505060405180910390f35b610307600480360360408110156102d157600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff16906020019092919080359060200190929190505050610662565b604051808215151515815260200191505060405180910390f35b6103836004803603604081101561033757600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050610680565b6040518082815260200191505060405180910390f35b60006103ad6103a6610707565b848461070f565b6001905092915050565b6000600254905090565b60006103ce848484610906565b61048f846103da610707565b61048a85604051806060016040528060288152602001610d7060289139600160008b73ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000206000610440610707565b73ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054610bbc9092919063ffffffff16565b61070f565b600190509392505050565b60006105436104a7610707565b8461053e85600160006104b8610707565b73ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008973ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054610c7c90919063ffffffff16565b61070f565b6001905092915050565b60008060008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020549050919050565b60006106586105a2610707565b8461065385604051806060016040528060258152602001610de160259139600160006105cc610707565b73ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008a73ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054610bbc9092919063ffffffff16565b61070f565b6001905092915050565b600061067661066f610707565b8484610906565b6001905092915050565b6000600160008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054905092915050565b600033905090565b600073ffffffffffffffffffffffffffffffffffffffff168373ffffffffffffffffffffffffffffffffffffffff161415610795576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401808060200182810382526024815260200180610dbd6024913960400191505060405180910390fd5b600073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff16141561081b576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401808060200182810382526022815260200180610d286022913960400191505060405180910390fd5b80600160008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000208190555081
73ffffffffffffffffffffffffffffffffffffffff168373ffffffffffffffffffffffffffffffffffffffff167f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925836040518082815260200191505060405180910390a3505050565b600073ffffffffffffffffffffffffffffffffffffffff168373ffffffffffffffffffffffffffffffffffffffff16141561098c576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401808060200182810382526025815260200180610d986025913960400191505060405180910390fd5b600073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff161415610a12576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401808060200182810382526023815260200180610d056023913960400191505060405180910390fd5b610a7d81604051806060016040528060268152602001610d4a602691396000808773ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054610bbc9092919063ffffffff16565b6000808573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002081905550610b10816000808573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054610c7c90919063ffffffff16565b6000808473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020819055508173ffffffffffffffffffffffffffffffffffffffff168373ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef836040518082815260200191505060405180910390a3505050565b6000838311158290610c69576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825283818151815260200191508051906020019080838360005b83811015610c2e578082015181840152602081019050610c13565b50505050905090810190601f168015610c5b5780820380516001836020036101000a031916815260200191505b509250505060405180910390fd5b5060008385039050809150509392505050565b600080828401905083811015610cfa576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252601b8152602001807f536166654d6174683a206164646974696f6e206f766572666c6f77000000000081525060200191505060405180910390fd5b809150509291505056fe45524332303a207472616e7366657220746f20746865207a65726f206164647265737345524332303a20617070726f766520746f20746865207a65726f206164647265737345524332303a207472616e7366657220616d6f756e7420657863656564732062616c616e636545524332303a207472616e7366657220616d6f756e74206578636565647320616c6c6f77616e636545524332303a207472616e736665722066726f6d20746865207a65726f206164647265737345524332303a20617070726f76652066726f6d20746865207a65726f206164647265737345524332303a2064656372656173656420616c6c6f77616e63652062656c6f77207a65726fa265627a7a72315820c7a5ffabf642bda14700b2de42f8c57b36621af020441df825de45fd2b3e1c5c64736f6c63430005100032"; + + var nonce = 0; + let pending_tx; + let future_tx; + async function sendTransaction(context, nonce) { + const tx = await context.web3.eth.accounts.signTransaction( + { + from: GENESIS_ACCOUNT, + data: TEST_CONTRACT_BYTECODE, + value: "0x00", + gasPrice: "0x3B9ACA00", + gas: "0x100000", + nonce: nonce, + }, + GENESIS_ACCOUNT_PRIVATE_KEY + ); + await customRequest(context.web3, "eth_sendRawTransaction", [tx.rawTransaction]); + return tx; + } + + step("txpool_status should return correct result", async function () { + let txpoolStatus = await customRequest(context.web3, "txpool_status", []); + expect(txpoolStatus.result.pending).to.be.equal("0x0"); + 
expect(txpoolStatus.result.queued).to.be.equal("0x0"); + + pending_tx = await sendTransaction(context, nonce); + future_tx = await sendTransaction(context, nonce + 3); + txpoolStatus = await customRequest(context.web3, "txpool_status", []); + expect(txpoolStatus.result.pending).to.be.equal("0x1"); + expect(txpoolStatus.result.queued).to.be.equal("0x1"); + }); + + step("txpool_content should return correct result", async function () { + let txpoolContent = await customRequest(context.web3, "txpool_content", []); + expect(txpoolContent.result.pending[GENESIS_ACCOUNT]["0x0"].nonce).to.be.equal("0x0"); + expect(txpoolContent.result.pending[GENESIS_ACCOUNT]["0x0"].hash).to.be.equal(pending_tx.transactionHash); + expect(txpoolContent.result.queued[GENESIS_ACCOUNT]["0x3"].nonce).to.be.equal("0x3"); + expect(txpoolContent.result.queued[GENESIS_ACCOUNT]["0x3"].hash).to.be.equal(future_tx.transactionHash); + }); + + step("txpool_inspect should return correct result", async function () { + let txpoolInspect = await customRequest(context.web3, "txpool_inspect", []); + expect(txpoolInspect.result.pending[GENESIS_ACCOUNT]["0x0"]).to.be.equal( + "0x0000000000000000000000000000000000000000: 0 wei + 1048576 gas x 1000000000 wei" + ); + expect(txpoolInspect.result.queued[GENESIS_ACCOUNT]["0x3"]).to.be.equal( + "0x0000000000000000000000000000000000000000: 0 wei + 1048576 gas x 1000000000 wei" + ); + }); +}); From 6102a1ac7a0cc7a6104fce94f178ae55caa75ea0 Mon Sep 17 00:00:00 2001 From: bear Date: Sun, 25 Jun 2023 17:35:53 +0800 Subject: [PATCH 10/12] Rename others `transaction_len` (#1090) --- frame/ethereum/src/lib.rs | 30 +++++++++++++++++------------- frame/evm/src/runner/mod.rs | 8 ++++---- primitives/evm/src/lib.rs | 10 +++++----- 3 files changed, 26 insertions(+), 22 deletions(-) diff --git a/frame/ethereum/src/lib.rs b/frame/ethereum/src/lib.rs index d9f1131db8..e667341c92 100644 --- a/frame/ethereum/src/lib.rs +++ b/frame/ethereum/src/lib.rs @@ -353,7 +353,8 @@ pub mod pallet { } impl Pallet { - fn transaction_len(transaction: &Transaction) -> u64 { + /// The call wrapped in the extrinsic is part of the PoV, record this as a base cost for the size of the proof. 
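As a minimal sketch of the intent behind this rename and the base-cost accounting (illustrative only; the standalone helper, its name and the `encoded_extrinsic_len` parameter are assumptions for exposition, not code from this patch), a PoV-aware runtime pairs the gas-derived weight limit with the encoded-extrinsic proof-size base cost roughly like this:

use frame_support::weights::Weight;
use pallet_evm::GasWeightMapping;

// Sketch: convert the transaction gas limit into a Weight and, only when the
// mapping yields a non-zero proof_size component (i.e. the runtime meters PoV),
// attach the SCALE-encoded extrinsic length as a fixed proof-size base cost.
// Runtimes without proof-size metering keep the legacy gas-only path (None, None).
fn weight_inputs<T: pallet_evm::Config>(
	gas_limit: u64,
	encoded_extrinsic_len: u64, // stands in for transaction.encode().len() plus extrinsic overhead
) -> (Option<Weight>, Option<u64>) {
	let weight_limit =
		<T as pallet_evm::Config>::GasWeightMapping::gas_to_weight(gas_limit, true);
	if weight_limit.proof_size() > 0 {
		(Some(weight_limit), Some(encoded_extrinsic_len))
	} else {
		(None, None)
	}
}

This mirrors the `match` on `weight_limit.proof_size() > 0` used throughout the hunks below, where the second element of the pair is what the rename turns into `proof_size_base_cost`.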
+ fn proof_size_base_cost(transaction: &Transaction) -> u64 { transaction .encode() .len() @@ -488,9 +489,10 @@ impl Pallet { transaction_data.gas_limit.unique_saturated_into(), true, ) { - weight_limit if weight_limit.proof_size() > 0 => { - (Some(weight_limit), Some(Self::transaction_len(transaction))) - } + weight_limit if weight_limit.proof_size() > 0 => ( + Some(weight_limit), + Some(Self::proof_size_base_cost(transaction)), + ), _ => (None, None), }; @@ -772,14 +774,15 @@ impl Pallet { let is_transactional = true; let validate = false; - let (transaction_len, weight_limit) = + let (proof_size_base_cost, weight_limit) = match ::GasWeightMapping::gas_to_weight( gas_limit.unique_saturated_into(), true, ) { - weight_limit if weight_limit.proof_size() > 0 => { - (Some(Self::transaction_len(transaction)), Some(weight_limit)) - } + weight_limit if weight_limit.proof_size() > 0 => ( + Some(Self::proof_size_base_cost(transaction)), + Some(weight_limit), + ), _ => (None, None), }; match action { @@ -797,7 +800,7 @@ impl Pallet { is_transactional, validate, weight_limit, - transaction_len, + proof_size_base_cost, config.as_ref().unwrap_or_else(|| T::config()), ) { Ok(res) => res, @@ -827,7 +830,7 @@ impl Pallet { is_transactional, validate, weight_limit, - transaction_len, + proof_size_base_cost, config.as_ref().unwrap_or_else(|| T::config()), ) { Ok(res) => res, @@ -865,9 +868,10 @@ impl Pallet { transaction_data.gas_limit.unique_saturated_into(), true, ) { - weight_limit if weight_limit.proof_size() > 0 => { - (Some(weight_limit), Some(Self::transaction_len(transaction))) - } + weight_limit if weight_limit.proof_size() > 0 => ( + Some(weight_limit), + Some(Self::proof_size_base_cost(transaction)), + ), _ => (None, None), }; diff --git a/frame/evm/src/runner/mod.rs b/frame/evm/src/runner/mod.rs index b23379690b..bda922d39c 100644 --- a/frame/evm/src/runner/mod.rs +++ b/frame/evm/src/runner/mod.rs @@ -43,7 +43,7 @@ pub trait Runner { access_list: Vec<(H160, Vec)>, is_transactional: bool, weight_limit: Option, - transaction_len: Option, + proof_size_base_cost: Option, evm_config: &evm::Config, ) -> Result<(), RunnerError>; @@ -60,7 +60,7 @@ pub trait Runner { is_transactional: bool, validate: bool, weight_limit: Option, - transaction_len: Option, + proof_size_base_cost: Option, config: &evm::Config, ) -> Result>; @@ -76,7 +76,7 @@ pub trait Runner { is_transactional: bool, validate: bool, weight_limit: Option, - transaction_len: Option, + proof_size_base_cost: Option, config: &evm::Config, ) -> Result>; @@ -93,7 +93,7 @@ pub trait Runner { is_transactional: bool, validate: bool, weight_limit: Option, - transaction_len: Option, + proof_size_base_cost: Option, config: &evm::Config, ) -> Result>; } diff --git a/primitives/evm/src/lib.rs b/primitives/evm/src/lib.rs index 8f618c4b20..0ac7d81559 100644 --- a/primitives/evm/src/lib.rs +++ b/primitives/evm/src/lib.rs @@ -84,18 +84,18 @@ pub struct WeightInfo { impl WeightInfo { pub fn new_from_weight_limit( weight_limit: Option, - transaction_len: Option, + proof_size_base_cost: Option, ) -> Result, &'static str> { - Ok(match (weight_limit, transaction_len) { + Ok(match (weight_limit, proof_size_base_cost) { (None, _) => None, - (Some(weight_limit), Some(transaction_len)) - if weight_limit.proof_size() >= transaction_len => + (Some(weight_limit), Some(proof_size_base_cost)) + if weight_limit.proof_size() >= proof_size_base_cost => { Some(WeightInfo { ref_time_limit: Some(weight_limit.ref_time()), proof_size_limit: Some(weight_limit.proof_size()), 
ref_time_usage: Some(0u64), - proof_size_usage: Some(transaction_len), + proof_size_usage: Some(proof_size_base_cost), }) } (Some(weight_limit), None) => Some(WeightInfo { From 308409d9913fbfeb4871ffc3432a5491f932fb96 Mon Sep 17 00:00:00 2001 From: Vedhavyas Singareddi Date: Mon, 26 Jun 2023 07:29:07 +0530 Subject: [PATCH 11/12] make sql and its dependencies optional (#1089) Making SQL optional would be great and does not brings unwanted dependencies when SQL is not used. Here is an example: https://github.com/paritytech/frontier/issues/1086 --- Cargo.lock | 2 +- client/db/Cargo.toml | 33 +++++++++++++++++++++++---------- client/db/src/lib.rs | 5 ++++- client/mapping-sync/Cargo.toml | 11 +++++++++-- client/mapping-sync/src/lib.rs | 1 + template/node/Cargo.toml | 4 ++-- 6 files changed, 40 insertions(+), 16 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 003e986fa6..bb893341fd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9997,7 +9997,7 @@ checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" dependencies = [ "cfg-if", "digest 0.10.6", - "rand 0.7.3", + "rand 0.8.5", "static_assertions", ] diff --git a/client/db/Cargo.toml b/client/db/Cargo.toml index 92d489e089..1f8f83b062 100644 --- a/client/db/Cargo.toml +++ b/client/db/Cargo.toml @@ -12,33 +12,45 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] async-trait = "0.1" -ethereum = { workspace = true, features = ["with-codec"] } -futures = "0.3.25" +ethereum = { workspace = true, features = ["with-codec"], optional = true } +futures = { version = "0.3.25", optional = true } kvdb-rocksdb = { workspace = true, optional = true } log = "0.4.17" parity-db = { workspace = true, optional = true } parking_lot = "0.12.1" scale-codec = { package = "parity-scale-codec", workspace = true } smallvec = { version = "1.10", optional = true } -sqlx = { workspace = true, features = ["runtime-tokio-native-tls", "sqlite"] } -tokio = { version = "1.19", features = ["macros", "sync"] } +sqlx = { workspace = true, features = ["runtime-tokio-native-tls", "sqlite"], optional = true } +tokio = { version = "1.19", features = ["macros", "sync"], optional = true } # Substrate -sc-client-api = { workspace = true } +sc-client-api = { workspace = true, optional = true } sc-client-db = { workspace = true } -sp-api = { workspace = true } +sp-api = { workspace = true, optional = true } sp-blockchain = { workspace = true } sp-core = { workspace = true } sp-database = { workspace = true } sp-runtime = { workspace = true } -sp-storage = { workspace = true } +sp-storage = { workspace = true, optional = true } # Frontier -fc-storage = { workspace = true } -fp-consensus = { workspace = true, features = ["default"] } -fp-rpc = { workspace = true, features = ["default"] } +fc-storage = { workspace = true, optional = true } +fp-consensus = { workspace = true, features = ["default"], optional = true } +fp-rpc = { workspace = true, features = ["default"], optional = true } fp-storage = { workspace = true, features = ["default"] } [features] default = ["parity-db"] +sql = [ + "ethereum", + "futures", + "sqlx", + "tokio", + "sc-client-api", + "sp-api", + "sp-storage", + "fc-storage", + "fp-consensus", + "fp-rpc", +] parity-db = ["dep:parity-db"] rocksdb = [ "kvdb-rocksdb", @@ -47,6 +59,7 @@ rocksdb = [ ] [dev-dependencies] +futures = { version = "0.3.25" } maplit = "1.0.2" tempfile = "3.3.0" # Substrate diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 8bb0f10618..2e4ed76aff 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ 
-25,12 +25,15 @@ use sp_core::H256; use sp_runtime::traits::Block as BlockT; pub mod kv; -pub mod sql; use kv::{columns, static_keys}; +#[cfg(feature = "sql")] +pub mod sql; + #[derive(Clone)] pub enum Backend { KeyValue(kv::Backend), + #[cfg(feature = "sql")] Sql(sql::Backend), } diff --git a/client/mapping-sync/Cargo.toml b/client/mapping-sync/Cargo.toml index f3d6ca061b..35000a10a2 100644 --- a/client/mapping-sync/Cargo.toml +++ b/client/mapping-sync/Cargo.toml @@ -15,14 +15,14 @@ futures = "0.3.25" futures-timer = "3.0.2" log = "0.4.17" parking_lot = "0.12.1" -tokio = { version = "1.19", features = ["macros", "sync"] } +tokio = { version = "1.19", features = ["macros", "sync"], optional = true } # Substrate sc-client-api = { workspace = true } sc-utils = { workspace = true } sp-api = { workspace = true } sp-blockchain = { workspace = true } sp-consensus = { workspace = true, features = ["default"] } -sp-core = { workspace = true } +sp-core = { workspace = true, optional = true } sp-runtime = { workspace = true } # Frontier fc-db = { workspace = true } @@ -30,6 +30,13 @@ fc-storage = { workspace = true } fp-consensus = { workspace = true, features = ["default"] } fp-rpc = { workspace = true, features = ["default"] } +[features] +sql = [ + "tokio", + "sp-core", + "fc-db/sql", +] + [dev-dependencies] ethereum = { workspace = true } ethereum-types = { workspace = true } diff --git a/client/mapping-sync/src/lib.rs b/client/mapping-sync/src/lib.rs index 50872529e9..3a03e2ba8a 100644 --- a/client/mapping-sync/src/lib.rs +++ b/client/mapping-sync/src/lib.rs @@ -20,6 +20,7 @@ #![allow(clippy::too_many_arguments)] pub mod kv; +#[cfg(feature = "sql")] pub mod sql; use sp_api::BlockT; diff --git a/template/node/Cargo.toml b/template/node/Cargo.toml index d6e8d53589..3d958268c1 100644 --- a/template/node/Cargo.toml +++ b/template/node/Cargo.toml @@ -70,8 +70,8 @@ pallet-transaction-payment = { workspace = true } # Frontier fc-cli = { workspace = true } fc-consensus = { workspace = true } -fc-db = { workspace = true } -fc-mapping-sync = { workspace = true } +fc-db = { workspace = true, features = ["default", "sql"] } +fc-mapping-sync = { workspace = true, features = ["sql"] } fc-rpc = { workspace = true } fc-rpc-core = { workspace = true } fc-storage = { workspace = true } From 3b871456f7a23b17fc01b2d17d895a0bdf60b725 Mon Sep 17 00:00:00 2001 From: Xavier Lau Date: Tue, 27 Jun 2023 00:56:39 +0800 Subject: [PATCH 12/12] Improve `EthereumSigner` (#1057) * Improve `EthereumSigner` * Add `RuntimeDebug` * Typo * Update primitives/account/src/lib.rs --------- Co-authored-by: Wei Tang --- Cargo.lock | 1 + Cargo.toml | 1 + primitives/account/Cargo.toml | 2 ++ primitives/account/src/lib.rs | 19 +++++++++++++++++-- 4 files changed, 21 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index bb893341fd..c204965bef 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2428,6 +2428,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", + "sp-runtime-interface", "sp-std", ] diff --git a/Cargo.toml b/Cargo.toml index db5a09fd8c..b534664352 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -94,6 +94,7 @@ sp-io = { version = "7.0.0", git = "https://github.com/paritytech/substrate", br sp-keyring = { version = "7.0.0", git = "https://github.com/paritytech/substrate", branch = "master" } sp-offchain = { version = "4.0.0-dev", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } sp-runtime = { version = "7.0.0", git = "https://github.com/paritytech/substrate", branch = 
"master", default-features = false } +sp-runtime-interface = { version = "7.0.0", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } sp-session = { version = "4.0.0-dev", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } sp-state-machine = { version = "0.13.0", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } sp-std = { version = "5.0.0", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } diff --git a/primitives/account/Cargo.toml b/primitives/account/Cargo.toml index a56506cca1..0a6f7a0f9c 100644 --- a/primitives/account/Cargo.toml +++ b/primitives/account/Cargo.toml @@ -20,6 +20,7 @@ serde = { workspace = true, optional = true } sp-core = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } +sp-runtime-interface = { workspace = true } sp-std = { workspace = true } [dev-dependencies] @@ -38,5 +39,6 @@ std = [ "sp-core/std", "sp-io/std", "sp-runtime/std", + "sp-runtime-interface/std", "sp-std/std", ] diff --git a/primitives/account/src/lib.rs b/primitives/account/src/lib.rs index acbf9a6b82..088951a699 100644 --- a/primitives/account/src/lib.rs +++ b/primitives/account/src/lib.rs @@ -20,8 +20,9 @@ use scale_codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; // Substrate -use sp_core::{ecdsa, H160, H256}; +use sp_core::{ecdsa, RuntimeDebug, H160, H256}; use sp_io::hashing::keccak_256; +use sp_runtime_interface::pass_by::PassByInner; /// A fully Ethereum-compatible `AccountId`. /// Conforms to H160 address and ECDSA key standards. @@ -114,7 +115,7 @@ impl From for AccountId20 { } #[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] -#[derive(Eq, PartialEq, Clone, Encode, Decode, sp_core::RuntimeDebug, TypeInfo)] +#[derive(Eq, PartialEq, Clone, Encode, Decode, RuntimeDebug, TypeInfo)] pub struct EthereumSignature(ecdsa::Signature); impl sp_runtime::traits::Verify for EthereumSignature { @@ -145,6 +146,20 @@ impl EthereumSignature { } } +#[derive( + PartialEq, + Eq, + PartialOrd, + Ord, + Clone, + Copy, + Encode, + Decode, + PassByInner, + MaxEncodedLen, + RuntimeDebug, + TypeInfo +)] pub struct EthereumSigner([u8; 20]); impl From<[u8; 20]> for EthereumSigner {