From a81265d693116d5555eeb84975f7c3c18be86ca0 Mon Sep 17 00:00:00 2001 From: nanocryk <6422796+nanocryk@users.noreply.github.com> Date: Wed, 31 May 2023 06:18:12 +0200 Subject: [PATCH 1/7] Cache code size/hash in storage (#893) * cache code size/hash in storage * use in-memory code * don't clone metadata * bump evm * don't cache empty code metadata + tests * clippy * remove deprecated getter attribute * remove dep on sha3 crate * feedback --- Cargo.lock | 1 + frame/evm/Cargo.toml | 1 + frame/evm/src/lib.rs | 54 ++++++++++++++++++++++++++++++++++- frame/evm/src/runner/stack.rs | 8 ++++++ frame/evm/src/tests.rs | 37 ++++++++++++++++++++++++ 5 files changed, 100 insertions(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index 36d44bfd40..9c9dec411f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5108,6 +5108,7 @@ dependencies = [ "frame-support", "frame-system", "hex", + "hex-literal", "impl-trait-for-tuples", "log", "pallet-balances", diff --git a/frame/evm/Cargo.toml b/frame/evm/Cargo.toml index 03b429fedc..b3d4ac5302 100644 --- a/frame/evm/Cargo.toml +++ b/frame/evm/Cargo.toml @@ -15,6 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] environmental = { workspace = true, optional = true } evm = { workspace = true, features = ["with-codec"] } hex = { version = "0.4.3", default-features = false, features = ["alloc"] } +hex-literal = { version = "0.3.4" } impl-trait-for-tuples = "0.2.2" log = { workspace = true } rlp = { workspace = true } diff --git a/frame/evm/src/lib.rs b/frame/evm/src/lib.rs index f46d0e4e8e..2100a29ce7 100644 --- a/frame/evm/src/lib.rs +++ b/frame/evm/src/lib.rs @@ -76,7 +76,8 @@ use frame_support::{ }; use frame_system::RawOrigin; use impl_trait_for_tuples::impl_for_tuples; -use sp_core::{Hasher, H160, H256, U256}; +use scale_info::TypeInfo; +use sp_core::{Decode, Encode, Hasher, H160, H256, U256}; use sp_runtime::{ traits::{BadOrigin, Saturating, UniqueSaturatedInto, Zero}, AccountId32, DispatchErrorWithPostInfo, @@ -512,6 +513,10 @@ pub mod pallet { #[pallet::storage] pub type AccountCodes = StorageMap<_, Blake2_128Concat, H160, Vec, ValueQuery>; + #[pallet::storage] + pub type AccountCodesMetadata = + StorageMap<_, Blake2_128Concat, H160, CodeMetadata, OptionQuery>; + #[pallet::storage] pub type AccountStorages = StorageDoubleMap<_, Blake2_128Concat, H160, Blake2_128Concat, H256, H256, ValueQuery>; @@ -525,6 +530,21 @@ pub type BalanceOf = type NegativeImbalanceOf = ::AccountId>>::NegativeImbalance; +#[derive(Debug, Clone, Copy, Eq, PartialEq, Encode, Decode, TypeInfo)] +pub struct CodeMetadata { + pub size: u64, + pub hash: H256, +} + +impl CodeMetadata { + fn from_code(code: &[u8]) -> Self { + let size = code.len() as u64; + let hash = H256::from(sp_io::hashing::keccak_256(code)); + + Self { size, hash } + } +} + pub trait EnsureAddressOrigin { /// Success return type. type Success; @@ -720,6 +740,7 @@ impl Pallet { } >::remove(address); + >::remove(address); #[allow(deprecated)] let _ = >::remove_prefix(address, None); } @@ -735,9 +756,40 @@ impl Pallet { let _ = frame_system::Pallet::::inc_sufficients(&account_id); } + // Update metadata. + let meta = CodeMetadata::from_code(&code); + >::insert(address, meta); + >::insert(address, code); } + /// Get the account metadata (hash and size) from storage if it exists, + /// or compute it from code and store it if it doesn't exist. 
+ pub fn account_code_metadata(address: H160) -> CodeMetadata { + if let Some(meta) = >::get(address) { + return meta; + } + + let code = >::get(address); + + // If code is empty we return precomputed hash for empty code. + // We don't store it as this address could get code deployed in the future. + if code.is_empty() { + const EMPTY_CODE_HASH: [u8; 32] = hex_literal::hex!( + "c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470" + ); + return CodeMetadata { + size: 0, + hash: EMPTY_CODE_HASH.into(), + }; + } + + let meta = CodeMetadata::from_code(&code); + + >::insert(address, meta); + meta + } + /// Get the account basic in EVM format. pub fn account_basic(address: &H160) -> (Account, frame_support::weights::Weight) { let account_id = T::AddressMapping::into_account_id(*address); diff --git a/frame/evm/src/runner/stack.rs b/frame/evm/src/runner/stack.rs index 2c9f66c45f..f21a8b1fbd 100644 --- a/frame/evm/src/runner/stack.rs +++ b/frame/evm/src/runner/stack.rs @@ -854,6 +854,14 @@ where self.substate .recursive_is_cold(&|a: &Accessed| a.accessed_storage.contains(&(address, key))) } + + fn code_size(&self, address: H160) -> U256 { + U256::from(>::account_code_metadata(address).size) + } + + fn code_hash(&self, address: H160) -> H256 { + >::account_code_metadata(address).hash + } } #[cfg(feature = "forbid-evm-reentrancy")] diff --git a/frame/evm/src/tests.rs b/frame/evm/src/tests.rs index 80170483a8..bb09d665db 100644 --- a/frame/evm/src/tests.rs +++ b/frame/evm/src/tests.rs @@ -651,3 +651,40 @@ fn eip3607_transaction_from_precompile() { .is_ok()); }); } + +#[test] +fn metadata_code_gets_cached() { + new_test_ext().execute_with(|| { + let address = H160::repeat_byte(0xaa); + + crate::Pallet::::create_account(address, b"Exemple".to_vec()); + + let metadata = crate::Pallet::::account_code_metadata(address); + assert_eq!(metadata.size, 7); + assert_eq!( + metadata.hash, + hex_literal::hex!("e8396a990fe08f2402e64a00647e41dadf360ba078a59ba79f55e876e67ed4bc") + .into() + ); + + let metadata2 = >::get(&address).expect("to have metadata set"); + assert_eq!(metadata, metadata2); + }); +} + +#[test] +fn metadata_empty_dont_code_gets_cached() { + new_test_ext().execute_with(|| { + let address = H160::repeat_byte(0xaa); + + let metadata = crate::Pallet::::account_code_metadata(address); + assert_eq!(metadata.size, 0); + assert_eq!( + metadata.hash, + hex_literal::hex!("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470") + .into() + ); + + assert!(>::get(&address).is_none()); + }); +} From aa57359cd63b950ae7d74eb9aafd3755ee658bcd Mon Sep 17 00:00:00 2001 From: tgmichel Date: Wed, 31 May 2023 08:55:53 +0200 Subject: [PATCH 2/7] sql backend w/ receipt log indexing (#883) * Receipt log sql indexer * Wip unify worker and backend * Wip lay out service * Remove old crate * mapping sync works (wip genesis) * Genesis block indexing * General rpc update to async * WIP filter logs sql * Fix mapping-sync tests * WIP filter logs sql * Test filter_logs * Fix tests * Make it build and pass tests again * Use sql backend in rpc * Run ts tests with sql + ci * Fix key-value * editorconfig * taplo * clippy * Always fire interval first * Try move indexing * Always fire interval first * await on logs task * wal journal mode * Additional logging * Add pragmas + optimize vec insert * More logs * More logs * Move block metadata state read to blocking thread # Conflicts: # client/db/src/sql/mod.rs * Tweak filter_logs query * Update indexes * Additional metadata logging * Fix trailing wildcard bug + 
add more tests # Conflicts: # client/db/src/sql/mod.rs * Improve topic input sanitization * Move `block_number` column from `logs` to `blocks` # Conflicts: # client/db/src/sql/mod.rs * Add and handle `is_canon` column # Conflicts: # client/db/src/sql/mod.rs * Use ethereum digest in sync worker tests # Conflicts: # client/mapping-sync/Cargo.toml * Proper way of handling canonicalization + tests # Conflicts: # client/db/src/sql/mod.rs * Add progress handler timeout for sqlx queries # Conflicts: # template/node/src/cli.rs * improve query building * add cli params to tweak sql, fixes non canon import bug and sql query # Conflicts: # client/db/src/sql/mod.rs # client/mapping-sync/src/sql/mod.rs # template/node/src/cli.rs * add tests, fix redundant cloning # Conflicts: # Cargo.lock # client/db/Cargo.toml * improve docs and tests * fix gaps in sync # Conflicts: # client/mapping-sync/src/sql/mod.rs * Fix tests * formatting * editorconfig * fmt * add sql backend pool size config * Tmp disable threads pragma * Revert "Tmp disable threads pragma" This reverts commit abaeb11c3defd65b240dd1eda689a5c18fd2eb61. * Try CI fix, low cache, pool size and greater timeout * revert timeout param * add support for purge, move db in sql directory * bump * fix resume indexing, create indices at start, use spawn_blocking * simplify num_ops_timeout type, add docs * use channles for indexing operations * fix bugs, loop for check missing blocks, set canon query * cleanup, add docs * fix filter query * insert null for missing topics, check unindexed blocks * fix tests, add worker config * merge with v0.9.38 * fmt * fmt * fmt * clippy * clippy * cleanup * use upstream sqlx * lint * remove nightly feature * apply code suggestions * rebase * disallow SyncStrategy::Normal for the time being * emit notifications from sql backend * update tests --------- Co-authored-by: Nisheeth Barthwal --- .github/workflows/rust.yml | 2 + Cargo.lock | 497 ++++- client/cli/src/frontier_db_cmd/mapping_db.rs | 12 +- client/cli/src/frontier_db_cmd/meta_db.rs | 6 +- client/cli/src/frontier_db_cmd/mod.rs | 2 +- client/cli/src/frontier_db_cmd/tests.rs | 6 +- client/consensus/Cargo.toml | 1 - client/consensus/src/lib.rs | 5 +- client/db/Cargo.toml | 29 +- client/db/src/kv/mod.rs | 358 ++++ client/db/src/{ => kv}/parity_db_adapter.rs | 2 +- client/db/src/{ => kv}/upgrade.rs | 36 +- client/db/src/{ => kv}/utils.rs | 18 +- client/db/src/lib.rs | 322 +-- client/db/src/sql/mod.rs | 1912 ++++++++++++++++++ client/mapping-sync/Cargo.toml | 21 +- client/mapping-sync/src/kv/mod.rs | 304 +++ client/mapping-sync/src/{ => kv}/worker.rs | 21 +- client/mapping-sync/src/lib.rs | 290 +-- client/mapping-sync/src/sql/mod.rs | 1306 ++++++++++++ client/rpc-core/src/eth.rs | 23 +- client/rpc/src/eth/block.rs | 14 +- client/rpc/src/eth/execute.rs | 10 +- client/rpc/src/eth/fee.rs | 8 +- client/rpc/src/eth/filter.rs | 262 ++- client/rpc/src/eth/mod.rs | 42 +- client/rpc/src/eth/state.rs | 29 +- client/rpc/src/eth/submit.rs | 2 +- client/rpc/src/eth/transaction.rs | 9 +- client/rpc/src/lib.rs | 70 +- template/node/src/command.rs | 59 +- template/node/src/eth.rs | 102 +- template/node/src/rpc/eth.rs | 5 +- template/node/src/service.rs | 87 +- ts-tests/package.json | 3 +- ts-tests/tests/util.ts | 2 + 36 files changed, 5029 insertions(+), 848 deletions(-) create mode 100644 client/db/src/kv/mod.rs rename client/db/src/{ => kv}/parity_db_adapter.rs (96%) rename client/db/src/{ => kv}/upgrade.rs (94%) rename client/db/src/{ => kv}/utils.rs (87%) create mode 100644 
client/db/src/sql/mod.rs create mode 100644 client/mapping-sync/src/kv/mod.rs rename client/mapping-sync/src/{ => kv}/worker.rs (97%) create mode 100644 client/mapping-sync/src/sql/mod.rs diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index a496baeb67..a40b4ef118 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -60,6 +60,8 @@ jobs: run: cd ts-tests && npm run fmt-check - name: Run functional tests run: cd ts-tests && npm run build && npm run test + - name: Run functional tests (sql) + run: cd ts-tests && npm run build && npm run test-sql lint: name: 'Run lints' diff --git a/Cargo.lock b/Cargo.lock index 9c9dec411f..510444e5aa 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -494,6 +494,15 @@ dependencies = [ "pin-project-lite 0.2.9", ] +[[package]] +name = "atoi" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f28d99ec8bfea296261ca1af174f24225171fea9664ba9003cbebee704810528" +dependencies = [ + "num-traits", +] + [[package]] name = "atomic-waker" version = "1.1.0" @@ -1292,6 +1301,16 @@ dependencies = [ "scopeguard", ] +[[package]] +name = "crossbeam-queue" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1cfb3ea8a53f37c40dea2c7bedcbd88bdfae54f5e2175d6ecaff1c988353add" +dependencies = [ + "cfg-if", + "crossbeam-utils", +] + [[package]] name = "crossbeam-utils" version = "0.8.14" @@ -1655,6 +1674,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8168378f4e5023e7218c89c891c0fd8ecdb5e5e4f18cb78f38cf245dd021e76f" dependencies = [ "block-buffer 0.10.3", + "const-oid", "crypto-common", "subtle", ] @@ -1678,6 +1698,15 @@ dependencies = [ "dirs-sys-next", ] +[[package]] +name = "dirs" +version = "4.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca3aa72a6f96ea37bbc5aa912f6788242832f75369bdfdadcb0e38423f100059" +dependencies = [ + "dirs-sys", +] + [[package]] name = "dirs-sys" version = "0.3.7" @@ -1711,6 +1740,12 @@ dependencies = [ "syn", ] +[[package]] +name = "dotenvy" +version = "0.15.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1aaf95b3e5c8f23aa320147307562d361db0ae0d51242340f558153b4eb2439b" + [[package]] name = "downcast" version = "0.11.0" @@ -1765,7 +1800,7 @@ dependencies = [ "der", "elliptic-curve", "rfc6979", - "signature", + "signature 1.6.4", ] [[package]] @@ -1774,7 +1809,7 @@ version = "1.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "91cff35c70bba8a626e3185d8cd48cc11b5437e1a5bcd15b9b5fa3c64b6dfee7" dependencies = [ - "signature", + "signature 1.6.4", ] [[package]] @@ -1810,6 +1845,9 @@ name = "either" version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "90e5c1c8368803113bf0c9584fc495a58b86dc8a29edbf8fe877d21d9507e797" +dependencies = [ + "serde", +] [[package]] name = "elliptic-curve" @@ -2072,7 +2110,6 @@ name = "fc-consensus" version = "2.0.0-dev" dependencies = [ "async-trait", - "fc-db", "fp-consensus", "fp-rpc", "sc-consensus", @@ -2087,23 +2124,35 @@ dependencies = [ name = "fc-db" version = "2.0.0-dev" dependencies = [ + "async-trait", + "ethereum", + "fc-rpc", + "fc-storage", + "fp-consensus", + "fp-rpc", "fp-storage", "futures", "kvdb-rocksdb", "log", + "maplit", "parity-db", "parity-scale-codec", "parking_lot 0.12.1", "sc-block-builder", + "sc-client-api", "sc-client-db", "smallvec", + "sp-api", "sp-blockchain", "sp-consensus", "sp-core", 
"sp-database", "sp-runtime", + "sp-storage", + "sqlx", "substrate-test-runtime-client", "tempfile", + "tokio", ] [[package]] @@ -2113,6 +2162,7 @@ dependencies = [ "ethereum", "ethereum-types", "fc-db", + "fc-rpc", "fc-storage", "fp-consensus", "fp-rpc", @@ -2121,6 +2171,7 @@ dependencies = [ "futures", "futures-timer", "log", + "parity-scale-codec", "parking_lot 0.12.1", "sc-block-builder", "sc-client-api", @@ -2130,7 +2181,9 @@ dependencies = [ "sp-blockchain", "sp-consensus", "sp-core", + "sp-io", "sp-runtime", + "sqlx", "substrate-test-runtime-client", "tempfile", "tokio", @@ -2312,12 +2365,39 @@ dependencies = [ "num-traits", ] +[[package]] +name = "flume" +version = "0.10.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1657b4441c3403d9f7b3409e47575237dac27b1b5726df654a6ecbf92f0f7577" +dependencies = [ + "futures-core", + "futures-sink", + "pin-project", + "spin 0.9.7", +] + [[package]] name = "fnv" version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" +[[package]] +name = "foreign-types" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" +dependencies = [ + "foreign-types-shared", +] + +[[package]] +name = "foreign-types-shared" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" + [[package]] name = "fork-tree" version = "3.0.0" @@ -2847,6 +2927,17 @@ dependencies = [ "num_cpus", ] +[[package]] +name = "futures-intrusive" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d930c203dd0b6ff06e0201a4a2fe9149b43c684fd4420555b26d21b1a02956f" +dependencies = [ + "futures-core", + "lock_api", + "parking_lot 0.12.1", +] + [[package]] name = "futures-io" version = "0.3.25" @@ -3125,11 +3216,23 @@ dependencies = [ "ahash 0.8.3", ] +[[package]] +name = "hashlink" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69fe1fcf8b4278d860ad0548329f892a3631fb63f82574df68275f34cdbe0ffa" +dependencies = [ + "hashbrown 0.12.3", +] + [[package]] name = "heck" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2540771e65fc8cb83cd6e8a237f70c319bd5c29f78ed1084ba5d50eeac86f7f9" +dependencies = [ + "unicode-segmentation", +] [[package]] name = "hermit-abi" @@ -3715,7 +3818,7 @@ version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" dependencies = [ - "spin", + "spin 0.5.2", ] [[package]] @@ -4215,6 +4318,17 @@ dependencies = [ "libsecp256k1-core", ] +[[package]] +name = "libsqlite3-sys" +version = "0.25.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29f835d03d717946d28b1d1ed632eb6f0e24a299388ee623d0c23118d3e8a7fa" +dependencies = [ + "cc", + "pkg-config", + "vcpkg", +] + [[package]] name = "libz-sys" version = "1.1.8" @@ -4337,6 +4451,12 @@ dependencies = [ "libc", ] +[[package]] +name = "maplit" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e2e65a1a2e43cfcb47a895c4c8b10d1f4a61097f9f254f183aee60cad9c651d" + [[package]] name = "match_cfg" version = "0.1.0" @@ -4615,6 +4735,24 @@ dependencies = [ "rand 0.8.5", ] +[[package]] 
+name = "native-tls" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07226173c32f2926027b63cce4bcd8076c3552846cbe7925f3aaffeac0a3b92e" +dependencies = [ + "lazy_static", + "libc", + "log", + "openssl", + "openssl-probe", + "openssl-sys", + "schannel", + "security-framework", + "security-framework-sys", + "tempfile", +] + [[package]] name = "netlink-packet-core" version = "0.4.2" @@ -4740,6 +4878,23 @@ dependencies = [ "num-traits", ] +[[package]] +name = "num-bigint-dig" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2399c9463abc5f909349d8aa9ba080e0b88b3ce2885389b60b993f39b1a56905" +dependencies = [ + "byteorder", + "lazy_static", + "libm", + "num-integer", + "num-iter", + "num-traits", + "rand 0.8.5", + "smallvec", + "zeroize", +] + [[package]] name = "num-complex" version = "0.4.3" @@ -4799,6 +4954,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" dependencies = [ "autocfg", + "libm", ] [[package]] @@ -4889,12 +5045,51 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" +[[package]] +name = "openssl" +version = "0.10.48" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "518915b97df115dd36109bfa429a48b8f737bd05508cf9588977b599648926d2" +dependencies = [ + "bitflags", + "cfg-if", + "foreign-types", + "libc", + "once_cell", + "openssl-macros", + "openssl-sys", +] + +[[package]] +name = "openssl-macros" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b501e44f11665960c7e7fcf062c7d96a14ade4aa98116c004b2e37b5be7d736c" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "openssl-probe" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" +[[package]] +name = "openssl-sys" +version = "0.9.83" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "666416d899cf077260dac8698d60a60b435a46d57e82acb1be3d0dad87284e5b" +dependencies = [ + "autocfg", + "cc", + "libc", + "pkg-config", + "vcpkg", +] + [[package]] name = "os_str_bytes" version = "6.4.1" @@ -5678,6 +5873,18 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" +[[package]] +name = "pkcs1" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eff33bdbdfc54cc98a2eca766ebdec3e1b8fb7387523d5c9c9a2891da856f719" +dependencies = [ + "der", + "pkcs8", + "spki", + "zeroize", +] + [[package]] name = "pkcs8" version = "0.9.0" @@ -6277,7 +6484,7 @@ dependencies = [ "cc", "libc", "once_cell", - "spin", + "spin 0.5.2", "untrusted", "web-sys", "winapi", @@ -6335,6 +6542,26 @@ dependencies = [ "winapi", ] +[[package]] +name = "rsa" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55a77d189da1fee555ad95b7e50e7457d91c0e089ec68ca69ad2989413bbdab4" +dependencies = [ + "byteorder", + "digest 0.10.6", + "num-bigint-dig", + "num-integer", + "num-iter", + "num-traits", + "pkcs1", + "pkcs8", + "rand_core 0.6.4", + "signature 2.1.0", + "subtle", + "zeroize", +] + [[package]] name = "rtcp" version = "0.7.2" @@ -7808,6 +8035,17 @@ 
dependencies = [ "opaque-debug 0.3.0", ] +[[package]] +name = "sha1" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest 0.10.6", +] + [[package]] name = "sha2" version = "0.8.2" @@ -7888,6 +8126,16 @@ dependencies = [ "rand_core 0.6.4", ] +[[package]] +name = "signature" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e1788eed21689f9cf370582dfc467ef36ed9c707f073528ddafa8d83e3b8500" +dependencies = [ + "digest 0.10.6", + "rand_core 0.6.4", +] + [[package]] name = "simba" version = "0.8.0" @@ -8662,6 +8910,15 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" +[[package]] +name = "spin" +version = "0.9.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0959fd6f767df20b231736396e4f602171e00d95205676286e79d4a4eb67bef" +dependencies = [ + "lock_api", +] + [[package]] name = "spki" version = "0.6.0" @@ -8672,6 +8929,206 @@ dependencies = [ "der", ] +[[package]] +name = "sqlformat" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c12bc9199d1db8234678b7051747c07f517cdcf019262d1847b94ec8b1aee3e" +dependencies = [ + "itertools", + "nom", + "unicode_categories", +] + +[[package]] +name = "sqlx" +version = "0.7.0-alpha.2" +source = "git+https://github.com/launchbadge/sqlx?branch=main#4f1ac1d6060ee73edf83c8365fafb12df44deecc" +dependencies = [ + "sqlx-core", + "sqlx-macros", + "sqlx-mysql", + "sqlx-postgres", + "sqlx-sqlite", +] + +[[package]] +name = "sqlx-core" +version = "0.7.0-alpha.2" +source = "git+https://github.com/launchbadge/sqlx?branch=main#4f1ac1d6060ee73edf83c8365fafb12df44deecc" +dependencies = [ + "ahash 0.7.6", + "atoi", + "bitflags", + "byteorder", + "bytes", + "crc", + "crossbeam-queue", + "dotenvy", + "either", + "event-listener", + "futures-channel", + "futures-core", + "futures-intrusive", + "futures-io", + "futures-util", + "hashlink", + "hex", + "indexmap", + "log", + "memchr", + "native-tls", + "once_cell", + "paste", + "percent-encoding", + "serde", + "serde_json", + "sha2 0.10.6", + "smallvec", + "sqlformat", + "thiserror", + "tokio", + "tokio-stream", + "tracing", + "url", +] + +[[package]] +name = "sqlx-macros" +version = "0.7.0-alpha.2" +source = "git+https://github.com/launchbadge/sqlx?branch=main#4f1ac1d6060ee73edf83c8365fafb12df44deecc" +dependencies = [ + "proc-macro2", + "quote", + "sqlx-core", + "sqlx-macros-core", + "syn", +] + +[[package]] +name = "sqlx-macros-core" +version = "0.7.0-alpha.2" +source = "git+https://github.com/launchbadge/sqlx?branch=main#4f1ac1d6060ee73edf83c8365fafb12df44deecc" +dependencies = [ + "dotenvy", + "either", + "heck", + "hex", + "once_cell", + "proc-macro2", + "quote", + "serde", + "serde_json", + "sha2 0.10.6", + "sqlx-core", + "sqlx-mysql", + "sqlx-sqlite", + "syn", + "tempfile", + "tokio", + "url", +] + +[[package]] +name = "sqlx-mysql" +version = "0.7.0-alpha.2" +source = "git+https://github.com/launchbadge/sqlx?branch=main#4f1ac1d6060ee73edf83c8365fafb12df44deecc" +dependencies = [ + "atoi", + "base64 0.21.0", + "bitflags", + "byteorder", + "bytes", + "crc", + "digest 0.10.6", + "dirs", + "dotenvy", + "either", + "futures-channel", + "futures-core", + "futures-io", + "futures-util", + "generic-array 0.14.6", + "hex", + 
"hkdf", + "hmac 0.12.1", + "itoa", + "log", + "md-5", + "memchr", + "once_cell", + "percent-encoding", + "rand 0.8.5", + "rsa", + "serde", + "sha1", + "sha2 0.10.6", + "smallvec", + "sqlx-core", + "stringprep", + "thiserror", + "tracing", + "whoami", +] + +[[package]] +name = "sqlx-postgres" +version = "0.7.0-alpha.2" +source = "git+https://github.com/launchbadge/sqlx?branch=main#4f1ac1d6060ee73edf83c8365fafb12df44deecc" +dependencies = [ + "atoi", + "base64 0.21.0", + "bitflags", + "byteorder", + "crc", + "dirs", + "dotenvy", + "futures-channel", + "futures-core", + "futures-io", + "futures-util", + "hex", + "hkdf", + "hmac 0.12.1", + "itoa", + "log", + "md-5", + "memchr", + "once_cell", + "rand 0.8.5", + "serde", + "serde_json", + "sha1", + "sha2 0.10.6", + "smallvec", + "sqlx-core", + "stringprep", + "thiserror", + "tracing", + "whoami", +] + +[[package]] +name = "sqlx-sqlite" +version = "0.7.0-alpha.2" +source = "git+https://github.com/launchbadge/sqlx?branch=main#4f1ac1d6060ee73edf83c8365fafb12df44deecc" +dependencies = [ + "atoi", + "flume", + "futures-channel", + "futures-core", + "futures-executor", + "futures-intrusive", + "futures-util", + "libsqlite3-sys", + "log", + "percent-encoding", + "serde", + "sqlx-core", + "tracing", + "url", +] + [[package]] name = "ss58-registry" version = "1.38.0" @@ -8727,6 +9184,16 @@ dependencies = [ "syn", ] +[[package]] +name = "stringprep" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ee348cb74b87454fff4b551cbf727025810a004f88aeacae7f85b87f4e9a1c1" +dependencies = [ + "unicode-bidi", + "unicode-normalization", +] + [[package]] name = "strsim" version = "0.10.0" @@ -9564,6 +10031,12 @@ dependencies = [ "tinyvec", ] +[[package]] +name = "unicode-segmentation" +version = "1.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1dd624098567895118886609431a7c3b8f516e41d30e0643f03d94592a147e36" + [[package]] name = "unicode-width" version = "0.1.10" @@ -9576,6 +10049,12 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c" +[[package]] +name = "unicode_categories" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "39ec24b3121d976906ece63c9daad25b85969647682eee313cb5779fdd69e14e" + [[package]] name = "universal-hash" version = "0.4.1" @@ -10182,7 +10661,7 @@ dependencies = [ "serde", "sha-1", "sha2 0.9.9", - "signature", + "signature 1.6.4", "subtle", "thiserror", "tokio", @@ -10327,6 +10806,12 @@ dependencies = [ "once_cell", ] +[[package]] +name = "whoami" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c70234412ca409cc04e864e89523cb0fc37f5e1344ebed5a3ebf4192b6b9f68" + [[package]] name = "wide" version = "0.7.8" diff --git a/client/cli/src/frontier_db_cmd/mapping_db.rs b/client/cli/src/frontier_db_cmd/mapping_db.rs index dfbd593d5f..0b1e56410c 100644 --- a/client/cli/src/frontier_db_cmd/mapping_db.rs +++ b/client/cli/src/frontier_db_cmd/mapping_db.rs @@ -43,7 +43,7 @@ pub enum MappingKey { pub struct MappingDb<'a, C, B: BlockT> { cmd: &'a FrontierDbCmd, client: Arc, - backend: Arc>, + backend: Arc>, } impl<'a, C, B: BlockT> MappingDb<'a, C, B> @@ -52,7 +52,11 @@ where C::Api: EthereumRuntimeRPCApi, C: HeaderBackend, { - pub fn new(cmd: &'a FrontierDbCmd, client: Arc, backend: Arc>) -> Self { + pub fn new( + cmd: &'a FrontierDbCmd, + client: Arc, + backend: Arc>, + 
) -> Self { Self { cmd, client, @@ -93,7 +97,7 @@ where vec![] }; - let commitment = fc_db::MappingCommitment:: { + let commitment = fc_db::kv::MappingCommitment:: { block_hash: *substrate_block_hash, ethereum_block_hash: *ethereum_block_hash, ethereum_transaction_hashes: existing_transaction_hashes, @@ -151,7 +155,7 @@ where vec![] }; - let commitment = fc_db::MappingCommitment:: { + let commitment = fc_db::kv::MappingCommitment:: { block_hash: *substrate_block_hash, ethereum_block_hash: *ethereum_block_hash, ethereum_transaction_hashes: existing_transaction_hashes, diff --git a/client/cli/src/frontier_db_cmd/meta_db.rs b/client/cli/src/frontier_db_cmd/meta_db.rs index 80da559c24..878bd101ac 100644 --- a/client/cli/src/frontier_db_cmd/meta_db.rs +++ b/client/cli/src/frontier_db_cmd/meta_db.rs @@ -47,7 +47,7 @@ impl FromStr for MetaKey { // A convenience function to verify the user input is known. fn from_str(input: &str) -> Result { - let tips = str::from_utf8(fc_db::static_keys::CURRENT_SYNCING_TIPS).unwrap(); + let tips = str::from_utf8(fc_db::kv::static_keys::CURRENT_SYNCING_TIPS).unwrap(); let schema = str::from_utf8(fp_storage::PALLET_ETHEREUM_SCHEMA_CACHE).unwrap(); match input { x if x == tips => Ok(MetaKey::Tips), @@ -59,11 +59,11 @@ impl FromStr for MetaKey { pub struct MetaDb<'a, B: BlockT> { cmd: &'a FrontierDbCmd, - backend: Arc>, + backend: Arc>, } impl<'a, B: BlockT> MetaDb<'a, B> { - pub fn new(cmd: &'a FrontierDbCmd, backend: Arc>) -> Self { + pub fn new(cmd: &'a FrontierDbCmd, backend: Arc>) -> Self { Self { cmd, backend } } diff --git a/client/cli/src/frontier_db_cmd/mod.rs b/client/cli/src/frontier_db_cmd/mod.rs index b3f8762e38..a82436b6f3 100644 --- a/client/cli/src/frontier_db_cmd/mod.rs +++ b/client/cli/src/frontier_db_cmd/mod.rs @@ -101,7 +101,7 @@ impl FrontierDbCmd { pub fn run( &self, client: Arc, - backend: Arc>, + backend: Arc>, ) -> sc_cli::Result<()> where C: ProvideRuntimeApi, diff --git a/client/cli/src/frontier_db_cmd/tests.rs b/client/cli/src/frontier_db_cmd/tests.rs index c45367ad14..bce1bf7d3e 100644 --- a/client/cli/src/frontier_db_cmd/tests.rs +++ b/client/cli/src/frontier_db_cmd/tests.rs @@ -49,10 +49,10 @@ type OpaqueBlock = pub fn open_frontier_backend>( client: Arc, path: PathBuf, -) -> Result>, String> { - Ok(Arc::new(fc_db::Backend::::new( +) -> Result>, String> { + Ok(Arc::new(fc_db::kv::Backend::::new( client, - &fc_db::DatabaseSettings { + &fc_db::kv::DatabaseSettings { source: sc_client_db::DatabaseSource::RocksDb { path, cache_size: 0, diff --git a/client/consensus/Cargo.toml b/client/consensus/Cargo.toml index 5afdb76916..242a442f5a 100644 --- a/client/consensus/Cargo.toml +++ b/client/consensus/Cargo.toml @@ -20,6 +20,5 @@ sp-block-builder = { workspace = true, features = ["default"] } sp-consensus = { workspace = true } sp-runtime = { workspace = true } # Frontier -fc-db = { workspace = true } fp-consensus = { workspace = true, features = ["default"] } fp-rpc = { workspace = true, features = ["default"] } diff --git a/client/consensus/src/lib.rs b/client/consensus/src/lib.rs index c9575196e8..d58bd722db 100644 --- a/client/consensus/src/lib.rs +++ b/client/consensus/src/lib.rs @@ -64,7 +64,6 @@ impl From for ConsensusError { pub struct FrontierBlockImport { inner: I, client: Arc, - backend: Arc>, _marker: PhantomData, } @@ -73,7 +72,6 @@ impl, C> Clone for FrontierBlockImp FrontierBlockImport { inner: self.inner.clone(), client: self.client.clone(), - backend: self.backend.clone(), _marker: PhantomData, } } @@ -87,11 +85,10 @@ where C: 
ProvideRuntimeApi, C::Api: BlockBuilderApi + EthereumRuntimeRPCApi, { - pub fn new(inner: I, client: Arc, backend: Arc>) -> Self { + pub fn new(inner: I, client: Arc) -> Self { Self { inner, client, - backend, _marker: PhantomData, } } diff --git a/client/db/Cargo.toml b/client/db/Cargo.toml index 57002e6d6f..c21d87fd94 100644 --- a/client/db/Cargo.toml +++ b/client/db/Cargo.toml @@ -2,7 +2,7 @@ name = "fc-db" version = "2.0.0-dev" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -description = "Frontier database backend." +description = "Frontier database backend" authors = { workspace = true } edition = { workspace = true } repository = { workspace = true } @@ -11,28 +11,43 @@ repository = { workspace = true } targets = ["x86_64-unknown-linux-gnu"] [dependencies] -kvdb-rocksdb = { workspace = true, optional = true } +async-trait = "0.1" +ethereum = { workspace = true, features = ["with-codec"] } +futures = "0.3.25" log = "0.4.17" -parity-db = { workspace = true, optional = true } parking_lot = "0.12.1" -scale-codec = { package = "parity-scale-codec", workspace = true } smallvec = "1.10" +sqlx = { features = ["runtime-tokio-native-tls", "sqlite"], git = "https://github.com/launchbadge/sqlx", branch = "main" } +tokio = { version = "1.19", features = ["macros", "sync"] } + +# Parity +kvdb-rocksdb = { workspace = true, optional = true } +parity-db = { workspace = true, optional = true } +scale-codec = { package = "parity-scale-codec", workspace = true } + # Substrate +sc-client-api = { workspace = true } sc-client-db = { workspace = true, features = ["rocksdb"] } +sp-api = { workspace = true } sp-blockchain = { workspace = true } sp-core = { workspace = true } sp-database = { workspace = true } sp-runtime = { workspace = true } +sp-storage = { workspace = true } + # Frontier +fc-storage = { workspace = true } +fp-consensus = { workspace = true, features = ["default"] } +fp-rpc = { workspace = true, features = ["default"] } fp-storage = { workspace = true, features = ["default"] } [features] default = ["kvdb-rocksdb", "parity-db"] [dev-dependencies] -futures = "0.3.25" -tempfile = "3.3.0" -# Substrate +fc-rpc = { workspace = true } +maplit = "1.0.2" sc-block-builder = { workspace = true } sp-consensus = { workspace = true } substrate-test-runtime-client = { workspace = true } +tempfile = "3.3.0" diff --git a/client/db/src/kv/mod.rs b/client/db/src/kv/mod.rs new file mode 100644 index 0000000000..4fda22a4f6 --- /dev/null +++ b/client/db/src/kv/mod.rs @@ -0,0 +1,358 @@ +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 +// This file is part of Frontier. +// +// Copyright (c) 2021-2022 Parity Technologies (UK) Ltd. +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +#[cfg(feature = "parity-db")] +mod parity_db_adapter; +mod upgrade; +mod utils; + +use std::{ + marker::PhantomData, + path::{Path, PathBuf}, + sync::Arc, +}; + +use parking_lot::Mutex; +use scale_codec::{Decode, Encode}; +// Substrate +pub use sc_client_db::DatabaseSource; +use sp_blockchain::HeaderBackend; +use sp_core::H256; +pub use sp_database::Database; +use sp_runtime::traits::Block as BlockT; +// Frontier +use crate::TransactionMetadata; +use fp_storage::{EthereumStorageSchema, PALLET_ETHEREUM_SCHEMA_CACHE}; + +const DB_HASH_LEN: usize = 32; +/// Hash type that this backend uses for the database. +pub type DbHash = [u8; DB_HASH_LEN]; + +/// Database settings. +pub struct DatabaseSettings { + /// Where to find the database. + pub source: DatabaseSource, +} + +pub(crate) mod columns { + pub const NUM_COLUMNS: u32 = 4; + + pub const META: u32 = 0; + pub const BLOCK_MAPPING: u32 = 1; + pub const TRANSACTION_MAPPING: u32 = 2; + pub const SYNCED_MAPPING: u32 = 3; +} + +pub mod static_keys { + pub const CURRENT_SYNCING_TIPS: &[u8] = b"CURRENT_SYNCING_TIPS"; +} + +#[derive(Clone)] +pub struct Backend { + meta: Arc>, + mapping: Arc>, +} + +#[async_trait::async_trait] +impl crate::BackendReader for Backend { + async fn block_hash( + &self, + ethereum_block_hash: &H256, + ) -> Result>, String> { + self.mapping().block_hash(ethereum_block_hash) + } + async fn transaction_metadata( + &self, + ethereum_transaction_hash: &H256, + ) -> Result>, String> { + self.mapping() + .transaction_metadata(ethereum_transaction_hash) + } + async fn filter_logs( + &self, + _from_block: u64, + _to_block: u64, + _addresses: Vec, + _topics: Vec>>, + ) -> Result, String> { + Err("KeyValue db does not index logs".into()) + } + + fn is_indexed(&self) -> bool { + false + } +} + +/// Returns the frontier database directory. +pub fn frontier_database_dir(db_config_dir: &Path, db_path: &str) -> PathBuf { + db_config_dir.join("frontier").join(db_path) +} + +impl Backend { + pub fn open>( + client: Arc, + database: &DatabaseSource, + db_config_dir: &Path, + ) -> Result { + Self::new( + client, + &DatabaseSettings { + source: match database { + DatabaseSource::RocksDb { .. } => DatabaseSource::RocksDb { + path: frontier_database_dir(db_config_dir, "db"), + cache_size: 0, + }, + DatabaseSource::ParityDb { .. } => DatabaseSource::ParityDb { + path: frontier_database_dir(db_config_dir, "paritydb"), + }, + DatabaseSource::Auto { .. } => DatabaseSource::Auto { + rocksdb_path: frontier_database_dir(db_config_dir, "db"), + paritydb_path: frontier_database_dir(db_config_dir, "paritydb"), + cache_size: 0, + }, + _ => { + return Err( + "Supported db sources: `rocksdb` | `paritydb` | `auto`".to_string() + ) + } + }, + }, + ) + } + + pub fn new>( + client: Arc, + config: &DatabaseSettings, + ) -> Result { + let db = utils::open_database::(client, config)?; + + Ok(Self { + mapping: Arc::new(MappingDb { + db: db.clone(), + write_lock: Arc::new(Mutex::new(())), + _marker: PhantomData, + }), + meta: Arc::new(MetaDb { + db: db.clone(), + _marker: PhantomData, + }), + }) + } + + pub fn mapping(&self) -> &Arc> { + &self.mapping + } + + pub fn meta(&self) -> &Arc> { + &self.meta + } +} + +pub struct MetaDb { + db: Arc>, + _marker: PhantomData, +} + +impl MetaDb { + pub fn current_syncing_tips(&self) -> Result, String> { + match self.db.get( + crate::columns::META, + crate::static_keys::CURRENT_SYNCING_TIPS, + ) { + Some(raw) => { + Ok(Vec::::decode(&mut &raw[..]).map_err(|e| format!("{:?}", e))?) 
+ } + None => Ok(Vec::new()), + } + } + + pub fn write_current_syncing_tips(&self, tips: Vec) -> Result<(), String> { + let mut transaction = sp_database::Transaction::new(); + + transaction.set( + crate::columns::META, + crate::static_keys::CURRENT_SYNCING_TIPS, + &tips.encode(), + ); + + self.db + .commit(transaction) + .map_err(|e| format!("{:?}", e))?; + + Ok(()) + } + + pub fn ethereum_schema(&self) -> Result>, String> { + match self + .db + .get(crate::columns::META, &PALLET_ETHEREUM_SCHEMA_CACHE.encode()) + { + Some(raw) => Ok(Some( + Decode::decode(&mut &raw[..]).map_err(|e| format!("{:?}", e))?, + )), + None => Ok(None), + } + } + + pub fn write_ethereum_schema( + &self, + new_cache: Vec<(EthereumStorageSchema, H256)>, + ) -> Result<(), String> { + let mut transaction = sp_database::Transaction::new(); + + transaction.set( + crate::columns::META, + &PALLET_ETHEREUM_SCHEMA_CACHE.encode(), + &new_cache.encode(), + ); + + self.db + .commit(transaction) + .map_err(|e| format!("{:?}", e))?; + + Ok(()) + } +} + +#[derive(Debug)] +pub struct MappingCommitment { + pub block_hash: Block::Hash, + pub ethereum_block_hash: H256, + pub ethereum_transaction_hashes: Vec, +} + +pub struct MappingDb { + db: Arc>, + write_lock: Arc>, + _marker: PhantomData, +} + +impl MappingDb { + pub fn is_synced(&self, block_hash: &Block::Hash) -> Result { + match self + .db + .get(crate::columns::SYNCED_MAPPING, &block_hash.encode()) + { + Some(raw) => Ok(bool::decode(&mut &raw[..]).map_err(|e| format!("{:?}", e))?), + None => Ok(false), + } + } + + pub fn block_hash( + &self, + ethereum_block_hash: &H256, + ) -> Result>, String> { + match self + .db + .get(crate::columns::BLOCK_MAPPING, ðereum_block_hash.encode()) + { + Some(raw) => Ok(Some( + Vec::::decode(&mut &raw[..]).map_err(|e| format!("{:?}", e))?, + )), + None => Ok(None), + } + } + + pub fn transaction_metadata( + &self, + ethereum_transaction_hash: &H256, + ) -> Result>, String> { + match self.db.get( + crate::columns::TRANSACTION_MAPPING, + ðereum_transaction_hash.encode(), + ) { + Some(raw) => Ok(Vec::>::decode(&mut &raw[..]) + .map_err(|e| format!("{:?}", e))?), + None => Ok(Vec::new()), + } + } + + pub fn write_none(&self, block_hash: Block::Hash) -> Result<(), String> { + let _lock = self.write_lock.lock(); + + let mut transaction = sp_database::Transaction::new(); + + transaction.set( + crate::columns::SYNCED_MAPPING, + &block_hash.encode(), + &true.encode(), + ); + + self.db + .commit(transaction) + .map_err(|e| format!("{:?}", e))?; + + Ok(()) + } + + pub fn write_hashes(&self, commitment: MappingCommitment) -> Result<(), String> { + let _lock = self.write_lock.lock(); + + let mut transaction = sp_database::Transaction::new(); + + let substrate_hashes = match self.block_hash(&commitment.ethereum_block_hash) { + Ok(Some(mut data)) => { + data.push(commitment.block_hash); + log::warn!( + target: "fc-db", + "Possible equivocation at ethereum block hash {} {:?}", + &commitment.ethereum_block_hash, + &data + ); + data + } + _ => vec![commitment.block_hash], + }; + + transaction.set( + crate::columns::BLOCK_MAPPING, + &commitment.ethereum_block_hash.encode(), + &substrate_hashes.encode(), + ); + + for (i, ethereum_transaction_hash) in commitment + .ethereum_transaction_hashes + .into_iter() + .enumerate() + { + let mut metadata = self.transaction_metadata(ðereum_transaction_hash)?; + metadata.push(TransactionMetadata:: { + block_hash: commitment.block_hash, + ethereum_block_hash: commitment.ethereum_block_hash, + ethereum_index: i as u32, + }); + 
transaction.set( + crate::columns::TRANSACTION_MAPPING, + ðereum_transaction_hash.encode(), + &metadata.encode(), + ); + } + + transaction.set( + crate::columns::SYNCED_MAPPING, + &commitment.block_hash.encode(), + &true.encode(), + ); + + self.db + .commit(transaction) + .map_err(|e| format!("{:?}", e))?; + + Ok(()) + } +} diff --git a/client/db/src/parity_db_adapter.rs b/client/db/src/kv/parity_db_adapter.rs similarity index 96% rename from client/db/src/parity_db_adapter.rs rename to client/db/src/kv/parity_db_adapter.rs index e527c2f116..7387852541 100644 --- a/client/db/src/parity_db_adapter.rs +++ b/client/db/src/kv/parity_db_adapter.rs @@ -61,6 +61,6 @@ impl> Database for DbAdapter { } fn sanitize_key(&self, key: &mut Vec) { - let _prefix = key.drain(0..key.len() - crate::DB_HASH_LEN); + let _prefix = key.drain(0..key.len() - super::DB_HASH_LEN); } } diff --git a/client/db/src/upgrade.rs b/client/db/src/kv/upgrade.rs similarity index 94% rename from client/db/src/upgrade.rs rename to client/db/src/kv/upgrade.rs index 919f440b3e..20ab19befd 100644 --- a/client/db/src/upgrade.rs +++ b/client/db/src/kv/upgrade.rs @@ -183,7 +183,7 @@ pub(crate) fn migrate_1_to_2_rocks_db>( let mut transaction = db.transaction(); for ethereum_hash in ethereum_hashes { let mut maybe_error = true; - if let Some(substrate_hash) = db.get(crate::columns::BLOCK_MAPPING, ethereum_hash)? { + if let Some(substrate_hash) = db.get(super::columns::BLOCK_MAPPING, ethereum_hash)? { // Only update version1 data let decoded = Vec::::decode(&mut &substrate_hash[..]); if decoded.is_err() || decoded.unwrap().is_empty() { @@ -191,7 +191,7 @@ pub(crate) fn migrate_1_to_2_rocks_db>( if let Ok(Some(number)) = client.number(Block::Hash::decode(&mut &substrate_hash[..]).unwrap()) { if let Ok(Some(hash)) = client.hash(number) { transaction.put_vec( - crate::columns::BLOCK_MAPPING, + super::columns::BLOCK_MAPPING, ethereum_hash, vec![hash].encode(), ); @@ -226,7 +226,7 @@ pub(crate) fn migrate_1_to_2_rocks_db>( // Get all the block hashes we need to update let ethereum_hashes: Vec<_> = db - .iter(crate::columns::BLOCK_MAPPING) + .iter(super::columns::BLOCK_MAPPING) .filter_map(|entry| entry.map_or(None, |r| Some(r.0))) .collect(); @@ -264,7 +264,7 @@ pub(crate) fn migrate_1_to_2_parity_db>( let mut transaction = vec![]; for ethereum_hash in ethereum_hashes { let mut maybe_error = true; - if let Some(substrate_hash) = db.get(crate::columns::BLOCK_MAPPING as u8, ethereum_hash).map_err(|_| + if let Some(substrate_hash) = db.get(super::columns::BLOCK_MAPPING as u8, ethereum_hash).map_err(|_| io::Error::new(ErrorKind::Other, "Key does not exist") )? 
{ // Only update version1 data @@ -274,7 +274,7 @@ pub(crate) fn migrate_1_to_2_parity_db>( if let Ok(Some(number)) = client.number(Block::Hash::decode(&mut &substrate_hash[..]).unwrap()) { if let Ok(Some(hash)) = client.hash(number) { transaction.push(( - crate::columns::BLOCK_MAPPING as u8, + super::columns::BLOCK_MAPPING as u8, ethereum_hash, Some(vec![hash].encode()), )); @@ -294,13 +294,13 @@ pub(crate) fn migrate_1_to_2_parity_db>( }; let mut db_cfg = parity_db::Options::with_columns(db_path, V2_NUM_COLUMNS as u8); - db_cfg.columns[crate::columns::BLOCK_MAPPING as usize].btree_index = true; + db_cfg.columns[super::columns::BLOCK_MAPPING as usize].btree_index = true; let db = parity_db::Db::open_or_create(&db_cfg) .map_err(|_| io::Error::new(ErrorKind::Other, "Failed to open db"))?; // Get all the block hashes we need to update - let ethereum_hashes: Vec<_> = match db.iter(crate::columns::BLOCK_MAPPING as u8) { + let ethereum_hashes: Vec<_> = match db.iter(super::columns::BLOCK_MAPPING as u8) { Ok(mut iter) => { let mut hashes = vec![]; while let Ok(Some((k, _))) = iter.next() { @@ -347,9 +347,9 @@ mod tests { pub fn open_frontier_backend>( client: Arc, - setting: &crate::DatabaseSettings, - ) -> Result>, String> { - Ok(Arc::new(crate::Backend::::new(client, setting)?)) + setting: &crate::kv::DatabaseSettings, + ) -> Result>, String> { + Ok(Arc::new(crate::kv::Backend::::new(client, setting)?)) } #[test] @@ -359,14 +359,14 @@ mod tests { let settings = vec![ // Rocks db - crate::DatabaseSettings { + crate::kv::DatabaseSettings { source: sc_client_db::DatabaseSource::RocksDb { path: tmp_1.path().to_owned(), cache_size: 0, }, }, // Parity db - crate::DatabaseSettings { + crate::kv::DatabaseSettings { source: sc_client_db::DatabaseSource::ParityDb { path: tmp_2.path().to_owned(), }, @@ -399,7 +399,7 @@ mod tests { // Fill the tmp db with some data let mut transaction = sp_database::Transaction::new(); - for _ in 0..1000 { + for _ in 0..50 { // Ethereum hash let ethhash = H256::random(); // Create two branches, and map the orphan one. 
@@ -426,7 +426,7 @@ mod tests { substrate_hashes.push(next_canon_block_hash); // Set orphan hash block mapping transaction.set( - crate::columns::BLOCK_MAPPING, + crate::kv::columns::BLOCK_MAPPING, ðhash.encode(), &orphan_block_hash.encode(), ); @@ -436,14 +436,14 @@ mod tests { let eth_tx_hash = H256::random(); let mut metadata = vec![]; for hash in vec![next_canon_block_hash, orphan_block_hash].iter() { - metadata.push(crate::TransactionMetadata:: { + metadata.push(crate::kv::TransactionMetadata:: { block_hash: *hash, ethereum_block_hash: ethhash, ethereum_index: 0u32, }); } transaction.set( - crate::columns::TRANSACTION_MAPPING, + crate::kv::columns::TRANSACTION_MAPPING, ð_tx_hash.encode(), &metadata.encode(), ); @@ -505,7 +505,7 @@ mod tests { ); let client = Arc::new(client); - let setting = crate::DatabaseSettings { + let setting = crate::kv::DatabaseSettings { source: sc_client_db::DatabaseSource::RocksDb { path: tmp.path().to_owned(), cache_size: 0, @@ -515,7 +515,7 @@ mod tests { let _ = super::upgrade_db::(client.clone(), &path, &setting.source); let mut file = - std::fs::File::open(crate::upgrade::version_file_path(&path)).expect("file exist"); + std::fs::File::open(crate::kv::upgrade::version_file_path(&path)).expect("file exist"); let mut s = String::new(); file.read_to_string(&mut s).expect("read file contents"); diff --git a/client/db/src/utils.rs b/client/db/src/kv/utils.rs similarity index 87% rename from client/db/src/utils.rs rename to client/db/src/kv/utils.rs index 777ac33099..18fc760e5c 100644 --- a/client/db/src/utils.rs +++ b/client/db/src/kv/utils.rs @@ -21,7 +21,7 @@ use std::{path::Path, sync::Arc}; use sp_blockchain::HeaderBackend; use sp_runtime::traits::Block as BlockT; -use crate::{Database, DatabaseSettings, DatabaseSource, DbHash}; +use super::{Database, DatabaseSettings, DatabaseSource, DbHash}; pub fn open_database>( client: Arc, @@ -59,18 +59,18 @@ fn open_kvdb_rocksdb>( ) -> Result>, String> { // first upgrade database to required version #[cfg(not(test))] - match crate::upgrade::upgrade_db::(client, path, _source) { + match super::upgrade::upgrade_db::(client, path, _source) { Ok(_) => (), Err(_) => return Err("Frontier DB upgrade error".to_string()), } - let mut db_config = kvdb_rocksdb::DatabaseConfig::with_columns(crate::columns::NUM_COLUMNS); + let mut db_config = kvdb_rocksdb::DatabaseConfig::with_columns(super::columns::NUM_COLUMNS); db_config.create_if_missing = create; let db = kvdb_rocksdb::Database::open(&db_config, path).map_err(|err| format!("{}", err))?; // write database version only after the database is succesfully opened #[cfg(not(test))] - crate::upgrade::update_version(path).map_err(|_| "Cannot update db version".to_string())?; + super::upgrade::update_version(path).map_err(|_| "Cannot update db version".to_string())?; return Ok(sp_database::as_database(db)); } @@ -92,18 +92,18 @@ fn open_parity_db>( ) -> Result>, String> { // first upgrade database to required version #[cfg(not(test))] - match crate::upgrade::upgrade_db::(client, path, _source) { + match super::upgrade::upgrade_db::(client, path, _source) { Ok(_) => (), Err(_) => return Err("Frontier DB upgrade error".to_string()), } - let mut config = parity_db::Options::with_columns(path, crate::columns::NUM_COLUMNS as u8); - config.columns[crate::columns::BLOCK_MAPPING as usize].btree_index = true; + let mut config = parity_db::Options::with_columns(path, super::columns::NUM_COLUMNS as u8); + config.columns[super::columns::BLOCK_MAPPING as usize].btree_index = true; let db = 
parity_db::Db::open_or_create(&config).map_err(|err| format!("{}", err))?; // write database version only after the database is succesfully opened #[cfg(not(test))] - crate::upgrade::update_version(path).map_err(|_| "Cannot update db version".to_string())?; - Ok(Arc::new(crate::parity_db_adapter::DbAdapter(db))) + super::upgrade::update_version(path).map_err(|_| "Cannot update db version".to_string())?; + Ok(Arc::new(super::parity_db_adapter::DbAdapter(db))) } #[cfg(not(feature = "parity-db"))] diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index f54defe718..8bb0f10618 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This file is part of Frontier. // -// Copyright (c) 2021-2022 Parity Technologies (UK) Ltd. +// Copyright (c) 2020-2022 Parity Technologies (UK) Ltd. // // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by @@ -18,318 +18,58 @@ #![deny(unused_crate_dependencies)] -#[cfg(feature = "parity-db")] -mod parity_db_adapter; -mod upgrade; -mod utils; - -use std::{ - marker::PhantomData, - path::{Path, PathBuf}, - sync::Arc, -}; - -use parking_lot::Mutex; use scale_codec::{Decode, Encode}; // Substrate pub use sc_client_db::DatabaseSource; -use sp_blockchain::HeaderBackend; use sp_core::H256; -pub use sp_database::Database; use sp_runtime::traits::Block as BlockT; -// Frontier -use fp_storage::{EthereumStorageSchema, PALLET_ETHEREUM_SCHEMA_CACHE}; -const DB_HASH_LEN: usize = 32; -/// Hash type that this backend uses for the database. -pub type DbHash = [u8; DB_HASH_LEN]; - -/// Database settings. -pub struct DatabaseSettings { - /// Where to find the database. - pub source: DatabaseSource, -} - -pub(crate) mod columns { - pub const NUM_COLUMNS: u32 = 4; - - pub const META: u32 = 0; - pub const BLOCK_MAPPING: u32 = 1; - pub const TRANSACTION_MAPPING: u32 = 2; - pub const SYNCED_MAPPING: u32 = 3; -} +pub mod kv; +pub mod sql; +use kv::{columns, static_keys}; -pub mod static_keys { - pub const CURRENT_SYNCING_TIPS: &[u8] = b"CURRENT_SYNCING_TIPS"; +#[derive(Clone)] +pub enum Backend { + KeyValue(kv::Backend), + Sql(sql::Backend), } -pub struct Backend { - meta: Arc>, - mapping: Arc>, -} - -/// Returns the frontier database directory. -pub fn frontier_database_dir(db_config_dir: &Path, db_path: &str) -> PathBuf { - db_config_dir.join("frontier").join(db_path) -} - -impl Backend { - pub fn open>( - client: Arc, - database: &DatabaseSource, - db_config_dir: &Path, - ) -> Result { - Self::new( - client, - &DatabaseSettings { - source: match database { - DatabaseSource::RocksDb { .. } => DatabaseSource::RocksDb { - path: frontier_database_dir(db_config_dir, "db"), - cache_size: 0, - }, - DatabaseSource::ParityDb { .. } => DatabaseSource::ParityDb { - path: frontier_database_dir(db_config_dir, "paritydb"), - }, - DatabaseSource::Auto { .. 
} => DatabaseSource::Auto { - rocksdb_path: frontier_database_dir(db_config_dir, "db"), - paritydb_path: frontier_database_dir(db_config_dir, "paritydb"), - cache_size: 0, - }, - _ => { - return Err( - "Supported db sources: `rocksdb` | `paritydb` | `auto`".to_string() - ) - } - }, - }, - ) - } - - pub fn new>( - client: Arc, - config: &DatabaseSettings, - ) -> Result { - let db = utils::open_database::(client, config)?; - - Ok(Self { - mapping: Arc::new(MappingDb { - db: db.clone(), - write_lock: Arc::new(Mutex::new(())), - _marker: PhantomData, - }), - meta: Arc::new(MetaDb { - db: db.clone(), - _marker: PhantomData, - }), - }) - } - - pub fn mapping(&self) -> &Arc> { - &self.mapping - } - - pub fn meta(&self) -> &Arc> { - &self.meta - } -} - -pub struct MetaDb { - db: Arc>, - _marker: PhantomData, -} - -impl MetaDb { - pub fn current_syncing_tips(&self) -> Result, String> { - match self.db.get( - crate::columns::META, - crate::static_keys::CURRENT_SYNCING_TIPS, - ) { - Some(raw) => { - Ok(Vec::::decode(&mut &raw[..]).map_err(|e| format!("{:?}", e))?) - } - None => Ok(Vec::new()), - } - } - - pub fn write_current_syncing_tips(&self, tips: Vec) -> Result<(), String> { - let mut transaction = sp_database::Transaction::new(); - - transaction.set( - crate::columns::META, - crate::static_keys::CURRENT_SYNCING_TIPS, - &tips.encode(), - ); - - self.db - .commit(transaction) - .map_err(|e| format!("{:?}", e))?; - - Ok(()) - } - - pub fn ethereum_schema(&self) -> Result>, String> { - match self - .db - .get(crate::columns::META, &PALLET_ETHEREUM_SCHEMA_CACHE.encode()) - { - Some(raw) => Ok(Some( - Decode::decode(&mut &raw[..]).map_err(|e| format!("{:?}", e))?, - )), - None => Ok(None), - } - } - - pub fn write_ethereum_schema( - &self, - new_cache: Vec<(EthereumStorageSchema, H256)>, - ) -> Result<(), String> { - let mut transaction = sp_database::Transaction::new(); - - transaction.set( - crate::columns::META, - &PALLET_ETHEREUM_SCHEMA_CACHE.encode(), - &new_cache.encode(), - ); - - self.db - .commit(transaction) - .map_err(|e| format!("{:?}", e))?; - - Ok(()) - } -} - -#[derive(Debug)] -pub struct MappingCommitment { - pub block_hash: Block::Hash, - pub ethereum_block_hash: H256, - pub ethereum_transaction_hashes: Vec, -} - -#[derive(Clone, Debug, Eq, PartialEq, Encode, Decode)] +#[derive(Clone, Encode, Debug, Decode, Eq, PartialEq)] pub struct TransactionMetadata { pub block_hash: Block::Hash, pub ethereum_block_hash: H256, pub ethereum_index: u32, } -pub struct MappingDb { - db: Arc>, - write_lock: Arc>, - _marker: PhantomData, +#[derive(Debug, Eq, PartialEq)] +pub struct FilteredLog { + pub substrate_block_hash: H256, + pub ethereum_block_hash: H256, + pub block_number: u32, + pub ethereum_storage_schema: fp_storage::EthereumStorageSchema, + pub transaction_index: u32, + pub log_index: u32, } -impl MappingDb { - pub fn is_synced(&self, block_hash: &Block::Hash) -> Result { - match self - .db - .get(crate::columns::SYNCED_MAPPING, &block_hash.encode()) - { - Some(raw) => Ok(bool::decode(&mut &raw[..]).map_err(|e| format!("{:?}", e))?), - None => Ok(false), - } - } - - pub fn block_hash( +#[async_trait::async_trait] +pub trait BackendReader { + async fn block_hash( &self, ethereum_block_hash: &H256, - ) -> Result>, String> { - match self - .db - .get(crate::columns::BLOCK_MAPPING, ðereum_block_hash.encode()) - { - Some(raw) => Ok(Some( - Vec::::decode(&mut &raw[..]).map_err(|e| format!("{:?}", e))?, - )), - None => Ok(None), - } - } + ) -> Result>, String>; - pub fn transaction_metadata( + 
async fn transaction_metadata( &self, ethereum_transaction_hash: &H256, - ) -> Result>, String> { - match self.db.get( - crate::columns::TRANSACTION_MAPPING, - ðereum_transaction_hash.encode(), - ) { - Some(raw) => Ok(Vec::>::decode(&mut &raw[..]) - .map_err(|e| format!("{:?}", e))?), - None => Ok(Vec::new()), - } - } + ) -> Result>, String>; - pub fn write_none(&self, block_hash: Block::Hash) -> Result<(), String> { - let _lock = self.write_lock.lock(); - - let mut transaction = sp_database::Transaction::new(); - - transaction.set( - crate::columns::SYNCED_MAPPING, - &block_hash.encode(), - &true.encode(), - ); - - self.db - .commit(transaction) - .map_err(|e| format!("{:?}", e))?; - - Ok(()) - } - - pub fn write_hashes(&self, commitment: MappingCommitment) -> Result<(), String> { - let _lock = self.write_lock.lock(); - - let mut transaction = sp_database::Transaction::new(); - - let substrate_hashes = match self.block_hash(&commitment.ethereum_block_hash) { - Ok(Some(mut data)) => { - data.push(commitment.block_hash); - log::warn!( - target: "fc-db", - "Possible equivocation at ethereum block hash {} {:?}", - &commitment.ethereum_block_hash, - &data - ); - data - } - _ => vec![commitment.block_hash], - }; - - transaction.set( - crate::columns::BLOCK_MAPPING, - &commitment.ethereum_block_hash.encode(), - &substrate_hashes.encode(), - ); - - for (i, ethereum_transaction_hash) in commitment - .ethereum_transaction_hashes - .into_iter() - .enumerate() - { - let mut metadata = self.transaction_metadata(ðereum_transaction_hash)?; - metadata.push(TransactionMetadata:: { - block_hash: commitment.block_hash, - ethereum_block_hash: commitment.ethereum_block_hash, - ethereum_index: i as u32, - }); - transaction.set( - crate::columns::TRANSACTION_MAPPING, - ðereum_transaction_hash.encode(), - &metadata.encode(), - ); - } - - transaction.set( - crate::columns::SYNCED_MAPPING, - &commitment.block_hash.encode(), - &true.encode(), - ); - - self.db - .commit(transaction) - .map_err(|e| format!("{:?}", e))?; + async fn filter_logs( + &self, + from_block: u64, + to_block: u64, + addresses: Vec, + topics: Vec>>, + ) -> Result, String>; - Ok(()) - } + fn is_indexed(&self) -> bool; } diff --git a/client/db/src/sql/mod.rs b/client/db/src/sql/mod.rs new file mode 100644 index 0000000000..3057cabe31 --- /dev/null +++ b/client/db/src/sql/mod.rs @@ -0,0 +1,1912 @@ +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 +// This file is part of Frontier. +// +// Copyright (c) 2020-2022 Parity Technologies (UK) Ltd. +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
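+
+//! A `sqlx`/SQLite based Frontier backend with receipt log indexing.
+//!
+//! It implements the async `BackendReader` interface defined in `lib.rs`.
+//! A minimal, illustrative sketch of how a caller might consume that interface
+//! (the surrounding variables are hypothetical, not part of this patch):
+//!
+//! ```ignore
+//! // `backend` is anything implementing `fc_db::BackendReader<Block>`.
+//! if let Some(substrate_hashes) = backend.block_hash(&ethereum_block_hash).await? {
+//!     // One ethereum block hash may map to several substrate hashes on forks;
+//!     // pick the first candidate here purely for illustration.
+//!     let candidate = substrate_hashes.first().cloned();
+//! }
+//! ```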
+ +use fc_storage::OverrideHandle; +use fp_consensus::{FindLogError, Hashes, Log as ConsensusLog, PostLog, PreLog}; +use fp_rpc::EthereumRuntimeRPCApi; +use fp_storage::{EthereumStorageSchema, PALLET_ETHEREUM_SCHEMA}; +use futures::TryStreamExt; +use sc_client_api::backend::{Backend as BackendT, StateBackend, StorageProvider}; +use scale_codec::{Decode, Encode}; +use sp_api::{ApiExt, ProvideRuntimeApi}; +use sp_blockchain::HeaderBackend; +use sp_core::{H160, H256}; +use sp_runtime::{ + generic::BlockId, + traits::{BlakeTwo256, Block as BlockT, Header as HeaderT, UniqueSaturatedInto, Zero}, +}; +use sqlx::{ + query::Query, + sqlite::{ + SqliteArguments, SqliteConnectOptions, SqlitePool, SqlitePoolOptions, SqliteQueryResult, + }, + ConnectOptions, Error, Execute, QueryBuilder, Row, Sqlite, +}; +use std::num::NonZeroU32; + +use std::{cmp::Ordering, collections::HashSet, str::FromStr, sync::Arc}; + +use crate::FilteredLog; + +/// Maximum number to topics allowed to be filtered upon +const MAX_TOPIC_COUNT: u16 = 4; + +/// Represents a log item. +#[derive(Debug, Eq, PartialEq)] +pub struct Log { + pub address: Vec, + pub topic_1: Option>, + pub topic_2: Option>, + pub topic_3: Option>, + pub topic_4: Option>, + pub log_index: i32, + pub transaction_index: i32, + pub substrate_block_hash: Vec, +} + +/// Represents the block metadata. +#[derive(Eq, PartialEq)] +struct BlockMetadata { + pub substrate_block_hash: H256, + pub block_number: i32, + pub post_hashes: fp_consensus::Hashes, + pub schema: EthereumStorageSchema, + pub is_canon: i32, +} + +/// Represents the Sqlite connection options that are +/// used to establish a database connection. +#[derive(Debug)] +pub struct SqliteBackendConfig<'a> { + pub path: &'a str, + pub create_if_missing: bool, + pub thread_count: u32, + pub cache_size: u64, +} + +/// Represents the indexed status of a block and if it's canon or not. +#[derive(Debug, Default)] +pub struct BlockIndexedStatus { + pub indexed: bool, + pub canon: bool, +} + +/// Represents the backend configurations. +#[derive(Debug)] +pub enum BackendConfig<'a> { + Sqlite(SqliteBackendConfig<'a>), +} + +#[derive(Clone)] +pub struct Backend { + /// The Sqlite connection. + pool: SqlitePool, + + /// The additional overrides for the logs handler. + overrides: Arc>, + + /// The number of allowed operations for the Sqlite filter call. + /// A value of `0` disables the timeout. + num_ops_timeout: i32, +} + +impl Backend +where + Block: BlockT + Send + Sync, +{ + /// Creates a new instance of the SQL backend. + pub async fn new( + config: BackendConfig<'_>, + pool_size: u32, + num_ops_timeout: Option, + overrides: Arc>, + ) -> Result { + let any_pool = SqlitePoolOptions::new() + .max_connections(pool_size) + .connect_lazy_with(Self::connect_options(&config)?.disable_statement_logging()); + let _ = Self::create_database_if_not_exists(&any_pool).await?; + let _ = Self::create_indexes_if_not_exist(&any_pool).await?; + Ok(Self { + pool: any_pool, + overrides, + num_ops_timeout: num_ops_timeout + .map(|n| n.get()) + .unwrap_or(0) + .try_into() + .unwrap_or(i32::MAX), + }) + } + + fn connect_options(config: &BackendConfig) -> Result { + match config { + BackendConfig::Sqlite(config) => { + log::info!( + target: "frontier-sql", + "📑 Connection configuration: {:?}", + config, + ); + let config = sqlx::sqlite::SqliteConnectOptions::from_str(config.path)? 
+ .create_if_missing(config.create_if_missing) + // https://www.sqlite.org/pragma.html#pragma_busy_timeout + .busy_timeout(std::time::Duration::from_secs(8)) + // 200MB, https://www.sqlite.org/pragma.html#pragma_cache_size + .pragma("cache_size", format!("-{}", config.cache_size)) + // https://www.sqlite.org/pragma.html#pragma_analysis_limit + .pragma("analysis_limit", "1000") + // https://www.sqlite.org/pragma.html#pragma_threads + .pragma("threads", config.thread_count.to_string()) + // https://www.sqlite.org/pragma.html#pragma_threads + .pragma("temp_store", "memory") + // https://www.sqlite.org/wal.html + .journal_mode(sqlx::sqlite::SqliteJournalMode::Wal) + // https://www.sqlite.org/pragma.html#pragma_synchronous + .synchronous(sqlx::sqlite::SqliteSynchronous::Normal); + Ok(config) + } + } + } + + /// Get the underlying Sqlite pool. + pub fn pool(&self) -> &SqlitePool { + &self.pool + } + + /// Canonicalize the indexed blocks, marking/demarking them as canon based on the + /// provided `retracted` and `enacted` values. + pub async fn canonicalize(&self, retracted: &[H256], enacted: &[H256]) -> Result<(), Error> { + let mut tx = self.pool().begin().await?; + + // Retracted + let mut builder: QueryBuilder = + QueryBuilder::new("UPDATE blocks SET is_canon = 0 WHERE substrate_block_hash IN ("); + let mut retracted_hashes = builder.separated(", "); + for hash in retracted.iter() { + let hash = hash.as_bytes(); + retracted_hashes.push_bind(hash); + } + retracted_hashes.push_unseparated(")"); + let query = builder.build(); + query.execute(&mut *tx).await?; + + // Enacted + let mut builder: QueryBuilder = + QueryBuilder::new("UPDATE blocks SET is_canon = 1 WHERE substrate_block_hash IN ("); + let mut enacted_hashes = builder.separated(", "); + for hash in enacted.iter() { + let hash = hash.as_bytes(); + enacted_hashes.push_bind(hash); + } + enacted_hashes.push_unseparated(")"); + let query = builder.build(); + query.execute(&mut *tx).await?; + + tx.commit().await + } + + /// Index the block metadata for the genesis block. + pub async fn insert_genesis_block_metadata( + &self, + client: Arc, + ) -> Result, Error> + where + Client: StorageProvider + HeaderBackend + Send + Sync + 'static, + Client: ProvideRuntimeApi, + Client::Api: EthereumRuntimeRPCApi, + BE: BackendT + 'static, + BE::State: StateBackend, + { + let id = BlockId::Number(Zero::zero()); + let substrate_genesis_hash = client + .expect_block_hash_from_id(&id) + .map_err(|_| Error::Protocol("Cannot resolve genesis hash".to_string()))?; + let maybe_substrate_hash: Option = if let Ok(Some(_)) = + client.header(substrate_genesis_hash) + { + let has_api = client + .runtime_api() + .has_api_with::, _>( + substrate_genesis_hash, + |version| version >= 1, + ) + .expect("runtime api reachable"); + + log::debug!( + target: "frontier-sql", + "Index genesis block, has_api={}, hash={:?}", + has_api, + substrate_genesis_hash, + ); + + if has_api { + // The chain has frontier support from genesis. + // Read from the runtime and store the block metadata. 
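+				// Genesis has no ethereum transactions to map, so only the `blocks`
+				// row is written here; the `transactions` table is left untouched.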
+ let ethereum_block = client + .runtime_api() + .current_block(substrate_genesis_hash) + .expect("runtime api reachable") + .expect("ethereum genesis block"); + + let schema = + Self::onchain_storage_schema(client.as_ref(), substrate_genesis_hash).encode(); + let ethereum_block_hash = ethereum_block.header.hash().as_bytes().to_owned(); + let substrate_block_hash = substrate_genesis_hash.as_bytes(); + let block_number = 0i32; + let is_canon = 1i32; + + let _ = sqlx::query( + "INSERT OR IGNORE INTO blocks( + ethereum_block_hash, + substrate_block_hash, + block_number, + ethereum_storage_schema, + is_canon) + VALUES (?, ?, ?, ?, ?)", + ) + .bind(ethereum_block_hash) + .bind(substrate_block_hash) + .bind(block_number) + .bind(schema) + .bind(is_canon) + .execute(self.pool()) + .await?; + } + Some(substrate_genesis_hash) + } else { + None + }; + Ok(maybe_substrate_hash) + } + + fn insert_block_metadata_inner( + client: Arc, + hash: H256, + overrides: Arc>, + ) -> Result + where + Client: StorageProvider + HeaderBackend + Send + Sync + 'static, + BE: BackendT + 'static, + BE::State: StateBackend, + { + log::trace!( + target: "frontier-sql", + "🛠️ [Metadata] Retrieving digest data for block {:?}", + hash, + ); + if let Ok(Some(header)) = client.header(hash) { + match fp_consensus::find_log(header.digest()) { + Ok(log) => { + let schema = Self::onchain_storage_schema(client.as_ref(), hash); + let log_hashes = match log { + ConsensusLog::Post(PostLog::Hashes(post_hashes)) => post_hashes, + ConsensusLog::Post(PostLog::Block(block)) => Hashes::from_block(block), + ConsensusLog::Post(PostLog::BlockHash(expect_eth_block_hash)) => { + let ethereum_block = overrides + .schemas + .get(&schema) + .unwrap_or(&overrides.fallback) + .current_block(hash); + match ethereum_block { + Some(block) => { + let got_eth_block_hash = block.header.hash(); + if got_eth_block_hash != expect_eth_block_hash { + return Err(Error::Protocol(format!( + "Ethereum block hash mismatch: \ + frontier consensus digest ({expect_eth_block_hash:?}), \ + db state ({got_eth_block_hash:?})" + ))); + } else { + Hashes::from_block(block) + } + } + None => { + return Err(Error::Protocol(format!( + "Missing ethereum block for hash mismatch {expect_eth_block_hash:?}" + ))) + } + } + } + ConsensusLog::Pre(PreLog::Block(block)) => Hashes::from_block(block), + }; + + let header_number = *header.number(); + let block_number = + UniqueSaturatedInto::::unique_saturated_into(header_number) as i32; + let is_canon = + match client.hash(header_number) { + Ok(Some(inner_hash)) => (inner_hash == hash) as i32, + Ok(None) => { + log::debug!( + target: "frontier-sql", + "[Metadata] Missing header for block #{} ({:?})", + block_number, hash, + ); + 0 + } + Err(err) => { + log::debug!( + "[Metadata] Failed to retrieve header for block #{} ({:?}): {:?}", + block_number, hash, err, + ); + 0 + } + }; + + log::trace!( + target: "frontier-sql", + "[Metadata] Prepared block metadata for #{} ({:?}) canon={}", + block_number, + hash, + is_canon, + ); + Ok(BlockMetadata { + substrate_block_hash: hash, + block_number, + post_hashes: log_hashes, + schema, + is_canon, + }) + } + Err(FindLogError::NotFound) => { + return Err(Error::Protocol(format!( + "[Metadata] No logs found for hash {:?}", + hash + ))) + } + Err(FindLogError::MultipleLogs) => { + return Err(Error::Protocol(format!( + "[Metadata] Multiple logs found for hash {:?}", + hash + ))) + } + } + } else { + return Err(Error::Protocol(format!( + "[Metadata] Failed retrieving header for hash {:?}", + hash + ))); + 
} + } + + /// Insert the block metadata for the provided block hashes. + pub async fn insert_block_metadata( + &self, + client: Arc, + hash: H256, + ) -> Result<(), Error> + where + Client: StorageProvider + HeaderBackend + Send + Sync + 'static, + BE: BackendT + 'static, + BE::State: StateBackend, + { + // Spawn a blocking task to get block metadata from substrate backend. + let overrides = self.overrides.clone(); + let metadata = tokio::task::spawn_blocking(move || { + Self::insert_block_metadata_inner(client.clone(), hash, overrides) + }) + .await + .map_err(|_| Error::Protocol("tokio blocking metadata task failed".to_string()))??; + + let mut tx = self.pool().begin().await?; + + log::debug!( + target: "frontier-sql", + "🛠️ [Metadata] Starting execution of statements on db transaction" + ); + let post_hashes = metadata.post_hashes; + let ethereum_block_hash = post_hashes.block_hash.as_bytes(); + let substrate_block_hash = metadata.substrate_block_hash.as_bytes(); + let schema = metadata.schema.encode(); + let block_number = metadata.block_number; + let is_canon = metadata.is_canon; + + let _ = sqlx::query( + "INSERT OR IGNORE INTO blocks( + ethereum_block_hash, + substrate_block_hash, + block_number, + ethereum_storage_schema, + is_canon) + VALUES (?, ?, ?, ?, ?)", + ) + .bind(ethereum_block_hash) + .bind(substrate_block_hash) + .bind(block_number) + .bind(schema) + .bind(is_canon) + .execute(&mut *tx) + .await?; + for (i, &transaction_hash) in post_hashes.transaction_hashes.iter().enumerate() { + let ethereum_transaction_hash = transaction_hash.as_bytes(); + let ethereum_transaction_index = i as i32; + log::trace!( + target: "frontier-sql", + "[Metadata] Inserting TX for block #{} - {:?} index {}", + block_number, + transaction_hash, + ethereum_transaction_index, + ); + let _ = sqlx::query( + "INSERT OR IGNORE INTO transactions( + ethereum_transaction_hash, + substrate_block_hash, + ethereum_block_hash, + ethereum_transaction_index) + VALUES (?, ?, ?, ?)", + ) + .bind(ethereum_transaction_hash) + .bind(substrate_block_hash) + .bind(ethereum_block_hash) + .bind(ethereum_transaction_index) + .execute(&mut *tx) + .await?; + } + + sqlx::query("INSERT INTO sync_status(substrate_block_hash) VALUES (?)") + .bind(hash.as_bytes()) + .execute(&mut *tx) + .await?; + + log::debug!( + target: "frontier-sql", + "[Metadata] Ready to commit", + ); + tx.commit().await + } + + /// Index the logs for the newly indexed blocks upto a `max_pending_blocks` value. + pub async fn index_block_logs(&self, client: Arc, block_hash: Block::Hash) + where + Client: StorageProvider + HeaderBackend + Send + Sync + 'static, + BE: BackendT + 'static, + BE::State: StateBackend, + { + let pool = self.pool().clone(); + let overrides = self.overrides.clone(); + let _ = async { + // The overarching db transaction for the task. + // Due to the async nature of this task, the same work is likely to happen + // more than once. For example when a new batch is scheduled when the previous one + // didn't finished yet and the new batch happens to select the same substrate + // block hashes for the update. + // That is expected, we are exchanging extra work for *acid*ity. + // There is no case of unique constrain violation or race condition as already + // existing entries are ignored. + let mut tx = pool.begin().await?; + // Update statement returning the substrate block hashes for this batch. 
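+			// If the block is unknown to `sync_status` or already marked as indexed,
+			// the RETURNING clause yields no row, `fetch_one` returns an error, and the
+			// transaction is dropped without touching the `logs` table.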
+ match sqlx::query( + "UPDATE sync_status + SET status = 1 + WHERE substrate_block_hash IN + (SELECT substrate_block_hash + FROM sync_status + WHERE status = 0 AND substrate_block_hash = ?) RETURNING substrate_block_hash", + ) + .bind(block_hash.as_bytes()) + .fetch_one(&mut *tx) + .await + { + Ok(_) => { + // Spawn a blocking task to get log data from substrate backend. + let logs = tokio::task::spawn_blocking(move || { + Self::get_logs(client.clone(), overrides, block_hash) + }) + .await + .map_err(|_| Error::Protocol("tokio blocking task failed".to_string()))?; + + for log in logs { + let _ = sqlx::query( + "INSERT OR IGNORE INTO logs( + address, + topic_1, + topic_2, + topic_3, + topic_4, + log_index, + transaction_index, + substrate_block_hash) + VALUES (?, ?, ?, ?, ?, ?, ?, ?)", + ) + .bind(log.address) + .bind(log.topic_1) + .bind(log.topic_2) + .bind(log.topic_3) + .bind(log.topic_4) + .bind(log.log_index) + .bind(log.transaction_index) + .bind(log.substrate_block_hash) + .execute(&mut *tx) + .await?; + } + Ok(tx.commit().await?) + } + Err(e) => Err(e), + } + } + .await + .map_err(|e| { + log::error!( + target: "frontier-sql", + "{}", + e + ) + }); + // https://www.sqlite.org/pragma.html#pragma_optimize + let _ = sqlx::query("PRAGMA optimize").execute(&pool).await; + log::debug!( + target: "frontier-sql", + "Batch commited" + ); + } + + fn get_logs( + client: Arc, + overrides: Arc>, + substrate_block_hash: H256, + ) -> Vec + where + Client: StorageProvider + HeaderBackend + Send + Sync + 'static, + BE: BackendT + 'static, + BE::State: StateBackend, + { + let mut logs: Vec = vec![]; + let mut transaction_count: usize = 0; + let mut log_count: usize = 0; + let schema = Self::onchain_storage_schema(client.as_ref(), substrate_block_hash); + let handler = overrides + .schemas + .get(&schema) + .unwrap_or(&overrides.fallback); + + let receipts = handler + .current_receipts(substrate_block_hash) + .unwrap_or_default(); + + transaction_count += receipts.len(); + for (transaction_index, receipt) in receipts.iter().enumerate() { + let receipt_logs = match receipt { + ethereum::ReceiptV3::Legacy(d) + | ethereum::ReceiptV3::EIP2930(d) + | ethereum::ReceiptV3::EIP1559(d) => &d.logs, + }; + let transaction_index = transaction_index as i32; + log_count += receipt_logs.len(); + for (log_index, log) in receipt_logs.iter().enumerate() { + logs.push(Log { + address: log.address.as_bytes().to_owned(), + topic_1: log.topics.get(0).map(|l| l.as_bytes().to_owned()), + topic_2: log.topics.get(1).map(|l| l.as_bytes().to_owned()), + topic_3: log.topics.get(2).map(|l| l.as_bytes().to_owned()), + topic_4: log.topics.get(3).map(|l| l.as_bytes().to_owned()), + log_index: log_index as i32, + transaction_index, + substrate_block_hash: substrate_block_hash.as_bytes().to_owned(), + }); + } + } + log::debug!( + target: "frontier-sql", + "Ready to commit {} logs from {} transactions", + log_count, + transaction_count + ); + logs + } + + fn onchain_storage_schema(client: &Client, at: Block::Hash) -> EthereumStorageSchema + where + Client: StorageProvider + HeaderBackend + Send + Sync + 'static, + BE: BackendT + 'static, + BE::State: StateBackend, + { + match client.storage(at, &sp_storage::StorageKey(PALLET_ETHEREUM_SCHEMA.to_vec())) { + Ok(Some(bytes)) => Decode::decode(&mut &bytes.0[..]) + .ok() + .unwrap_or(EthereumStorageSchema::Undefined), + _ => EthereumStorageSchema::Undefined, + } + } + + /// Retrieves the status if a block has been already indexed. 
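+	/// Any query error is treated as "not indexed" and reported as `false`.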
+ pub async fn is_block_indexed(&self, block_hash: Block::Hash) -> bool { + sqlx::query("SELECT substrate_block_hash FROM sync_status WHERE substrate_block_hash = ?") + .bind(block_hash.as_bytes().to_owned()) + .fetch_optional(self.pool()) + .await + .map(|r| r.is_some()) + .unwrap_or(false) + } + + /// Retrieves the status if a block is indexed and if also marked as canon. + pub async fn block_indexed_and_canon_status( + &self, + block_hash: Block::Hash, + ) -> BlockIndexedStatus { + sqlx::query( + "SELECT b.is_canon FROM sync_status AS s + INNER JOIN blocks AS b + ON s.substrate_block_hash = b.substrate_block_hash + WHERE s.substrate_block_hash = ?", + ) + .bind(block_hash.as_bytes().to_owned()) + .fetch_optional(self.pool()) + .await + .map(|result| { + result + .map(|row| { + let is_canon: i32 = row.get(0); + BlockIndexedStatus { + indexed: true, + canon: is_canon != 0, + } + }) + .unwrap_or_default() + }) + .unwrap_or_default() + } + + /// Sets the provided block as canon. + pub async fn set_block_as_canon(&self, block_hash: H256) -> Result { + sqlx::query("UPDATE blocks SET is_canon = 1 WHERE substrate_block_hash = ?") + .bind(block_hash.as_bytes()) + .execute(self.pool()) + .await + } + + /// Retrieves the first missing canonical block number in decreasing order that hasn't been indexed yet. + /// If no unindexed block exists or the table or the rows do not exist, then the function + /// returns `None`. + pub async fn get_first_missing_canon_block(&self) -> Option { + match sqlx::query( + "SELECT b1.block_number-1 + FROM blocks as b1 + WHERE b1.block_number > 0 AND b1.is_canon=1 AND NOT EXISTS ( + SELECT 1 FROM blocks AS b2 + WHERE b2.block_number = b1.block_number-1 + AND b1.is_canon=1 + AND b2.is_canon=1 + ) + ORDER BY block_number LIMIT 1", + ) + .fetch_optional(self.pool()) + .await + { + Ok(result) => { + if let Some(row) = result { + let block_number: u32 = row.get(0); + return Some(block_number); + } + } + Err(err) => { + log::debug!( + target: "frontier-sql", + "Failed retrieving missing block {:?}", + err + ); + } + } + + None + } + + /// Retrieves the first pending canonical block hash in decreasing order that hasn't had + // its logs indexed yet. If no unindexed block exists or the table or the rows do not exist, + /// then the function returns `None`. + pub async fn get_first_pending_canon_block(&self) -> Option { + match sqlx::query( + "SELECT s.substrate_block_hash FROM sync_status AS s + INNER JOIN blocks as b + ON s.substrate_block_hash = b.substrate_block_hash + WHERE b.is_canon = 1 AND s.status = 0 + ORDER BY b.block_number LIMIT 1", + ) + .fetch_optional(self.pool()) + .await + { + Ok(result) => { + if let Some(row) = result { + let block_hash_bytes: Vec = row.get(0); + let block_hash = H256::from_slice(&block_hash_bytes[..]); + return Some(block_hash); + } + } + Err(err) => { + log::debug!( + target: "frontier-sql", + "Failed retrieving missing block {:?}", + err + ); + } + } + + None + } + + /// Retrieve the block hash for the last indexed canon block. + pub async fn get_last_indexed_canon_block(&self) -> Result { + let row = sqlx::query( + "SELECT b.substrate_block_hash FROM blocks AS b + INNER JOIN sync_status AS s + ON s.substrate_block_hash = b.substrate_block_hash + WHERE b.is_canon=1 AND s.status = 1 + ORDER BY b.id DESC LIMIT 1", + ) + .fetch_one(self.pool()) + .await?; + Ok(H256::from_slice( + &row.try_get::, _>(0).unwrap_or_default()[..], + )) + } + + /// Create the Sqlite database if it does not already exist. 
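+	/// Creates the `logs`, `sync_status`, `blocks` and `transactions` tables in a
+	/// single transaction; existing tables are left untouched.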
+ async fn create_database_if_not_exists(pool: &SqlitePool) -> Result { + sqlx::query( + "BEGIN; + CREATE TABLE IF NOT EXISTS logs ( + id INTEGER PRIMARY KEY, + address BLOB NOT NULL, + topic_1 BLOB, + topic_2 BLOB, + topic_3 BLOB, + topic_4 BLOB, + log_index INTEGER NOT NULL, + transaction_index INTEGER NOT NULL, + substrate_block_hash BLOB NOT NULL, + UNIQUE ( + log_index, + transaction_index, + substrate_block_hash + ) + ); + CREATE TABLE IF NOT EXISTS sync_status ( + id INTEGER PRIMARY KEY, + substrate_block_hash BLOB NOT NULL, + status INTEGER DEFAULT 0 NOT NULL, + UNIQUE ( + substrate_block_hash + ) + ); + CREATE TABLE IF NOT EXISTS blocks ( + id INTEGER PRIMARY KEY, + block_number INTEGER NOT NULL, + ethereum_block_hash BLOB NOT NULL, + substrate_block_hash BLOB NOT NULL, + ethereum_storage_schema BLOB NOT NULL, + is_canon INTEGER NOT NULL, + UNIQUE ( + ethereum_block_hash, + substrate_block_hash + ) + ); + CREATE TABLE IF NOT EXISTS transactions ( + id INTEGER PRIMARY KEY, + ethereum_transaction_hash BLOB NOT NULL, + substrate_block_hash BLOB NOT NULL, + ethereum_block_hash BLOB NOT NULL, + ethereum_transaction_index INTEGER NOT NULL, + UNIQUE ( + ethereum_transaction_hash, + substrate_block_hash + ) + ); + COMMIT;", + ) + .execute(pool) + .await + } + + /// Create the Sqlite database indices if it does not already exist. + async fn create_indexes_if_not_exist(pool: &SqlitePool) -> Result { + sqlx::query( + "BEGIN; + CREATE INDEX IF NOT EXISTS logs_main_idx ON logs ( + address, + topic_1, + topic_2, + topic_3, + topic_4 + ); + CREATE INDEX IF NOT EXISTS logs_substrate_index ON logs ( + substrate_block_hash + ); + CREATE INDEX IF NOT EXISTS blocks_number_index ON blocks ( + block_number + ); + CREATE INDEX IF NOT EXISTS blocks_substrate_index ON blocks ( + substrate_block_hash + ); + CREATE INDEX IF NOT EXISTS eth_block_hash_idx ON blocks ( + ethereum_block_hash + ); + CREATE INDEX IF NOT EXISTS eth_tx_hash_idx ON transactions ( + ethereum_transaction_hash + ); + CREATE INDEX IF NOT EXISTS eth_tx_hash_2_idx ON transactions ( + ethereum_block_hash, + ethereum_transaction_index + ); + COMMIT;", + ) + .execute(pool) + .await + } +} + +#[async_trait::async_trait] +impl> crate::BackendReader for Backend { + async fn block_hash( + &self, + ethereum_block_hash: &H256, + ) -> Result>, String> { + let ethereum_block_hash = ethereum_block_hash.as_bytes(); + let res = + sqlx::query("SELECT substrate_block_hash FROM blocks WHERE ethereum_block_hash = ?") + .bind(ethereum_block_hash) + .fetch_all(&self.pool) + .await + .ok() + .map(|rows| { + rows.iter() + .map(|row| { + H256::from_slice(&row.try_get::, _>(0).unwrap_or_default()[..]) + }) + .collect() + }); + Ok(res) + } + async fn transaction_metadata( + &self, + ethereum_transaction_hash: &H256, + ) -> Result>, String> { + let ethereum_transaction_hash = ethereum_transaction_hash.as_bytes(); + let out = sqlx::query( + "SELECT + substrate_block_hash, ethereum_block_hash, ethereum_transaction_index + FROM transactions WHERE ethereum_transaction_hash = ?", + ) + .bind(ethereum_transaction_hash) + .fetch_all(&self.pool) + .await + .unwrap_or_default() + .iter() + .map(|row| { + let substrate_block_hash = + H256::from_slice(&row.try_get::, _>(0).unwrap_or_default()[..]); + let ethereum_block_hash = + H256::from_slice(&row.try_get::, _>(1).unwrap_or_default()[..]); + let ethereum_transaction_index = row.try_get::(2).unwrap_or_default() as u32; + crate::TransactionMetadata { + block_hash: substrate_block_hash, + ethereum_block_hash, + ethereum_index: 
ethereum_transaction_index, + } + }) + .collect(); + + Ok(out) + } + + async fn filter_logs( + &self, + from_block: u64, + to_block: u64, + addresses: Vec, + topics: Vec>>, + ) -> Result, String> { + let mut unique_topics: [HashSet; 4] = [ + HashSet::new(), + HashSet::new(), + HashSet::new(), + HashSet::new(), + ]; + for topic_combination in topics.into_iter() { + for (topic_index, topic) in topic_combination.into_iter().enumerate() { + if topic_index == MAX_TOPIC_COUNT as usize { + return Err("Invalid topic input. Maximum length is 4.".to_string()); + } + + if let Some(topic) = topic { + unique_topics[topic_index].insert(topic); + } + } + } + + let log_key = format!( + "{}-{}-{:?}-{:?}", + from_block, to_block, addresses, unique_topics + ); + let mut qb = QueryBuilder::new(""); + let query = build_query(&mut qb, from_block, to_block, addresses, unique_topics); + let sql = query.sql(); + + let mut conn = self + .pool() + .acquire() + .await + .map_err(|err| format!("failed acquiring sqlite connection: {}", err))?; + let log_key2 = log_key.clone(); + conn.lock_handle() + .await + .map_err(|err| format!("{:?}", err))? + .set_progress_handler(self.num_ops_timeout, move || { + log::debug!( + target: "frontier-sql", + "Sqlite progress_handler triggered for {}", + log_key2, + ); + false + }); + log::debug!( + target: "frontier-sql", + "Query: {:?} - {}", + sql, + log_key, + ); + + let mut out: Vec = vec![]; + let mut rows = query.fetch(&mut *conn); + let maybe_err = loop { + match rows.try_next().await { + Ok(Some(row)) => { + // Substrate block hash + let substrate_block_hash = + H256::from_slice(&row.try_get::, _>(0).unwrap_or_default()[..]); + // Ethereum block hash + let ethereum_block_hash = + H256::from_slice(&row.try_get::, _>(1).unwrap_or_default()[..]); + // Block number + let block_number = row.try_get::(2).unwrap_or_default() as u32; + // Ethereum storage schema + let ethereum_storage_schema: EthereumStorageSchema = + Decode::decode(&mut &row.try_get::, _>(3).unwrap_or_default()[..]) + .map_err(|_| { + "Cannot decode EthereumStorageSchema for block".to_string() + })?; + // Transaction index + let transaction_index = row.try_get::(4).unwrap_or_default() as u32; + // Log index + let log_index = row.try_get::(5).unwrap_or_default() as u32; + out.push(FilteredLog { + substrate_block_hash, + ethereum_block_hash, + block_number, + ethereum_storage_schema, + transaction_index, + log_index, + }); + } + Ok(None) => break None, // no more rows + Err(err) => break Some(err), + }; + }; + drop(rows); + conn.lock_handle() + .await + .map_err(|err| format!("{:?}", err))? + .remove_progress_handler(); + + if let Some(err) = maybe_err { + log::error!( + target: "frontier-sql", + "Failed to query sql db: {:?} - {}", + err, + log_key, + ); + return Err("Failed to query sql db with statement".to_string()); + } + + log::info!( + target: "frontier-sql", + "FILTER remove handler - {}", + log_key, + ); + Ok(out) + } + + fn is_indexed(&self) -> bool { + true + } +} + +/// Build a SQL query to retrieve a list of logs given certain constraints. 
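+/// The query joins `logs` against canonical `blocks` over the requested block
+/// range, adds `IN`/`=` constraints for the given addresses and per-position
+/// topics, and caps the result at 10001 rows.
+///
+/// A minimal sketch of the intended call shape, mirroring `filter_logs` above
+/// (the pool, block bounds and empty filters are placeholders):
+///
+/// ```ignore
+/// let mut qb = QueryBuilder::new("");
+/// let query = build_query(
+///     &mut qb,
+///     0,
+///     100,
+///     vec![],
+///     [HashSet::new(), HashSet::new(), HashSet::new(), HashSet::new()],
+/// );
+/// let rows = query.fetch_all(&pool).await?;
+/// ```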
+fn build_query<'a>( + qb: &'a mut QueryBuilder, + from_block: u64, + to_block: u64, + addresses: Vec, + topics: [HashSet; 4], +) -> Query<'a, Sqlite, SqliteArguments<'a>> { + qb.push( + " +SELECT + l.substrate_block_hash, + b.ethereum_block_hash, + b.block_number, + b.ethereum_storage_schema, + l.transaction_index, + l.log_index +FROM logs AS l +INNER JOIN blocks AS b +ON (b.block_number BETWEEN ", + ); + qb.separated(" AND ") + .push_bind(from_block as i64) + .push_bind(to_block as i64) + .push_unseparated(")"); + qb.push(" AND b.substrate_block_hash = l.substrate_block_hash") + .push(" AND b.is_canon = 1") + .push("\nWHERE 1"); + + if !addresses.is_empty() { + qb.push(" AND l.address IN ("); + let mut qb_addr = qb.separated(", "); + addresses.iter().for_each(|addr| { + qb_addr.push_bind(addr.as_bytes().to_owned()); + }); + qb_addr.push_unseparated(")"); + } + + for (i, topic_options) in topics.iter().enumerate() { + match topic_options.len().cmp(&1) { + Ordering::Greater => { + qb.push(format!(" AND l.topic_{} IN (", i + 1)); + let mut qb_topic = qb.separated(", "); + topic_options.iter().for_each(|t| { + qb_topic.push_bind(t.as_bytes().to_owned()); + }); + qb_topic.push_unseparated(")"); + } + Ordering::Equal => { + qb.push(format!(" AND l.topic_{} = ", i + 1)).push_bind( + topic_options + .iter() + .next() + .expect("length is 1, must exist; qed") + .as_bytes() + .to_owned(), + ); + } + Ordering::Less => {} + } + } + + qb.push( + " +ORDER BY b.block_number ASC, l.transaction_index ASC, l.log_index ASC +LIMIT 10001", + ); + + qb.build() +} + +#[cfg(test)] +mod test { + use super::FilteredLog; + + use crate::BackendReader; + use fc_rpc::{OverrideHandle, SchemaV3Override, StorageOverride}; + use fp_storage::{EthereumStorageSchema, PALLET_ETHEREUM_SCHEMA}; + use maplit::hashset; + use scale_codec::Encode; + use sp_core::{H160, H256}; + use sp_runtime::{ + generic::{Block, Header}, + traits::BlakeTwo256, + }; + use sqlx::{sqlite::SqliteRow, QueryBuilder, Row, SqlitePool}; + use std::{collections::BTreeMap, path::Path, sync::Arc}; + use substrate_test_runtime_client::{ + DefaultTestClientBuilderExt, TestClientBuilder, TestClientBuilderExt, + }; + use tempfile::tempdir; + + type OpaqueBlock = + Block, substrate_test_runtime_client::runtime::Extrinsic>; + + struct TestFilter { + pub from_block: u64, + pub to_block: u64, + pub addresses: Vec, + pub topics: Vec>>, + pub expected_result: Vec, + } + + #[derive(Debug, Clone)] + struct Log { + block_number: u32, + address: H160, + topics: [H256; 4], + substrate_block_hash: H256, + ethereum_block_hash: H256, + transaction_index: u32, + log_index: u32, + } + + #[allow(unused)] + struct TestData { + backend: super::Backend, + alice: H160, + bob: H160, + topics_a: H256, + topics_b: H256, + topics_c: H256, + topics_d: H256, + substrate_hash_1: H256, + substrate_hash_2: H256, + substrate_hash_3: H256, + ethereum_hash_1: H256, + ethereum_hash_2: H256, + ethereum_hash_3: H256, + log_1_abcd_0_0_alice: Log, + log_1_dcba_1_0_alice: Log, + log_1_badc_2_0_alice: Log, + log_2_abcd_0_0_bob: Log, + log_2_dcba_1_0_bob: Log, + log_2_badc_2_0_bob: Log, + log_3_abcd_0_0_bob: Log, + log_3_dcba_1_0_bob: Log, + log_3_badc_2_0_bob: Log, + } + + impl From for FilteredLog { + fn from(value: Log) -> Self { + Self { + substrate_block_hash: value.substrate_block_hash, + ethereum_block_hash: value.ethereum_block_hash, + block_number: value.block_number, + ethereum_storage_schema: EthereumStorageSchema::V3, + transaction_index: value.transaction_index, + log_index: 
value.log_index, + } + } + } + + async fn prepare() -> TestData { + let tmp = tempdir().expect("create a temporary directory"); + // Initialize storage with schema V3 + let builder = TestClientBuilder::new().add_extra_storage( + PALLET_ETHEREUM_SCHEMA.to_vec(), + Encode::encode(&EthereumStorageSchema::V3), + ); + // Client + let (client, _) = builder + .build_with_native_executor::( + None, + ); + let client = Arc::new(client); + // Overrides + let mut overrides_map = BTreeMap::new(); + overrides_map.insert( + EthereumStorageSchema::V3, + Box::new(SchemaV3Override::new(client.clone())) as Box>, + ); + let overrides = Arc::new(OverrideHandle { + schemas: overrides_map, + fallback: Box::new(SchemaV3Override::new(client.clone())), + }); + + // Indexer backend + let indexer_backend = super::Backend::new( + super::BackendConfig::Sqlite(super::SqliteBackendConfig { + path: Path::new("sqlite:///") + .join(tmp.path()) + .join("test.db3") + .to_str() + .unwrap(), + create_if_missing: true, + cache_size: 20480, + thread_count: 4, + }), + 1, + None, + overrides.clone(), + ) + .await + .expect("indexer pool to be created"); + + // Prepare test db data + // Addresses + let alice = H160::repeat_byte(0x01); + let bob = H160::repeat_byte(0x02); + // Topics + let topics_a = H256::repeat_byte(0x01); + let topics_b = H256::repeat_byte(0x02); + let topics_c = H256::repeat_byte(0x03); + let topics_d = H256::repeat_byte(0x04); + // Substrate block hashes + let substrate_hash_1 = H256::repeat_byte(0x05); + let substrate_hash_2 = H256::repeat_byte(0x06); + let substrate_hash_3 = H256::repeat_byte(0x07); + // Ethereum block hashes + let ethereum_hash_1 = H256::repeat_byte(0x08); + let ethereum_hash_2 = H256::repeat_byte(0x09); + let ethereum_hash_3 = H256::repeat_byte(0x0a); + // Ethereum storage schema + let ethereum_storage_schema = EthereumStorageSchema::V3; + + let block_entries = vec![ + // Block 1 + ( + 1i32, + ethereum_hash_1, + substrate_hash_1, + ethereum_storage_schema, + ), + // Block 2 + ( + 2i32, + ethereum_hash_2, + substrate_hash_2, + ethereum_storage_schema, + ), + // Block 3 + ( + 3i32, + ethereum_hash_3, + substrate_hash_3, + ethereum_storage_schema, + ), + ]; + let mut builder = QueryBuilder::new( + "INSERT INTO blocks( + block_number, + ethereum_block_hash, + substrate_block_hash, + ethereum_storage_schema, + is_canon + )", + ); + builder.push_values(block_entries, |mut b, entry| { + let block_number = entry.0; + let ethereum_block_hash = entry.1.as_bytes().to_owned(); + let substrate_block_hash = entry.2.as_bytes().to_owned(); + let ethereum_storage_schema = entry.3.encode(); + + b.push_bind(block_number); + b.push_bind(ethereum_block_hash); + b.push_bind(substrate_block_hash); + b.push_bind(ethereum_storage_schema); + b.push_bind(1i32); + }); + let query = builder.build(); + let _ = query + .execute(indexer_backend.pool()) + .await + .expect("insert should succeed"); + + // log_{BLOCK}_{TOPICS}_{LOG_INDEX}_{TX_INDEX} + let log_1_abcd_0_0_alice = Log { + block_number: 1, + address: alice, + topics: [topics_a, topics_b, topics_c, topics_d], + log_index: 0, + transaction_index: 0, + substrate_block_hash: substrate_hash_1, + ethereum_block_hash: ethereum_hash_1, + }; + let log_1_dcba_1_0_alice = Log { + block_number: 1, + address: alice, + topics: [topics_d, topics_c, topics_b, topics_a], + log_index: 1, + transaction_index: 0, + substrate_block_hash: substrate_hash_1, + ethereum_block_hash: ethereum_hash_1, + }; + let log_1_badc_2_0_alice = Log { + block_number: 1, + address: alice, + topics: 
[topics_b, topics_a, topics_d, topics_c], + log_index: 2, + transaction_index: 0, + substrate_block_hash: substrate_hash_1, + ethereum_block_hash: ethereum_hash_1, + }; + let log_2_abcd_0_0_bob = Log { + block_number: 2, + address: bob, + topics: [topics_a, topics_b, topics_c, topics_d], + log_index: 0, + transaction_index: 0, + substrate_block_hash: substrate_hash_2, + ethereum_block_hash: ethereum_hash_2, + }; + let log_2_dcba_1_0_bob = Log { + block_number: 2, + address: bob, + topics: [topics_d, topics_c, topics_b, topics_a], + log_index: 1, + transaction_index: 0, + substrate_block_hash: substrate_hash_2, + ethereum_block_hash: ethereum_hash_2, + }; + let log_2_badc_2_0_bob = Log { + block_number: 2, + address: bob, + topics: [topics_b, topics_a, topics_d, topics_c], + log_index: 2, + transaction_index: 0, + substrate_block_hash: substrate_hash_2, + ethereum_block_hash: ethereum_hash_2, + }; + + let log_3_abcd_0_0_bob = Log { + block_number: 3, + address: bob, + topics: [topics_a, topics_b, topics_c, topics_d], + log_index: 0, + transaction_index: 0, + substrate_block_hash: substrate_hash_3, + ethereum_block_hash: ethereum_hash_3, + }; + let log_3_dcba_1_0_bob = Log { + block_number: 3, + address: bob, + topics: [topics_d, topics_c, topics_b, topics_a], + log_index: 1, + transaction_index: 0, + substrate_block_hash: substrate_hash_3, + ethereum_block_hash: ethereum_hash_3, + }; + let log_3_badc_2_0_bob = Log { + block_number: 3, + address: bob, + topics: [topics_b, topics_a, topics_d, topics_c], + log_index: 2, + transaction_index: 0, + substrate_block_hash: substrate_hash_3, + ethereum_block_hash: ethereum_hash_3, + }; + + let log_entries = vec![ + // Block 1 + log_1_abcd_0_0_alice.clone(), + log_1_dcba_1_0_alice.clone(), + log_1_badc_2_0_alice.clone(), + // Block 2 + log_2_abcd_0_0_bob.clone(), + log_2_dcba_1_0_bob.clone(), + log_2_badc_2_0_bob.clone(), + // Block 3 + log_3_abcd_0_0_bob.clone(), + log_3_dcba_1_0_bob.clone(), + log_3_badc_2_0_bob.clone(), + ]; + + let mut builder: QueryBuilder = QueryBuilder::new( + "INSERT INTO logs( + address, + topic_1, + topic_2, + topic_3, + topic_4, + log_index, + transaction_index, + substrate_block_hash + )", + ); + builder.push_values(log_entries, |mut b, entry| { + let address = entry.address.as_bytes().to_owned(); + let topic_1 = entry.topics[0].as_bytes().to_owned(); + let topic_2 = entry.topics[1].as_bytes().to_owned(); + let topic_3 = entry.topics[2].as_bytes().to_owned(); + let topic_4 = entry.topics[3].as_bytes().to_owned(); + let log_index = entry.log_index; + let transaction_index = entry.transaction_index; + let substrate_block_hash = entry.substrate_block_hash.as_bytes().to_owned(); + + b.push_bind(address); + b.push_bind(topic_1); + b.push_bind(topic_2); + b.push_bind(topic_3); + b.push_bind(topic_4); + b.push_bind(log_index); + b.push_bind(transaction_index); + b.push_bind(substrate_block_hash); + }); + let query = builder.build(); + let _ = query.execute(indexer_backend.pool()).await; + + TestData { + alice, + bob, + topics_a, + topics_b, + topics_c, + topics_d, + substrate_hash_1, + substrate_hash_2, + substrate_hash_3, + ethereum_hash_1, + ethereum_hash_2, + ethereum_hash_3, + backend: indexer_backend, + log_1_abcd_0_0_alice, + log_1_dcba_1_0_alice, + log_1_badc_2_0_alice, + log_2_abcd_0_0_bob, + log_2_dcba_1_0_bob, + log_2_badc_2_0_bob, + log_3_abcd_0_0_bob, + log_3_dcba_1_0_bob, + log_3_badc_2_0_bob, + } + } + + async fn run_test_case( + backend: super::Backend, + test_case: &TestFilter, + ) -> Result, String> { + backend + 
.filter_logs( + test_case.from_block, + test_case.to_block, + test_case.addresses.clone(), + test_case.topics.clone(), + ) + .await + } + + async fn assert_blocks_canon(pool: &SqlitePool, expected: Vec<(H256, u32)>) { + let actual: Vec<(H256, u32)> = + sqlx::query("SELECT substrate_block_hash, is_canon FROM blocks") + .map(|row: SqliteRow| (H256::from_slice(&row.get::, _>(0)[..]), row.get(1))) + .fetch_all(pool) + .await + .expect("sql query must succeed"); + assert_eq!(expected, actual); + } + + #[tokio::test] + async fn genesis_works() { + let TestData { backend, .. } = prepare().await; + let filter = TestFilter { + from_block: 0, + to_block: 0, + addresses: vec![], + topics: vec![], + expected_result: vec![], + }; + let result = run_test_case(backend, &filter).await.expect("must succeed"); + assert_eq!(result, filter.expected_result); + } + + #[tokio::test] + async fn unsanitized_input_works() { + let TestData { backend, .. } = prepare().await; + let filter = TestFilter { + from_block: 0, + to_block: 0, + addresses: vec![], + topics: vec![vec![None], vec![None, None, None]], + expected_result: vec![], + }; + let result = run_test_case(backend, &filter).await.expect("must succeed"); + assert_eq!(result, filter.expected_result); + } + + #[tokio::test] + async fn invalid_topic_input_size_fails() { + let TestData { + backend, topics_a, .. + } = prepare().await; + let filter = TestFilter { + from_block: 0, + to_block: 0, + addresses: vec![], + topics: vec![ + vec![Some(topics_a), None, None, None, None], + vec![Some(topics_a), None, None, None], + ], + expected_result: vec![], + }; + run_test_case(backend, &filter) + .await + .expect_err("Invalid topic input. Maximum length is 4."); + } + + #[tokio::test] + async fn test_malformed_topic_cleans_invalid_options() { + let TestData { + backend, + topics_a, + topics_b, + topics_d, + log_1_badc_2_0_alice, + .. + } = prepare().await; + + // [(a,null,b), (a, null), (d,null), null] -> [(a,b), a, d] + let filter = TestFilter { + from_block: 0, + to_block: 1, + addresses: vec![], + topics: vec![ + vec![Some(topics_a), None, Some(topics_d)], + vec![None], // not considered + vec![Some(topics_b), Some(topics_a), None], + vec![None, None, None, None], // not considered + ], + expected_result: vec![log_1_badc_2_0_alice.into()], + }; + let result = run_test_case(backend, &filter).await.expect("must succeed"); + assert_eq!(result, filter.expected_result); + } + + #[tokio::test] + async fn block_range_works() { + let TestData { + backend, + log_1_abcd_0_0_alice, + log_1_dcba_1_0_alice, + log_1_badc_2_0_alice, + log_2_abcd_0_0_bob, + log_2_dcba_1_0_bob, + log_2_badc_2_0_bob, + .. + } = prepare().await; + + let filter = TestFilter { + from_block: 0, + to_block: 2, + addresses: vec![], + topics: vec![], + expected_result: vec![ + log_1_abcd_0_0_alice.into(), + log_1_dcba_1_0_alice.into(), + log_1_badc_2_0_alice.into(), + log_2_abcd_0_0_bob.into(), + log_2_dcba_1_0_bob.into(), + log_2_badc_2_0_bob.into(), + ], + }; + let result = run_test_case(backend, &filter).await.expect("must succeed"); + assert_eq!(result, filter.expected_result); + } + + #[tokio::test] + async fn address_filter_works() { + let TestData { + backend, + alice, + log_1_abcd_0_0_alice, + log_1_dcba_1_0_alice, + log_1_badc_2_0_alice, + .. 
+ } = prepare().await; + let filter = TestFilter { + from_block: 0, + to_block: 3, + addresses: vec![alice], + topics: vec![], + expected_result: vec![ + log_1_abcd_0_0_alice.into(), + log_1_dcba_1_0_alice.into(), + log_1_badc_2_0_alice.into(), + ], + }; + let result = run_test_case(backend, &filter).await.expect("must succeed"); + assert_eq!(result, filter.expected_result); + } + + #[tokio::test] + async fn topic_filter_works() { + let TestData { + backend, + topics_d, + log_1_dcba_1_0_alice, + log_2_dcba_1_0_bob, + log_3_dcba_1_0_bob, + .. + } = prepare().await; + let filter = TestFilter { + from_block: 0, + to_block: 3, + addresses: vec![], + topics: vec![vec![Some(topics_d)]], + expected_result: vec![ + log_1_dcba_1_0_alice.into(), + log_2_dcba_1_0_bob.into(), + log_3_dcba_1_0_bob.into(), + ], + }; + let result = run_test_case(backend, &filter).await.expect("must succeed"); + assert_eq!(result, filter.expected_result); + } + + #[tokio::test] + async fn test_filters_address_and_topic() { + let TestData { + backend, + bob, + topics_b, + log_2_badc_2_0_bob, + log_3_badc_2_0_bob, + .. + } = prepare().await; + let filter = TestFilter { + from_block: 0, + to_block: 3, + addresses: vec![bob], + topics: vec![vec![Some(topics_b)]], + expected_result: vec![log_2_badc_2_0_bob.into(), log_3_badc_2_0_bob.into()], + }; + let result = run_test_case(backend, &filter).await.expect("must succeed"); + assert_eq!(result, filter.expected_result); + } + + #[tokio::test] + async fn test_filters_multi_address_and_topic() { + let TestData { + backend, + alice, + bob, + topics_b, + log_1_badc_2_0_alice, + log_2_badc_2_0_bob, + log_3_badc_2_0_bob, + .. + } = prepare().await; + let filter = TestFilter { + from_block: 0, + to_block: 3, + addresses: vec![alice, bob], + topics: vec![vec![Some(topics_b)]], + expected_result: vec![ + log_1_badc_2_0_alice.into(), + log_2_badc_2_0_bob.into(), + log_3_badc_2_0_bob.into(), + ], + }; + let result = run_test_case(backend, &filter).await.expect("must succeed"); + assert_eq!(result, filter.expected_result); + } + + #[tokio::test] + async fn test_filters_multi_address_and_multi_topic() { + let TestData { + backend, + alice, + bob, + topics_a, + topics_b, + log_1_abcd_0_0_alice, + log_2_abcd_0_0_bob, + log_3_abcd_0_0_bob, + .. + } = prepare().await; + let filter = TestFilter { + from_block: 0, + to_block: 3, + addresses: vec![alice, bob], + topics: vec![vec![Some(topics_a), Some(topics_b)]], + expected_result: vec![ + log_1_abcd_0_0_alice.into(), + log_2_abcd_0_0_bob.into(), + log_3_abcd_0_0_bob.into(), + ], + }; + let result = run_test_case(backend, &filter).await.expect("must succeed"); + assert_eq!(result, filter.expected_result); + } + + #[tokio::test] + async fn filter_with_topic_wildcards_works() { + let TestData { + backend, + alice, + bob, + topics_d, + topics_b, + log_1_dcba_1_0_alice, + log_2_dcba_1_0_bob, + log_3_dcba_1_0_bob, + .. + } = prepare().await; + let filter = TestFilter { + from_block: 0, + to_block: 3, + addresses: vec![alice, bob], + topics: vec![vec![Some(topics_d), None, Some(topics_b)]], + expected_result: vec![ + log_1_dcba_1_0_alice.into(), + log_2_dcba_1_0_bob.into(), + log_3_dcba_1_0_bob.into(), + ], + }; + let result = run_test_case(backend, &filter).await.expect("must succeed"); + assert_eq!(result, filter.expected_result); + } + + #[tokio::test] + async fn trailing_wildcard_is_useless_but_works() { + let TestData { + alice, + backend, + topics_b, + log_1_dcba_1_0_alice, + .. 
+ } = prepare().await; + let filter = TestFilter { + from_block: 0, + to_block: 1, + addresses: vec![alice], + topics: vec![vec![None, None, Some(topics_b), None]], + expected_result: vec![log_1_dcba_1_0_alice.into()], + }; + let result = run_test_case(backend, &filter).await.expect("must succeed"); + assert_eq!(result, filter.expected_result); + } + + #[tokio::test] + async fn filter_with_multi_topic_options_works() { + let TestData { + backend, + topics_a, + topics_d, + log_1_abcd_0_0_alice, + log_1_dcba_1_0_alice, + log_2_abcd_0_0_bob, + log_2_dcba_1_0_bob, + log_3_abcd_0_0_bob, + log_3_dcba_1_0_bob, + .. + } = prepare().await; + let filter = TestFilter { + from_block: 0, + to_block: 3, + addresses: vec![], + topics: vec![ + vec![Some(topics_a)], + vec![Some(topics_d)], + vec![Some(topics_d)], // duplicate, ignored + ], + expected_result: vec![ + log_1_abcd_0_0_alice.into(), + log_1_dcba_1_0_alice.into(), + log_2_abcd_0_0_bob.into(), + log_2_dcba_1_0_bob.into(), + log_3_abcd_0_0_bob.into(), + log_3_dcba_1_0_bob.into(), + ], + }; + let result = run_test_case(backend, &filter).await.expect("must succeed"); + assert_eq!(result, filter.expected_result); + } + + #[tokio::test] + async fn filter_with_multi_topic_options_and_wildcards_works() { + let TestData { + backend, + bob, + topics_a, + topics_b, + topics_c, + topics_d, + log_2_dcba_1_0_bob, + log_2_badc_2_0_bob, + log_3_dcba_1_0_bob, + log_3_badc_2_0_bob, + .. + } = prepare().await; + let filter = TestFilter { + from_block: 0, + to_block: 3, + addresses: vec![bob], + // Product on input [null,null,(b,d),(a,c)]. + topics: vec![ + vec![None, None, Some(topics_b), Some(topics_a)], + vec![None, None, Some(topics_b), Some(topics_c)], + vec![None, None, Some(topics_d), Some(topics_a)], + vec![None, None, Some(topics_d), Some(topics_c)], + ], + expected_result: vec![ + log_2_dcba_1_0_bob.into(), + log_2_badc_2_0_bob.into(), + log_3_dcba_1_0_bob.into(), + log_3_badc_2_0_bob.into(), + ], + }; + let result = run_test_case(backend, &filter).await.expect("must succeed"); + assert_eq!(result, filter.expected_result); + } + + #[tokio::test] + async fn test_canonicalize_sets_canon_flag_for_redacted_and_enacted_blocks_correctly() { + let TestData { + backend, + substrate_hash_1, + substrate_hash_2, + substrate_hash_3, + .. 
+ } = prepare().await; + + // set block #1 to non canon + sqlx::query("UPDATE blocks SET is_canon = 0 WHERE substrate_block_hash = ?") + .bind(substrate_hash_1.as_bytes()) + .execute(backend.pool()) + .await + .expect("sql query must succeed"); + assert_blocks_canon( + backend.pool(), + vec![ + (substrate_hash_1, 0), + (substrate_hash_2, 1), + (substrate_hash_3, 1), + ], + ) + .await; + + backend + .canonicalize(&[substrate_hash_2], &[substrate_hash_1]) + .await + .expect("must succeed"); + + assert_blocks_canon( + backend.pool(), + vec![ + (substrate_hash_1, 1), + (substrate_hash_2, 0), + (substrate_hash_3, 1), + ], + ) + .await; + } + + #[test] + fn test_query_should_be_generated_correctly() { + use sqlx::Execute; + + let from_block: u64 = 100; + let to_block: u64 = 500; + let addresses: Vec = vec![ + H160::repeat_byte(0x01), + H160::repeat_byte(0x02), + H160::repeat_byte(0x03), + ]; + let topics = [ + hashset![ + H256::repeat_byte(0x01), + H256::repeat_byte(0x02), + H256::repeat_byte(0x03), + ], + hashset![H256::repeat_byte(0x04), H256::repeat_byte(0x05),], + hashset![], + hashset![H256::repeat_byte(0x06)], + ]; + + let expected_query_sql = " +SELECT + l.substrate_block_hash, + b.ethereum_block_hash, + b.block_number, + b.ethereum_storage_schema, + l.transaction_index, + l.log_index +FROM logs AS l +INNER JOIN blocks AS b +ON (b.block_number BETWEEN ? AND ?) AND b.substrate_block_hash = l.substrate_block_hash AND b.is_canon = 1 +WHERE 1 AND l.address IN (?, ?, ?) AND l.topic_1 IN (?, ?, ?) AND l.topic_2 IN (?, ?) AND l.topic_4 = ? +ORDER BY b.block_number ASC, l.transaction_index ASC, l.log_index ASC +LIMIT 10001"; + + let mut qb = QueryBuilder::new(""); + let actual_query_sql = + super::build_query(&mut qb, from_block, to_block, addresses, topics).sql(); + assert_eq!(expected_query_sql, actual_query_sql); + } +} diff --git a/client/mapping-sync/Cargo.toml b/client/mapping-sync/Cargo.toml index ef5c8f85d9..d9af3c7f16 100644 --- a/client/mapping-sync/Cargo.toml +++ b/client/mapping-sync/Cargo.toml @@ -12,32 +12,41 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] futures = "0.3.25" -futures-timer = "3.0.1" +futures-timer = "3.0.2" log = "0.4.17" parking_lot = "0.12.1" +tokio = { version = "1.19", features = ["macros", "sync"] } # Substrate sc-client-api = { workspace = true } +sc-utils = { workspace = true } sp-api = { workspace = true } sp-blockchain = { workspace = true } sp-consensus = { workspace = true, features = ["default"] } +sp-core = { workspace = true } sp-runtime = { workspace = true } + # Frontier fc-db = { workspace = true } fc-storage = { workspace = true } fp-consensus = { workspace = true, features = ["default"] } fp-rpc = { workspace = true, features = ["default"] } -sc-utils = { workspace = true } [dev-dependencies] -ethereum = { workspace = true, features = ["with-codec"] } -ethereum-types = { workspace = true } +ethereum = { workspace = true } +scale-codec = { package = "parity-scale-codec", workspace = true } +sqlx = { features = ["runtime-tokio-native-tls", "sqlite"], git = "https://github.com/launchbadge/sqlx", branch = "main" } tempfile = "3.3.0" tokio = { version = "1.24", features = ["sync"] } -#Frontier -fp-storage = { workspace = true, features = ["default"] } +# Frontier +fp-consensus = { workspace = true, features = ["std"] } +fp-storage = { workspace = true, features = ["std"] } frontier-template-runtime = { workspace = true, features = ["default"] } # Substrate +ethereum-types = { workspace = true } +fc-rpc = { workspace = true } sc-block-builder = 
{ workspace = true } sc-client-db = { workspace = true } +sp-consensus = { workspace = true } sp-core = { workspace = true, features = ["default"] } +sp-io = { workspace = true } substrate-test-runtime-client = { workspace = true } diff --git a/client/mapping-sync/src/kv/mod.rs b/client/mapping-sync/src/kv/mod.rs new file mode 100644 index 0000000000..cb0bcad529 --- /dev/null +++ b/client/mapping-sync/src/kv/mod.rs @@ -0,0 +1,304 @@ +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 +// This file is part of Frontier. +// +// Copyright (c) 2020-2022 Parity Technologies (UK) Ltd. +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +#![allow(clippy::too_many_arguments)] + +mod worker; + +pub use worker::MappingSyncWorker; + +use std::sync::Arc; + +// Substrate +use sc_client_api::backend::{Backend, StorageProvider}; +use sp_api::{ApiExt, ProvideRuntimeApi}; +use sp_blockchain::{Backend as _, HeaderBackend}; +use sp_consensus::SyncOracle; +use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Zero}; +// Frontier +use crate::{EthereumBlockNotification, EthereumBlockNotificationSinks, SyncStrategy}; +use fc_storage::OverrideHandle; +use fp_consensus::{FindLogError, Hashes, Log, PostLog, PreLog}; +use fp_rpc::EthereumRuntimeRPCApi; + +pub fn sync_block( + client: &C, + overrides: Arc>, + backend: &fc_db::kv::Backend, + header: &Block::Header, +) -> Result<(), String> +where + C: HeaderBackend + StorageProvider, + BE: Backend, +{ + let substrate_block_hash = header.hash(); + match fp_consensus::find_log(header.digest()) { + Ok(log) => { + let gen_from_hashes = |hashes: Hashes| -> fc_db::kv::MappingCommitment { + fc_db::kv::MappingCommitment { + block_hash: substrate_block_hash, + ethereum_block_hash: hashes.block_hash, + ethereum_transaction_hashes: hashes.transaction_hashes, + } + }; + let gen_from_block = |block| -> fc_db::kv::MappingCommitment { + let hashes = Hashes::from_block(block); + gen_from_hashes(hashes) + }; + + match log { + Log::Pre(PreLog::Block(block)) => { + let mapping_commitment = gen_from_block(block); + backend.mapping().write_hashes(mapping_commitment) + } + Log::Post(post_log) => match post_log { + PostLog::Hashes(hashes) => { + let mapping_commitment = gen_from_hashes(hashes); + backend.mapping().write_hashes(mapping_commitment) + } + PostLog::Block(block) => { + let mapping_commitment = gen_from_block(block); + backend.mapping().write_hashes(mapping_commitment) + } + PostLog::BlockHash(expect_eth_block_hash) => { + let schema = + fc_storage::onchain_storage_schema(client, substrate_block_hash); + let ethereum_block = overrides + .schemas + .get(&schema) + .unwrap_or(&overrides.fallback) + .current_block(substrate_block_hash); + match ethereum_block { + Some(block) => { + let got_eth_block_hash = block.header.hash(); + if got_eth_block_hash != expect_eth_block_hash { + Err(format!( + "Ethereum block hash mismatch: \ + frontier consensus digest ({expect_eth_block_hash:?}), \ 
+ db state ({got_eth_block_hash:?})" + )) + } else { + let mapping_commitment = gen_from_block(block); + backend.mapping().write_hashes(mapping_commitment) + } + } + None => backend.mapping().write_none(substrate_block_hash), + } + } + }, + } + } + Err(FindLogError::NotFound) => backend.mapping().write_none(substrate_block_hash), + Err(FindLogError::MultipleLogs) => Err("Multiple logs found".to_string()), + } +} + +pub fn sync_genesis_block( + client: &C, + backend: &fc_db::kv::Backend, + header: &Block::Header, +) -> Result<(), String> +where + C: ProvideRuntimeApi, + C::Api: EthereumRuntimeRPCApi, +{ + let substrate_block_hash = header.hash(); + + if let Some(api_version) = client + .runtime_api() + .api_version::>(substrate_block_hash) + .map_err(|e| format!("{:?}", e))? + { + let block = if api_version > 1 { + client + .runtime_api() + .current_block(substrate_block_hash) + .map_err(|e| format!("{:?}", e))? + } else { + #[allow(deprecated)] + let legacy_block = client + .runtime_api() + .current_block_before_version_2(substrate_block_hash) + .map_err(|e| format!("{:?}", e))?; + legacy_block.map(|block| block.into()) + }; + let block_hash = block + .ok_or_else(|| "Ethereum genesis block not found".to_string())? + .header + .hash(); + let mapping_commitment = fc_db::kv::MappingCommitment:: { + block_hash: substrate_block_hash, + ethereum_block_hash: block_hash, + ethereum_transaction_hashes: Vec::new(), + }; + backend.mapping().write_hashes(mapping_commitment)?; + } else { + backend.mapping().write_none(substrate_block_hash)?; + }; + + Ok(()) +} + +pub fn sync_one_block( + client: &C, + substrate_backend: &BE, + overrides: Arc>, + frontier_backend: &fc_db::kv::Backend, + sync_from: ::Number, + strategy: SyncStrategy, + sync_oracle: Arc, + pubsub_notification_sinks: Arc< + EthereumBlockNotificationSinks>, + >, +) -> Result +where + C: ProvideRuntimeApi, + C::Api: EthereumRuntimeRPCApi, + C: HeaderBackend + StorageProvider, + BE: Backend, +{ + let mut current_syncing_tips = frontier_backend.meta().current_syncing_tips()?; + + if current_syncing_tips.is_empty() { + let mut leaves = substrate_backend + .blockchain() + .leaves() + .map_err(|e| format!("{:?}", e))?; + if leaves.is_empty() { + return Ok(false); + } + current_syncing_tips.append(&mut leaves); + } + + let mut operating_header = None; + while let Some(checking_tip) = current_syncing_tips.pop() { + if let Some(checking_header) = fetch_header( + substrate_backend.blockchain(), + frontier_backend, + checking_tip, + sync_from, + )? { + operating_header = Some(checking_header); + break; + } + } + let operating_header = match operating_header { + Some(operating_header) => operating_header, + None => { + frontier_backend + .meta() + .write_current_syncing_tips(current_syncing_tips)?; + return Ok(false); + } + }; + + if operating_header.number() == &Zero::zero() { + sync_genesis_block(client, frontier_backend, &operating_header)?; + + frontier_backend + .meta() + .write_current_syncing_tips(current_syncing_tips)?; + } else { + if SyncStrategy::Parachain == strategy + && operating_header.number() > &client.info().best_number + { + return Ok(false); + } + sync_block(client, overrides, frontier_backend, &operating_header)?; + + current_syncing_tips.push(*operating_header.parent_hash()); + frontier_backend + .meta() + .write_current_syncing_tips(current_syncing_tips)?; + } + // Notify on import and remove closed channels. + // Only notify when the node is node in major syncing. 
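+ // That is, notify subscribers only while the node is not in major syncing.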
+ let sinks = &mut pubsub_notification_sinks.lock(); + sinks.retain(|sink| { + if !sync_oracle.is_major_syncing() { + let hash = operating_header.hash(); + let is_new_best = client.info().best_hash == hash; + sink.unbounded_send(EthereumBlockNotification { is_new_best, hash }) + .is_ok() + } else { + // Remove from the pool if in major syncing. + false + } + }); + Ok(true) +} + +pub fn sync_blocks( + client: &C, + substrate_backend: &BE, + overrides: Arc>, + frontier_backend: &fc_db::kv::Backend, + limit: usize, + sync_from: ::Number, + strategy: SyncStrategy, + sync_oracle: Arc, + pubsub_notification_sinks: Arc< + EthereumBlockNotificationSinks>, + >, +) -> Result +where + C: ProvideRuntimeApi, + C::Api: EthereumRuntimeRPCApi, + C: HeaderBackend + StorageProvider, + BE: Backend, +{ + let mut synced_any = false; + + for _ in 0..limit { + synced_any = synced_any + || sync_one_block( + client, + substrate_backend, + overrides.clone(), + frontier_backend, + sync_from, + strategy, + sync_oracle.clone(), + pubsub_notification_sinks.clone(), + )?; + } + + Ok(synced_any) +} + +pub fn fetch_header( + substrate_backend: &BE, + frontier_backend: &fc_db::kv::Backend, + checking_tip: Block::Hash, + sync_from: ::Number, +) -> Result, String> +where + BE: HeaderBackend, +{ + if frontier_backend.mapping().is_synced(&checking_tip)? { + return Ok(None); + } + + match substrate_backend.header(checking_tip) { + Ok(Some(checking_header)) if checking_header.number() >= &sync_from => { + Ok(Some(checking_header)) + } + Ok(Some(_)) => Ok(None), + Ok(None) | Err(_) => Err("Header not found".to_string()), + } +} diff --git a/client/mapping-sync/src/worker.rs b/client/mapping-sync/src/kv/worker.rs similarity index 97% rename from client/mapping-sync/src/worker.rs rename to client/mapping-sync/src/kv/worker.rs index fc3b7c945f..1f6229b6af 100644 --- a/client/mapping-sync/src/worker.rs +++ b/client/mapping-sync/src/kv/worker.rs @@ -34,15 +34,10 @@ use sp_blockchain::HeaderBackend; use sp_consensus::SyncOracle; use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; // Frontier +use crate::SyncStrategy; use fc_storage::OverrideHandle; use fp_rpc::EthereumRuntimeRPCApi; -#[derive(Copy, Clone, Eq, PartialEq)] -pub enum SyncStrategy { - Normal, - Parachain, -} - pub struct MappingSyncWorker { import_notifications: ImportNotifications, timeout: Duration, @@ -51,7 +46,7 @@ pub struct MappingSyncWorker { client: Arc, substrate_backend: Arc, overrides: Arc>, - frontier_backend: Arc>, + frontier_backend: Arc>, have_next: bool, retry_times: usize, @@ -72,7 +67,7 @@ impl MappingSyncWorker { client: Arc, substrate_backend: Arc, overrides: Arc>, - frontier_backend: Arc>, + frontier_backend: Arc>, retry_times: usize, sync_from: ::Number, strategy: SyncStrategy, @@ -141,7 +136,7 @@ where if fire { self.inner_delay = None; - match crate::sync_blocks( + match crate::kv::sync_blocks( self.client.as_ref(), self.substrate_backend.as_ref(), self.overrides.clone(), @@ -263,9 +258,9 @@ mod tests { }); let frontier_backend = Arc::new( - fc_db::Backend::::new( + fc_db::kv::Backend::::new( client.clone(), - &fc_db::DatabaseSettings { + &fc_db::kv::DatabaseSettings { source: sc_client_db::DatabaseSource::RocksDb { path: tmp.path().to_path_buf(), cache_size: 0, @@ -401,9 +396,9 @@ mod tests { }); let frontier_backend = Arc::new( - fc_db::Backend::::new( + fc_db::kv::Backend::::new( client.clone(), - &fc_db::DatabaseSettings { + &fc_db::kv::DatabaseSettings { source: sc_client_db::DatabaseSource::RocksDb { path: tmp.path().to_path_buf(), 
cache_size: 0, diff --git a/client/mapping-sync/src/lib.rs b/client/mapping-sync/src/lib.rs index c83398ab9e..951978effd 100644 --- a/client/mapping-sync/src/lib.rs +++ b/client/mapping-sync/src/lib.rs @@ -16,25 +16,18 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -#![allow(clippy::too_many_arguments)] #![deny(unused_crate_dependencies)] -mod worker; +pub mod kv; +pub mod sql; -pub use worker::{MappingSyncWorker, SyncStrategy}; +use sp_api::BlockT; -use std::sync::Arc; - -// Substrate -use sc_client_api::backend::{Backend, StorageProvider}; -use sp_api::{ApiExt, ProvideRuntimeApi}; -use sp_blockchain::{Backend as _, HeaderBackend}; -use sp_consensus::SyncOracle; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Zero}; -// Frontier -use fc_storage::OverrideHandle; -use fp_consensus::{FindLogError, Hashes, Log, PostLog, PreLog}; -use fp_rpc::EthereumRuntimeRPCApi; +#[derive(Copy, Clone, Eq, PartialEq)] +pub enum SyncStrategy { + Normal, + Parachain, +} pub type EthereumBlockNotificationSinks = parking_lot::Mutex>>; @@ -44,270 +37,3 @@ pub struct EthereumBlockNotification { pub is_new_best: bool, pub hash: Block::Hash, } - -pub fn sync_block( - client: &C, - overrides: Arc>, - backend: &fc_db::Backend, - header: &Block::Header, -) -> Result<(), String> -where - C: HeaderBackend + StorageProvider, - BE: Backend, -{ - let substrate_block_hash = header.hash(); - match fp_consensus::find_log(header.digest()) { - Ok(log) => { - let gen_from_hashes = |hashes: Hashes| -> fc_db::MappingCommitment { - fc_db::MappingCommitment { - block_hash: substrate_block_hash, - ethereum_block_hash: hashes.block_hash, - ethereum_transaction_hashes: hashes.transaction_hashes, - } - }; - let gen_from_block = |block| -> fc_db::MappingCommitment { - let hashes = Hashes::from_block(block); - gen_from_hashes(hashes) - }; - - match log { - Log::Pre(PreLog::Block(block)) => { - let mapping_commitment = gen_from_block(block); - backend.mapping().write_hashes(mapping_commitment) - } - Log::Post(post_log) => match post_log { - PostLog::Hashes(hashes) => { - let mapping_commitment = gen_from_hashes(hashes); - backend.mapping().write_hashes(mapping_commitment) - } - PostLog::Block(block) => { - let mapping_commitment = gen_from_block(block); - backend.mapping().write_hashes(mapping_commitment) - } - PostLog::BlockHash(expect_eth_block_hash) => { - let schema = - fc_storage::onchain_storage_schema(client, substrate_block_hash); - let ethereum_block = overrides - .schemas - .get(&schema) - .unwrap_or(&overrides.fallback) - .current_block(substrate_block_hash); - match ethereum_block { - Some(block) => { - let got_eth_block_hash = block.header.hash(); - if got_eth_block_hash != expect_eth_block_hash { - Err(format!( - "Ethereum block hash mismatch: \ - frontier consensus digest ({expect_eth_block_hash:?}), \ - db state ({got_eth_block_hash:?})" - )) - } else { - let mapping_commitment = gen_from_block(block); - backend.mapping().write_hashes(mapping_commitment) - } - } - None => backend.mapping().write_none(substrate_block_hash), - } - } - }, - } - } - Err(FindLogError::NotFound) => backend.mapping().write_none(substrate_block_hash), - Err(FindLogError::MultipleLogs) => Err("Multiple logs found".to_string()), - } -} - -pub fn sync_genesis_block( - client: &C, - backend: &fc_db::Backend, - header: &Block::Header, -) -> Result<(), String> -where - C: ProvideRuntimeApi, - C::Api: EthereumRuntimeRPCApi, -{ - let substrate_block_hash = header.hash(); - - if 
let Some(api_version) = client - .runtime_api() - .api_version::>(substrate_block_hash) - .map_err(|e| format!("{:?}", e))? - { - let block = if api_version > 1 { - client - .runtime_api() - .current_block(substrate_block_hash) - .map_err(|e| format!("{:?}", e))? - } else { - #[allow(deprecated)] - let legacy_block = client - .runtime_api() - .current_block_before_version_2(substrate_block_hash) - .map_err(|e| format!("{:?}", e))?; - legacy_block.map(|block| block.into()) - }; - let block_hash = block - .ok_or_else(|| "Ethereum genesis block not found".to_string())? - .header - .hash(); - let mapping_commitment = fc_db::MappingCommitment:: { - block_hash: substrate_block_hash, - ethereum_block_hash: block_hash, - ethereum_transaction_hashes: Vec::new(), - }; - backend.mapping().write_hashes(mapping_commitment)?; - } else { - backend.mapping().write_none(substrate_block_hash)?; - }; - - Ok(()) -} - -pub fn sync_one_block( - client: &C, - substrate_backend: &BE, - overrides: Arc>, - frontier_backend: &fc_db::Backend, - sync_from: ::Number, - strategy: SyncStrategy, - sync_oracle: Arc, - pubsub_notification_sinks: Arc< - EthereumBlockNotificationSinks>, - >, -) -> Result -where - C: ProvideRuntimeApi, - C::Api: EthereumRuntimeRPCApi, - C: HeaderBackend + StorageProvider, - BE: Backend, -{ - let mut current_syncing_tips = frontier_backend.meta().current_syncing_tips()?; - - if current_syncing_tips.is_empty() { - let mut leaves = substrate_backend - .blockchain() - .leaves() - .map_err(|e| format!("{:?}", e))?; - if leaves.is_empty() { - return Ok(false); - } - current_syncing_tips.append(&mut leaves); - } - - let mut operating_header = None; - while let Some(checking_tip) = current_syncing_tips.pop() { - if let Some(checking_header) = fetch_header( - substrate_backend.blockchain(), - frontier_backend, - checking_tip, - sync_from, - )? { - operating_header = Some(checking_header); - break; - } - } - let operating_header = match operating_header { - Some(operating_header) => operating_header, - None => { - frontier_backend - .meta() - .write_current_syncing_tips(current_syncing_tips)?; - return Ok(false); - } - }; - - if operating_header.number() == &Zero::zero() { - sync_genesis_block(client, frontier_backend, &operating_header)?; - - frontier_backend - .meta() - .write_current_syncing_tips(current_syncing_tips)?; - } else { - if SyncStrategy::Parachain == strategy - && operating_header.number() > &client.info().best_number - { - return Ok(false); - } - sync_block(client, overrides, frontier_backend, &operating_header)?; - - current_syncing_tips.push(*operating_header.parent_hash()); - frontier_backend - .meta() - .write_current_syncing_tips(current_syncing_tips)?; - } - // Notify on import and remove closed channels. - // Only notify when the node is node in major syncing. - let sinks = &mut pubsub_notification_sinks.lock(); - sinks.retain(|sink| { - if !sync_oracle.is_major_syncing() { - let hash = operating_header.hash(); - let is_new_best = client.info().best_hash == hash; - sink.unbounded_send(EthereumBlockNotification { is_new_best, hash }) - .is_ok() - } else { - // Remove from the pool if in major syncing. 
- false - } - }); - Ok(true) -} - -pub fn sync_blocks( - client: &C, - substrate_backend: &BE, - overrides: Arc>, - frontier_backend: &fc_db::Backend, - limit: usize, - sync_from: ::Number, - strategy: SyncStrategy, - sync_oracle: Arc, - pubsub_notification_sinks: Arc< - EthereumBlockNotificationSinks>, - >, -) -> Result -where - C: ProvideRuntimeApi, - C::Api: EthereumRuntimeRPCApi, - C: HeaderBackend + StorageProvider, - BE: Backend, -{ - let mut synced_any = false; - - for _ in 0..limit { - synced_any = synced_any - || sync_one_block( - client, - substrate_backend, - overrides.clone(), - frontier_backend, - sync_from, - strategy, - sync_oracle.clone(), - pubsub_notification_sinks.clone(), - )?; - } - - Ok(synced_any) -} - -pub fn fetch_header( - substrate_backend: &BE, - frontier_backend: &fc_db::Backend, - checking_tip: Block::Hash, - sync_from: ::Number, -) -> Result, String> -where - BE: HeaderBackend, -{ - if frontier_backend.mapping().is_synced(&checking_tip)? { - return Ok(None); - } - - match substrate_backend.header(checking_tip) { - Ok(Some(checking_header)) if checking_header.number() >= &sync_from => { - Ok(Some(checking_header)) - } - Ok(Some(_)) => Ok(None), - Ok(None) | Err(_) => Err("Header not found".to_string()), - } -} diff --git a/client/mapping-sync/src/sql/mod.rs b/client/mapping-sync/src/sql/mod.rs new file mode 100644 index 0000000000..39fab6a41b --- /dev/null +++ b/client/mapping-sync/src/sql/mod.rs @@ -0,0 +1,1306 @@ +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 +// This file is part of Frontier. +// +// Copyright (c) 2020-2022 Parity Technologies (UK) Ltd. +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +#![allow(clippy::too_many_arguments)] + +use crate::EthereumBlockNotification; +use fp_rpc::EthereumRuntimeRPCApi; +use futures::prelude::*; +use sc_client_api::backend::{Backend as BackendT, StateBackend, StorageProvider}; +use sp_api::{HeaderT, ProvideRuntimeApi}; +use sp_blockchain::{Backend, HeaderBackend}; +use sp_consensus::SyncOracle; +use sp_core::H256; +use sp_runtime::traits::{BlakeTwo256, Block as BlockT, UniqueSaturatedInto}; +use std::{ops::DerefMut, sync::Arc, time::Duration}; + +/// Defines the commands for the sync worker. +#[derive(Debug)] +pub enum WorkerCommand { + /// Resume indexing from the last indexed canon block. + ResumeSync, + /// Index leaves. + IndexLeaves(Vec), + /// Index the best block known so far via import notifications. + IndexBestBlock(H256), + /// Canonicalize the enacted and retracted blocks reported via import notifications. + Canonicalize { + common: H256, + enacted: Vec, + retracted: Vec, + }, + /// Verify indexed blocks' consistency. + /// Check for any canon blocks that haven't had their logs indexed. + /// Check for any missing parent blocks from the latest canon block. + CheckIndexedBlocks, +} + +/// Config parameters for the SyncWorker. 
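+/// `read_notification_timeout` bounds how long the worker waits for an import notification before falling back to indexing the current leaves, while `check_indexed_blocks_interval` controls how often the worker re-checks for canon blocks with missing logs or gaps.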
+pub struct SyncWorkerConfig { + pub check_indexed_blocks_interval: Duration, + pub read_notification_timeout: Duration, +} + +/// Implements an indexer that imports blocks and their transactions. +pub struct SyncWorker { + _phantom: std::marker::PhantomData<(Block, Backend, Client)>, +} + +impl SyncWorker +where + Block: BlockT + Send + Sync, + Client: StorageProvider + HeaderBackend + Send + Sync + 'static, + Client: ProvideRuntimeApi, + Client::Api: EthereumRuntimeRPCApi, + Backend: BackendT + 'static, + Backend::State: StateBackend, +{ + /// Spawn the indexing worker. The worker can be given commands via the sender channel. + /// Once the buffer is full, attempts to send new messages will wait until a message is read from the channel. + pub async fn spawn_worker( + client: Arc, + substrate_backend: Arc, + indexer_backend: Arc>, + pubsub_notification_sinks: Arc< + crate::EthereumBlockNotificationSinks>, + >, + ) -> tokio::sync::mpsc::Sender { + let (tx, mut rx) = tokio::sync::mpsc::channel(100); + tokio::task::spawn(async move { + while let Some(cmd) = rx.recv().await { + log::debug!( + target: "frontier-sql", + "💬 Recv Worker Command {:?}", + cmd, + ); + println!("💬 Recv Worker Command {:?}", cmd,); + match cmd { + WorkerCommand::ResumeSync => { + // Attempt to resume from last indexed block. If there is no data in the db, sync genesis. + match indexer_backend.get_last_indexed_canon_block().await.ok() { + Some(last_block_hash) => { + log::debug!( + target: "frontier-sql", + "Resume from last block {:?}", + last_block_hash, + ); + if let Some(parent_hash) = client + .header(last_block_hash) + .ok() + .flatten() + .map(|header| *header.parent_hash()) + { + index_canonical_block_and_ancestors( + client.clone(), + substrate_backend.clone(), + indexer_backend.clone(), + parent_hash, + ) + .await; + } + } + None => { + index_genesis_block(client.clone(), indexer_backend.clone()).await; + } + }; + } + WorkerCommand::IndexLeaves(leaves) => { + for leaf in leaves { + index_block_and_ancestors( + client.clone(), + substrate_backend.clone(), + indexer_backend.clone(), + leaf, + ) + .await; + } + } + WorkerCommand::IndexBestBlock(block_hash) => { + index_canonical_block_and_ancestors( + client.clone(), + substrate_backend.clone(), + indexer_backend.clone(), + block_hash, + ) + .await; + let sinks = &mut pubsub_notification_sinks.lock(); + for sink in sinks.iter() { + let _ = sink.unbounded_send(EthereumBlockNotification { + is_new_best: true, + hash: block_hash, + }); + } + } + WorkerCommand::Canonicalize { + common, + enacted, + retracted, + } => { + canonicalize_blocks(indexer_backend.clone(), common, enacted, retracted) + .await; + } + WorkerCommand::CheckIndexedBlocks => { + // Fix any indexed blocks that did not have their logs indexed + if let Some(block_hash) = + indexer_backend.get_first_pending_canon_block().await + { + log::debug!( + target: "frontier-sql", + "Indexing pending canonical block {:?}", + block_hash, + ); + indexer_backend + .index_block_logs(client.clone(), block_hash) + .await; + } + + // Fix any missing blocks + index_missing_blocks( + client.clone(), + substrate_backend.clone(), + indexer_backend.clone(), + ) + .await; + } + } + } + }); + + tx + } + + /// Start the worker. 
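+ /// Resumes indexing from the last indexed canon block, spawns a periodic `CheckIndexedBlocks` task, and then processes import notifications, canonicalizing re-orgs and indexing each new best block.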
+ pub async fn run( + client: Arc, + substrate_backend: Arc, + indexer_backend: Arc>, + import_notifications: sc_client_api::ImportNotifications, + worker_config: SyncWorkerConfig, + sync_strategy: crate::SyncStrategy, + sync_oracle: Arc, + pubsub_notification_sinks: Arc< + crate::EthereumBlockNotificationSinks>, + >, + ) { + // work in progress for `SyncStrategy::Normal` to also index non-best blocks. + if sync_strategy == crate::SyncStrategy::Normal { + panic!("'SyncStrategy::Normal' is not supported") + } + + let tx = Self::spawn_worker( + client.clone(), + substrate_backend.clone(), + indexer_backend.clone(), + pubsub_notification_sinks.clone(), + ) + .await; + + // Resume sync from the last indexed block until we reach an already indexed parent + tx.send(WorkerCommand::ResumeSync).await.ok(); + + // check missing blocks every interval + let tx2 = tx.clone(); + tokio::task::spawn(async move { + loop { + futures_timer::Delay::new(worker_config.check_indexed_blocks_interval).await; + tx2.send(WorkerCommand::CheckIndexedBlocks).await.ok(); + } + }); + + // check notifications + let mut notifications = import_notifications.fuse(); + loop { + let mut timeout = + futures_timer::Delay::new(worker_config.read_notification_timeout).fuse(); + futures::select! { + _ = timeout => { + if let Ok(leaves) = substrate_backend.blockchain().leaves() { + tx.send(WorkerCommand::IndexLeaves(leaves)).await.ok(); + } + if sync_oracle.is_major_syncing() { + let sinks = &mut pubsub_notification_sinks.lock(); + if !sinks.is_empty() { + *sinks.deref_mut() = vec![]; + } + } + } + notification = notifications.next() => if let Some(notification) = notification { + log::debug!( + target: "frontier-sql", + "📣 New notification: #{} {:?} (parent {}), best = {}", + notification.header.number(), + notification.hash, + notification.header.parent_hash(), + notification.is_new_best, + ); + if notification.is_new_best { + if let Some(tree_route) = notification.tree_route { + log::debug!( + target: "frontier-sql", + "🔀 Re-org happened at new best {}, proceeding to canonicalize db", + notification.hash + ); + let retracted = tree_route + .retracted() + .iter() + .map(|hash_and_number| hash_and_number.hash) + .collect::>(); + let enacted = tree_route + .enacted() + .iter() + .map(|hash_and_number| hash_and_number.hash) + .collect::>(); + + let common = tree_route.common_block().hash; + tx.send(WorkerCommand::Canonicalize { + common, + enacted, + retracted, + }).await.ok(); + } + + tx.send(WorkerCommand::IndexBestBlock(notification.hash)).await.ok(); + } + } + } + } + } +} + +/// Index the provided blocks. The function loops over the ancestors of the provided nodes +/// until it encounters the genesis block, or a block that has already been imported, or +/// is already in the active set. The `hashes` parameter is populated with any parent blocks +/// that is scheduled to be indexed. 
+async fn index_block_and_ancestors( + client: Arc, + substrate_backend: Arc, + indexer_backend: Arc>, + hash: H256, +) where + Block: BlockT + Send + Sync, + Client: StorageProvider + HeaderBackend + Send + Sync + 'static, + Client: ProvideRuntimeApi, + Client::Api: EthereumRuntimeRPCApi, + Backend: BackendT + 'static, + Backend::State: StateBackend, +{ + let blockchain_backend = substrate_backend.blockchain(); + let mut hashes = vec![hash]; + while let Some(hash) = hashes.pop() { + // exit if genesis block is reached + if hash == H256::default() { + break; + } + + // exit if block is already imported + if indexer_backend.is_block_indexed(hash).await { + log::debug!( + target: "frontier-sql", + "🔴 Block {:?} already imported", + hash, + ); + break; + } + + log::debug!( + target: "frontier-sql", + "🛠️ Importing {:?}", + hash, + ); + let _ = indexer_backend + .insert_block_metadata(client.clone(), hash) + .await + .map_err(|e| { + log::error!( + target: "frontier-sql", + "{}", + e, + ); + }); + log::debug!( + target: "frontier-sql", + "Inserted block metadata" + ); + indexer_backend.index_block_logs(client.clone(), hash).await; + + if let Ok(Some(header)) = blockchain_backend.header(hash) { + let parent_hash = header.parent_hash(); + hashes.push(*parent_hash); + } + } +} + +/// Index the provided known canonical blocks. The function loops over the ancestors of the provided nodes +/// until it encounters the genesis block, or a block that has already been imported, or +/// is already in the active set. The `hashes` parameter is populated with any parent blocks +/// that is scheduled to be indexed. +async fn index_canonical_block_and_ancestors( + client: Arc, + substrate_backend: Arc, + indexer_backend: Arc>, + hash: H256, +) where + Block: BlockT + Send + Sync, + Client: StorageProvider + HeaderBackend + Send + Sync + 'static, + Client: ProvideRuntimeApi, + Client::Api: EthereumRuntimeRPCApi, + Backend: BackendT + 'static, + Backend::State: StateBackend, +{ + let blockchain_backend = substrate_backend.blockchain(); + let mut hashes = vec![hash]; + while let Some(hash) = hashes.pop() { + // exit if genesis block is reached + if hash == H256::default() { + break; + } + + let status = indexer_backend.block_indexed_and_canon_status(hash).await; + + // exit if canonical block is already imported + if status.indexed && status.canon { + log::debug!( + target: "frontier-sql", + "🔴 Block {:?} already imported", + hash, + ); + break; + } + + // If block was previously indexed as non-canon then mark it as canon + if status.indexed && !status.canon { + if let Err(err) = indexer_backend.set_block_as_canon(hash).await { + log::error!( + target: "frontier-sql", + "Failed setting block {:?} as canon: {:?}", + hash, + err, + ); + continue; + } + + log::debug!( + target: "frontier-sql", + "🛠️ Marked block as canon {:?}", + hash, + ); + + // Check parent block + if let Ok(Some(header)) = blockchain_backend.header(hash) { + let parent_hash = header.parent_hash(); + hashes.push(*parent_hash); + } + continue; + } + + // Else, import the new block + log::debug!( + target: "frontier-sql", + "🛠️ Importing {:?}", + hash, + ); + let _ = indexer_backend + .insert_block_metadata(client.clone(), hash) + .await + .map_err(|e| { + log::error!( + target: "frontier-sql", + "{}", + e, + ); + }); + log::debug!( + target: "frontier-sql", + "Inserted block metadata {:?}", + hash + ); + indexer_backend.index_block_logs(client.clone(), hash).await; + + if let Ok(Some(header)) = blockchain_backend.header(hash) { + let parent_hash = 
header.parent_hash(); + hashes.push(*parent_hash); + } + } +} + +/// Canonicalizes the database by setting the `is_canon` field for the retracted blocks to `0`, +/// and `1` if they are enacted. +async fn canonicalize_blocks( + indexer_backend: Arc>, + common: H256, + enacted: Vec, + retracted: Vec, +) where + Block: BlockT + Send + Sync, +{ + if (indexer_backend.canonicalize(&retracted, &enacted).await).is_err() { + log::error!( + target: "frontier-sql", + "❌ Canonicalization failed for common ancestor {}, potentially corrupted db. Retracted: {:?}, Enacted: {:?}", + common, + retracted, + enacted, + ); + } +} + +/// Attempts to index any missing blocks that are in the past. This fixes any gaps that may +/// be present in the indexing strategy, since the indexer only walks the parent hashes until +/// it finds the first ancestor that has already been indexed. +async fn index_missing_blocks( + client: Arc, + substrate_backend: Arc, + indexer_backend: Arc>, +) where + Block: BlockT + Send + Sync, + Client: StorageProvider + HeaderBackend + Send + Sync + 'static, + Client: ProvideRuntimeApi, + Client::Api: EthereumRuntimeRPCApi, + Backend: BackendT + 'static, + Backend::State: StateBackend, +{ + if let Some(block_number) = indexer_backend.get_first_missing_canon_block().await { + log::debug!( + target: "frontier-sql", + "Missing {:?}", + block_number, + ); + if block_number == 0 { + index_genesis_block(client.clone(), indexer_backend.clone()).await; + } else if let Ok(Some(block_hash)) = client.hash(block_number.unique_saturated_into()) { + log::debug!( + target: "frontier-sql", + "Indexing past canonical blocks from #{} {:?}", + block_number, + block_hash, + ); + index_canonical_block_and_ancestors( + client.clone(), + substrate_backend.clone(), + indexer_backend.clone(), + block_hash, + ) + .await; + } else { + log::debug!( + target: "frontier-sql", + "Failed retrieving hash for block #{}", + block_number, + ); + } + } +} + +/// Attempts to index any missing blocks that are in the past. This fixes any gaps that may +/// be present in the indexing strategy, since the indexer only walks the parent hashes until +/// it finds the first ancestor that has already been indexed. 
+async fn index_genesis_block( + client: Arc, + indexer_backend: Arc>, +) where + Block: BlockT + Send + Sync, + Client: StorageProvider + HeaderBackend + Send + Sync + 'static, + Client: ProvideRuntimeApi, + Client::Api: EthereumRuntimeRPCApi, + Backend: BackendT + 'static, + Backend::State: StateBackend, +{ + log::info!( + target: "frontier-sql", + "Import genesis", + ); + if let Ok(Some(substrate_genesis_hash)) = indexer_backend + .insert_genesis_block_metadata(client.clone()) + .await + .map_err(|e| { + log::error!( + target: "frontier-sql", + "💔 Cannot sync genesis block: {}", + e, + ) + }) { + log::debug!( + target: "frontier-sql", + "Imported genesis block {:?}", + substrate_genesis_hash, + ); + } +} + +#[cfg(test)] +mod test { + use super::*; + use crate::{EthereumBlockNotification, EthereumBlockNotificationSinks}; + use fc_rpc::{OverrideHandle, SchemaV3Override, StorageOverride}; + use fp_storage::{ + EthereumStorageSchema, ETHEREUM_CURRENT_RECEIPTS, PALLET_ETHEREUM, PALLET_ETHEREUM_SCHEMA, + }; + use futures::executor; + use sc_block_builder::BlockBuilderProvider; + use sc_client_api::{BlockchainEvents, HeaderBackend}; + use scale_codec::Encode; + use sp_consensus::BlockOrigin; + use sp_core::{H160, H256, U256}; + use sp_io::hashing::twox_128; + use sp_runtime::{ + generic::{Digest, Header}, + traits::BlakeTwo256, + }; + use sqlx::Row; + use std::{collections::BTreeMap, path::Path, sync::Arc}; + use substrate_test_runtime_client::{ + prelude::*, DefaultTestClientBuilderExt, TestClientBuilder, TestClientBuilderExt, + }; + use tempfile::tempdir; + + type OpaqueBlock = sp_runtime::generic::Block< + Header, + substrate_test_runtime_client::runtime::Extrinsic, + >; + + struct TestSyncOracleNotSyncing; + impl sp_consensus::SyncOracle for TestSyncOracleNotSyncing { + fn is_major_syncing(&self) -> bool { + false + } + fn is_offline(&self) -> bool { + false + } + } + + fn storage_prefix_build(module: &[u8], storage: &[u8]) -> Vec { + [twox_128(module), twox_128(storage)].concat().to_vec() + } + + fn ethereum_digest() -> Digest { + let partial_header = ethereum::PartialHeader { + parent_hash: H256::random(), + beneficiary: H160::default(), + state_root: H256::default(), + receipts_root: H256::default(), + logs_bloom: ethereum_types::Bloom::default(), + difficulty: U256::zero(), + number: U256::zero(), + gas_limit: U256::zero(), + gas_used: U256::zero(), + timestamp: 0u64, + extra_data: Vec::new(), + mix_hash: H256::default(), + nonce: ethereum_types::H64::default(), + }; + let ethereum_transactions: Vec = vec![]; + let ethereum_block = ethereum::Block::new(partial_header, ethereum_transactions, vec![]); + Digest { + logs: vec![sp_runtime::generic::DigestItem::Consensus( + fp_consensus::FRONTIER_ENGINE_ID, + fp_consensus::PostLog::Hashes(fp_consensus::Hashes::from_block(ethereum_block)) + .encode(), + )], + } + } + + #[tokio::test] + async fn interval_indexing_works() { + let tmp = tempdir().expect("create a temporary directory"); + // Initialize storage with schema V3 + let builder = TestClientBuilder::new().add_extra_storage( + PALLET_ETHEREUM_SCHEMA.to_vec(), + Encode::encode(&EthereumStorageSchema::V3), + ); + // Backend + let backend = builder.backend(); + // Client + let (client, _) = + builder.build_with_native_executor::(None); + let mut client = Arc::new(client); + // Overrides + let mut overrides_map = BTreeMap::new(); + overrides_map.insert( + EthereumStorageSchema::V3, + Box::new(SchemaV3Override::new(client.clone())) as Box>, + ); + let overrides = Arc::new(OverrideHandle { + 
schemas: overrides_map, + fallback: Box::new(SchemaV3Override::new(client.clone())), + }); + // Indexer backend + let indexer_backend = fc_db::sql::Backend::new( + fc_db::sql::BackendConfig::Sqlite(fc_db::sql::SqliteBackendConfig { + path: Path::new("sqlite:///") + .join(tmp.path()) + .join("test.db3") + .to_str() + .unwrap(), + create_if_missing: true, + cache_size: 204800, + thread_count: 4, + }), + 100, + None, + overrides.clone(), + ) + .await + .expect("indexer pool to be created"); + // Pool + let pool = indexer_backend.pool().clone(); + + // Create 10 blocks, 2 receipts each, 1 log per receipt + let mut logs: Vec<(i32, fc_db::sql::Log)> = vec![]; + for block_number in 1..11 { + // New block including pallet ethereum block digest + let mut builder = client.new_block(ethereum_digest()).unwrap(); + // Addresses + let address_1 = H160::repeat_byte(0x01); + let address_2 = H160::repeat_byte(0x02); + // Topics + let topics_1_1 = H256::repeat_byte(0x01); + let topics_1_2 = H256::repeat_byte(0x02); + let topics_2_1 = H256::repeat_byte(0x03); + let topics_2_2 = H256::repeat_byte(0x04); + let topics_2_3 = H256::repeat_byte(0x05); + let topics_2_4 = H256::repeat_byte(0x06); + + let receipts = Encode::encode(&vec![ + ethereum::ReceiptV3::EIP1559(ethereum::EIP1559ReceiptData { + status_code: 0u8, + used_gas: U256::zero(), + logs_bloom: ethereum_types::Bloom::zero(), + logs: vec![ethereum::Log { + address: address_1, + topics: vec![topics_1_1, topics_1_2], + data: vec![], + }], + }), + ethereum::ReceiptV3::EIP1559(ethereum::EIP1559ReceiptData { + status_code: 0u8, + used_gas: U256::zero(), + logs_bloom: ethereum_types::Bloom::zero(), + logs: vec![ethereum::Log { + address: address_2, + topics: vec![topics_2_1, topics_2_2, topics_2_3, topics_2_4], + data: vec![], + }], + }), + ]); + builder + .push_storage_change( + storage_prefix_build(PALLET_ETHEREUM, ETHEREUM_CURRENT_RECEIPTS), + Some(receipts), + ) + .unwrap(); + let block = builder.build().unwrap().block; + let block_hash = block.header.hash(); + executor::block_on(client.import(BlockOrigin::Own, block)).unwrap(); + logs.push(( + block_number as i32, + fc_db::sql::Log { + address: address_1.as_bytes().to_owned(), + topic_1: Some(topics_1_1.as_bytes().to_owned()), + topic_2: Some(topics_1_2.as_bytes().to_owned()), + topic_3: None, + topic_4: None, + log_index: 0i32, + transaction_index: 0i32, + substrate_block_hash: block_hash.as_bytes().to_owned(), + }, + )); + logs.push(( + block_number as i32, + fc_db::sql::Log { + address: address_2.as_bytes().to_owned(), + topic_1: Some(topics_2_1.as_bytes().to_owned()), + topic_2: Some(topics_2_2.as_bytes().to_owned()), + topic_3: Some(topics_2_3.as_bytes().to_owned()), + topic_4: Some(topics_2_4.as_bytes().to_owned()), + log_index: 0i32, + transaction_index: 1i32, + substrate_block_hash: block_hash.as_bytes().to_owned(), + }, + )); + } + + let test_sync_oracle = TestSyncOracleNotSyncing {}; + let pubsub_notification_sinks: EthereumBlockNotificationSinks< + EthereumBlockNotification, + > = Default::default(); + let pubsub_notification_sinks = Arc::new(pubsub_notification_sinks); + + let pubsub_notification_sinks_inner = pubsub_notification_sinks.clone(); + + // Spawn worker after creating the blocks will resolve the interval future. + // Because the SyncWorker is spawned at service level, in the real world this will only + // happen when we are in major syncing (where there is lack of import notificatons). 
+ tokio::task::spawn(async move { + crate::sql::SyncWorker::run( + client.clone(), + backend.clone(), + Arc::new(indexer_backend), + client.clone().import_notification_stream(), + SyncWorkerConfig { + read_notification_timeout: Duration::from_secs(1), + check_indexed_blocks_interval: Duration::from_secs(60), + }, + crate::SyncStrategy::Parachain, + Arc::new(test_sync_oracle), + pubsub_notification_sinks_inner, + ) + .await + }); + + // Enough time for interval to run + futures_timer::Delay::new(std::time::Duration::from_millis(1500)).await; + + // Query db + let db_logs = sqlx::query( + "SELECT + b.block_number, + address, + topic_1, + topic_2, + topic_3, + topic_4, + log_index, + transaction_index, + a.substrate_block_hash + FROM logs AS a INNER JOIN blocks AS b ON a.substrate_block_hash = b.substrate_block_hash + ORDER BY b.block_number ASC, log_index ASC, transaction_index ASC", + ) + .fetch_all(&pool) + .await + .expect("test query result") + .iter() + .map(|row| { + let block_number = row.get::(0); + let address = row.get::, _>(1); + let topic_1 = row.get::>, _>(2); + let topic_2 = row.get::>, _>(3); + let topic_3 = row.get::>, _>(4); + let topic_4 = row.get::>, _>(5); + let log_index = row.get::(6); + let transaction_index = row.get::(7); + let substrate_block_hash = row.get::, _>(8); + ( + block_number, + fc_db::sql::Log { + address, + topic_1, + topic_2, + topic_3, + topic_4, + log_index, + transaction_index, + substrate_block_hash, + }, + ) + }) + .collect::>(); + + // Expect the db to contain 20 rows. 10 blocks, 2 logs each. + // Db data is sorted ASC by block_number, log_index and transaction_index. + // This is necessary because indexing is done from tip to genesis. + // Expect the db resultset to be equal to the locally produced Log vector. + assert_eq!(db_logs, logs); + } + + #[tokio::test] + async fn notification_indexing_works() { + let tmp = tempdir().expect("create a temporary directory"); + // Initialize storage with schema V3 + let builder = TestClientBuilder::new().add_extra_storage( + PALLET_ETHEREUM_SCHEMA.to_vec(), + Encode::encode(&EthereumStorageSchema::V3), + ); + // Backend + let backend = builder.backend(); + // Client + let (client, _) = + builder.build_with_native_executor::(None); + let mut client = Arc::new(client); + // Overrides + let mut overrides_map = BTreeMap::new(); + overrides_map.insert( + EthereumStorageSchema::V3, + Box::new(SchemaV3Override::new(client.clone())) as Box>, + ); + let overrides = Arc::new(OverrideHandle { + schemas: overrides_map, + fallback: Box::new(SchemaV3Override::new(client.clone())), + }); + // Indexer backend + let indexer_backend = fc_db::sql::Backend::new( + fc_db::sql::BackendConfig::Sqlite(fc_db::sql::SqliteBackendConfig { + path: Path::new("sqlite:///") + .join(tmp.path()) + .join("test.db3") + .to_str() + .unwrap(), + create_if_missing: true, + cache_size: 204800, + thread_count: 4, + }), + 100, + None, + overrides.clone(), + ) + .await + .expect("indexer pool to be created"); + // Pool + let pool = indexer_backend.pool().clone(); + + let test_sync_oracle = TestSyncOracleNotSyncing {}; + let pubsub_notification_sinks: EthereumBlockNotificationSinks< + EthereumBlockNotification, + > = Default::default(); + let pubsub_notification_sinks = Arc::new(pubsub_notification_sinks); + + let pubsub_notification_sinks_inner = pubsub_notification_sinks.clone(); + + // Spawn worker after creating the blocks will resolve the interval future. 
+ // Because the SyncWorker is spawned at service level, in the real world this will only + // happen when we are in major syncing (where there is lack of import notifications). + let notification_stream = client.clone().import_notification_stream(); + let client_inner = client.clone(); + tokio::task::spawn(async move { + crate::sql::SyncWorker::run( + client_inner, + backend.clone(), + Arc::new(indexer_backend), + notification_stream, + SyncWorkerConfig { + read_notification_timeout: Duration::from_secs(10), + check_indexed_blocks_interval: Duration::from_secs(60), + }, + crate::SyncStrategy::Parachain, + Arc::new(test_sync_oracle), + pubsub_notification_sinks_inner, + ) + .await + }); + + // Create 10 blocks, 2 receipts each, 1 log per receipt + let mut logs: Vec<(i32, fc_db::sql::Log)> = vec![]; + for block_number in 1..11 { + // New block including pallet ethereum block digest + let mut builder = client.new_block(ethereum_digest()).unwrap(); + // Addresses + let address_1 = H160::random(); + let address_2 = H160::random(); + // Topics + let topics_1_1 = H256::random(); + let topics_1_2 = H256::random(); + let topics_2_1 = H256::random(); + let topics_2_2 = H256::random(); + let topics_2_3 = H256::random(); + let topics_2_4 = H256::random(); + + let receipts = Encode::encode(&vec![ + ethereum::ReceiptV3::EIP1559(ethereum::EIP1559ReceiptData { + status_code: 0u8, + used_gas: U256::zero(), + logs_bloom: ethereum_types::Bloom::zero(), + logs: vec![ethereum::Log { + address: address_1, + topics: vec![topics_1_1, topics_1_2], + data: vec![], + }], + }), + ethereum::ReceiptV3::EIP1559(ethereum::EIP1559ReceiptData { + status_code: 0u8, + used_gas: U256::zero(), + logs_bloom: ethereum_types::Bloom::zero(), + logs: vec![ethereum::Log { + address: address_2, + topics: vec![topics_2_1, topics_2_2, topics_2_3, topics_2_4], + data: vec![], + }], + }), + ]); + builder + .push_storage_change( + storage_prefix_build(PALLET_ETHEREUM, ETHEREUM_CURRENT_RECEIPTS), + Some(receipts), + ) + .unwrap(); + let block = builder.build().unwrap().block; + let block_hash = block.header.hash(); + executor::block_on(client.import(BlockOrigin::Own, block)).unwrap(); + logs.push(( + block_number as i32, + fc_db::sql::Log { + address: address_1.as_bytes().to_owned(), + topic_1: Some(topics_1_1.as_bytes().to_owned()), + topic_2: Some(topics_1_2.as_bytes().to_owned()), + topic_3: None, + topic_4: None, + log_index: 0i32, + transaction_index: 0i32, + substrate_block_hash: block_hash.as_bytes().to_owned(), + }, + )); + logs.push(( + block_number as i32, + fc_db::sql::Log { + address: address_2.as_bytes().to_owned(), + topic_1: Some(topics_2_1.as_bytes().to_owned()), + topic_2: Some(topics_2_2.as_bytes().to_owned()), + topic_3: Some(topics_2_3.as_bytes().to_owned()), + topic_4: Some(topics_2_4.as_bytes().to_owned()), + log_index: 0i32, + transaction_index: 1i32, + substrate_block_hash: block_hash.as_bytes().to_owned(), + }, + )); + // Let's not notify too quickly + futures_timer::Delay::new(std::time::Duration::from_millis(100)).await; + } + + // Query db + let db_logs = sqlx::query( + "SELECT + b.block_number, + address, + topic_1, + topic_2, + topic_3, + topic_4, + log_index, + transaction_index, + a.substrate_block_hash + FROM logs AS a INNER JOIN blocks AS b ON a.substrate_block_hash = b.substrate_block_hash + ORDER BY b.block_number ASC, log_index ASC, transaction_index ASC", + ) + .fetch_all(&pool) + .await + .expect("test query result") + .iter() + .map(|row| { + let block_number = row.get::(0); + let address = 
row.get::, _>(1); + let topic_1 = row.get::>, _>(2); + let topic_2 = row.get::>, _>(3); + let topic_3 = row.get::>, _>(4); + let topic_4 = row.get::>, _>(5); + let log_index = row.get::(6); + let transaction_index = row.get::(7); + let substrate_block_hash = row.get::, _>(8); + ( + block_number, + fc_db::sql::Log { + address, + topic_1, + topic_2, + topic_3, + topic_4, + log_index, + transaction_index, + substrate_block_hash, + }, + ) + }) + .collect::>(); + + // Expect the db to contain 20 rows. 10 blocks, 2 logs each. + // Db data is sorted ASC by block_number, log_index and transaction_index. + // This is necessary because indexing is done from tip to genesis. + // Expect the db resultset to be equal to the locally produced Log vector. + assert_eq!(db_logs, logs); + } + + #[tokio::test] + async fn canonicalize_works() { + let tmp = tempdir().expect("create a temporary directory"); + // Initialize storage with schema V3 + let builder = TestClientBuilder::new().add_extra_storage( + PALLET_ETHEREUM_SCHEMA.to_vec(), + Encode::encode(&EthereumStorageSchema::V3), + ); + // Backend + let backend = builder.backend(); + // Client + let (client, _) = + builder.build_with_native_executor::(None); + let mut client = Arc::new(client); + // Overrides + let mut overrides_map = BTreeMap::new(); + overrides_map.insert( + EthereumStorageSchema::V3, + Box::new(SchemaV3Override::new(client.clone())) as Box>, + ); + let overrides = Arc::new(OverrideHandle { + schemas: overrides_map, + fallback: Box::new(SchemaV3Override::new(client.clone())), + }); + // Indexer backend + let indexer_backend = fc_db::sql::Backend::new( + fc_db::sql::BackendConfig::Sqlite(fc_db::sql::SqliteBackendConfig { + path: Path::new("sqlite:///") + .join(tmp.path()) + .join("test.db3") + .to_str() + .unwrap(), + create_if_missing: true, + cache_size: 204800, + thread_count: 4, + }), + 100, + None, + overrides.clone(), + ) + .await + .expect("indexer pool to be created"); + + // Pool + let pool = indexer_backend.pool().clone(); + + // Spawn indexer task + let test_sync_oracle = TestSyncOracleNotSyncing {}; + let pubsub_notification_sinks: EthereumBlockNotificationSinks< + EthereumBlockNotification, + > = Default::default(); + let pubsub_notification_sinks = Arc::new(pubsub_notification_sinks); + + let pubsub_notification_sinks_inner = pubsub_notification_sinks.clone(); + + let notification_stream = client.clone().import_notification_stream(); + let client_inner = client.clone(); + tokio::task::spawn(async move { + crate::sql::SyncWorker::run( + client_inner, + backend.clone(), + Arc::new(indexer_backend), + notification_stream, + SyncWorkerConfig { + read_notification_timeout: Duration::from_secs(10), + check_indexed_blocks_interval: Duration::from_secs(60), + }, + crate::SyncStrategy::Parachain, + Arc::new(test_sync_oracle), + pubsub_notification_sinks_inner, + ) + .await + }); + + // Create 10 blocks saving the common ancestor for branching. 
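+ // Block 8 is kept as the common ancestor; blocks 9 and 10 are expected to be marked non-canon once a longer fork is later built on top of block 8.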
+ let mut parent_hash = client + .hash(sp_runtime::traits::Zero::zero()) + .unwrap() + .expect("genesis hash"); + let mut common_ancestor = parent_hash; + let mut hashes_to_be_orphaned: Vec = vec![]; + for block_number in 1..11 { + // New block including pallet ethereum block digest + let builder = client + .new_block_at(parent_hash, ethereum_digest(), false) + .unwrap(); + let block = builder.build().unwrap().block; + let block_hash = block.header.hash(); + executor::block_on(client.import(BlockOrigin::Own, block)).unwrap(); + if block_number == 8 { + common_ancestor = block_hash; + } + if block_number == 9 || block_number == 10 { + hashes_to_be_orphaned.push(block_hash); + } + parent_hash = block_hash; + // Let's not notify too quickly + futures_timer::Delay::new(std::time::Duration::from_millis(100)).await; + } + + // Test all blocks are initially canon. + let mut res = sqlx::query("SELECT is_canon FROM blocks") + .fetch_all(&pool) + .await + .expect("test query result") + .iter() + .map(|row| row.get::(0)) + .collect::>(); + + assert_eq!(res.len(), 10); + res.dedup(); + assert_eq!(res.len(), 1); + + // Create the new longest chain, 10 more blocks on top of the common ancestor. + parent_hash = common_ancestor; + for _ in 1..11 { + // New block including pallet ethereum block digest + let builder = client + .new_block_at(parent_hash, ethereum_digest(), false) + .unwrap(); + let block = builder.build().unwrap().block; + let block_hash = block.header.hash(); + executor::block_on(client.import(BlockOrigin::Own, block)).unwrap(); + parent_hash = block_hash; + // Let's not notify too quickly + futures_timer::Delay::new(std::time::Duration::from_millis(100)).await; + } + + // Test the reorged chain is correctly indexed. + let res = sqlx::query("SELECT substrate_block_hash, is_canon, block_number FROM blocks") + .fetch_all(&pool) + .await + .expect("test query result") + .iter() + .map(|row| { + let substrate_block_hash = H256::from_slice(&row.get::, _>(0)[..]); + let is_canon = row.get::(1); + let block_number = row.get::(2); + (substrate_block_hash, is_canon, block_number) + }) + .collect::>(); + + // 20 blocks in total + assert_eq!(res.len(), 20); + + // 18 of which are canon + let canon = res + .clone() + .into_iter() + .filter_map(|it| if it.1 == 1 { Some(it) } else { None }) + .collect::>(); + assert_eq!(canon.len(), 18); + + // and 2 of which are the originally tracked as orphaned + let not_canon = res + .clone() + .into_iter() + .filter_map(|it| if it.1 == 0 { Some(it.0) } else { None }) + .collect::>(); + assert_eq!(not_canon.len(), hashes_to_be_orphaned.len()); + assert!(not_canon.iter().all(|h| hashes_to_be_orphaned.contains(h))); + } + + #[tokio::test] + async fn resuming_from_last_indexed_block_works() { + let tmp = tempdir().expect("create a temporary directory"); + // Initialize storage with schema V3 + let builder = TestClientBuilder::new().add_extra_storage( + PALLET_ETHEREUM_SCHEMA.to_vec(), + Encode::encode(&EthereumStorageSchema::V3), + ); + // Backend + let backend = builder.backend(); + // Client + let (client, _) = + builder.build_with_native_executor::(None); + let mut client = Arc::new(client); + // Overrides + let mut overrides_map = BTreeMap::new(); + overrides_map.insert( + EthereumStorageSchema::V3, + Box::new(SchemaV3Override::new(client.clone())) as Box>, + ); + let overrides = Arc::new(OverrideHandle { + schemas: overrides_map, + fallback: Box::new(SchemaV3Override::new(client.clone())), + }); + // Indexer backend + let indexer_backend = fc_db::sql::Backend::new( 
+ fc_db::sql::BackendConfig::Sqlite(fc_db::sql::SqliteBackendConfig { + path: Path::new("sqlite:///") + .join(tmp.path()) + .join("test.db3") + .to_str() + .unwrap(), + create_if_missing: true, + cache_size: 204800, + thread_count: 4, + }), + 100, + None, + overrides.clone(), + ) + .await + .expect("indexer pool to be created"); + + // Pool + let pool = indexer_backend.pool().clone(); + + // Create 5 blocks, storing them newest first. + let mut parent_hash = client + .hash(sp_runtime::traits::Zero::zero()) + .unwrap() + .expect("genesis hash"); + let mut block_hashes: Vec = vec![]; + for _block_number in 1..=5 { + let builder = client + .new_block_at(parent_hash, ethereum_digest(), false) + .unwrap(); + let block = builder.build().unwrap().block; + let block_hash = block.header.hash(); + executor::block_on(client.import(BlockOrigin::Own, block)).unwrap(); + block_hashes.insert(0, block_hash.clone()); + parent_hash = block_hash; + } + + // Mark the block as canon and indexed + let block_resume_at = block_hashes[0]; + sqlx::query("INSERT INTO blocks(substrate_block_hash, ethereum_block_hash, ethereum_storage_schema, block_number, is_canon) VALUES (?, ?, ?, 5, 1)") + .bind(block_resume_at.as_bytes()) + .bind(H256::zero().as_bytes()) + .bind(H256::zero().as_bytes()) + .execute(&pool) + .await + .expect("sql query must succeed"); + sqlx::query("INSERT INTO sync_status(substrate_block_hash, status) VALUES (?, 1)") + .bind(block_resume_at.as_bytes()) + .execute(&pool) + .await + .expect("sql query must succeed"); + + // Spawn indexer task + let test_sync_oracle = TestSyncOracleNotSyncing {}; + let pubsub_notification_sinks: EthereumBlockNotificationSinks< + EthereumBlockNotification, + > = Default::default(); + let pubsub_notification_sinks = Arc::new(pubsub_notification_sinks); + + let pubsub_notification_sinks_inner = pubsub_notification_sinks.clone(); + + let client_inner = client.clone(); + tokio::task::spawn(async move { + crate::sql::SyncWorker::run( + client_inner, + backend.clone(), + Arc::new(indexer_backend), + client.clone().import_notification_stream(), + SyncWorkerConfig { + read_notification_timeout: Duration::from_secs(10), + check_indexed_blocks_interval: Duration::from_secs(60), + }, + crate::SyncStrategy::Parachain, + Arc::new(test_sync_oracle), + pubsub_notification_sinks_inner, + ) + .await + }); + // Enough time for indexing + futures_timer::Delay::new(std::time::Duration::from_millis(1500)).await; + + // Test the reorged chain is correctly indexed. + let actual_imported_blocks = + sqlx::query("SELECT substrate_block_hash, is_canon, block_number FROM blocks") + .fetch_all(&pool) + .await + .expect("test query result") + .iter() + .map(|row| H256::from_slice(&row.get::, _>(0)[..])) + .collect::>(); + let expected_imported_blocks = block_hashes.clone(); + assert_eq!(expected_imported_blocks, actual_imported_blocks); + } +} diff --git a/client/rpc-core/src/eth.rs b/client/rpc-core/src/eth.rs index 244df26fcb..bea3943d01 100644 --- a/client/rpc-core/src/eth.rs +++ b/client/rpc-core/src/eth.rs @@ -76,11 +76,14 @@ pub trait EthApi { /// Returns the number of transactions in a block with given hash. #[method(name = "eth_getBlockTransactionCountByHash")] - fn block_transaction_count_by_hash(&self, hash: H256) -> RpcResult>; + async fn block_transaction_count_by_hash(&self, hash: H256) -> RpcResult>; /// Returns the number of transactions in a block with given block number. 
#[method(name = "eth_getBlockTransactionCountByNumber")] - fn block_transaction_count_by_number(&self, number: BlockNumber) -> RpcResult>; + async fn block_transaction_count_by_number( + &self, + number: BlockNumber, + ) -> RpcResult>; /// Returns the number of uncles in a block with given hash. #[method(name = "eth_getUncleCountByBlockHash")] @@ -140,11 +143,11 @@ pub trait EthApi { /// Returns balance of the given account. #[method(name = "eth_getBalance")] - fn balance(&self, address: H160, number: Option) -> RpcResult; + async fn balance(&self, address: H160, number: Option) -> RpcResult; /// Returns content of the storage at given address. #[method(name = "eth_getStorageAt")] - fn storage_at( + async fn storage_at( &self, address: H160, index: U256, @@ -153,11 +156,15 @@ pub trait EthApi { /// Returns the number of transactions sent from given address at given time (block number). #[method(name = "eth_getTransactionCount")] - fn transaction_count(&self, address: H160, number: Option) -> RpcResult; + async fn transaction_count( + &self, + address: H160, + number: Option, + ) -> RpcResult; /// Returns the code at given address at given time (block number). #[method(name = "eth_getCode")] - fn code_at(&self, address: H160, number: Option) -> RpcResult; + async fn code_at(&self, address: H160, number: Option) -> RpcResult; // ######################################################################## // Execute @@ -165,7 +172,7 @@ pub trait EthApi { /// Call contract, returning the output data. #[method(name = "eth_call")] - fn call( + async fn call( &self, request: CallRequest, number: Option, @@ -190,7 +197,7 @@ pub trait EthApi { /// Introduced in EIP-1159 for getting information on the appropriate priority fee to use. #[method(name = "eth_feeHistory")] - fn fee_history( + async fn fee_history( &self, block_count: U256, newest_block: BlockNumber, diff --git a/client/rpc/src/eth/block.rs b/client/rpc/src/eth/block.rs index 871c52d429..1ffce180a2 100644 --- a/client/rpc/src/eth/block.rs +++ b/client/rpc/src/eth/block.rs @@ -54,6 +54,7 @@ where backend.as_ref(), hash, ) + .await .map_err(|err| internal_err(format!("{:?}", err)))? { Some(hash) => hash, @@ -107,7 +108,9 @@ where client.as_ref(), backend.as_ref(), Some(number), - )? { + ) + .await? + { Some(id) => id, None => return Ok(None), }; @@ -151,12 +154,13 @@ where } } - pub fn block_transaction_count_by_hash(&self, hash: H256) -> RpcResult> { + pub async fn block_transaction_count_by_hash(&self, hash: H256) -> RpcResult> { let substrate_hash = match frontier_backend_client::load_hash::( self.client.as_ref(), self.backend.as_ref(), hash, ) + .await .map_err(|err| internal_err(format!("{:?}", err)))? { Some(hash) => hash, @@ -176,7 +180,7 @@ where } } - pub fn block_transaction_count_by_number( + pub async fn block_transaction_count_by_number( &self, number: BlockNumber, ) -> RpcResult> { @@ -191,7 +195,9 @@ where self.client.as_ref(), self.backend.as_ref(), Some(number), - )? { + ) + .await? + { Some(id) => id, None => return Ok(None), }; diff --git a/client/rpc/src/eth/execute.rs b/client/rpc/src/eth/execute.rs index 4a80b3b26b..d603043b9b 100644 --- a/client/rpc/src/eth/execute.rs +++ b/client/rpc/src/eth/execute.rs @@ -73,7 +73,7 @@ where BE: Backend + 'static, A: ChainApi + 'static, { - pub fn call( + pub async fn call( &self, request: CallRequest, number: Option, @@ -106,7 +106,9 @@ where self.client.as_ref(), self.backend.as_ref(), number, - )? { + ) + .await? 
+ { Some(id) => { let hash = self .client @@ -365,7 +367,9 @@ where self.client.as_ref(), self.backend.as_ref(), number, - )? { + ) + .await? + { Some(id) => { let hash = client .expect_block_hash_from_id(&id) diff --git a/client/rpc/src/eth/fee.rs b/client/rpc/src/eth/fee.rs index 102b9e6fb9..e3c20fb20a 100644 --- a/client/rpc/src/eth/fee.rs +++ b/client/rpc/src/eth/fee.rs @@ -50,7 +50,7 @@ where .map_err(|err| internal_err(format!("fetch runtime chain id failed: {:?}", err))) } - pub fn fee_history( + pub async fn fee_history( &self, block_count: U256, newest_block: BlockNumber, @@ -64,11 +64,13 @@ where block_count.as_u64() }; - if let Ok(Some(id)) = frontier_backend_client::native_block_id::( + if let Some(id) = frontier_backend_client::native_block_id::( self.client.as_ref(), self.backend.as_ref(), Some(newest_block), - ) { + ) + .await? + { let Ok(number) = self.client.expect_block_number_from_id(&id) else { return Err(internal_err(format!("Failed to retrieve block number at {id}"))); }; diff --git a/client/rpc/src/eth/filter.rs b/client/rpc/src/eth/filter.rs index 458b917fa8..593d4c2b71 100644 --- a/client/rpc/src/eth/filter.rs +++ b/client/rpc/src/eth/filter.rs @@ -38,7 +38,7 @@ use crate::{eth::cache::EthBlockDataCacheTask, frontier_backend_client, internal pub struct EthFilter { client: Arc, - backend: Arc>, + backend: Arc + Send + Sync>, filter_pool: FilterPool, max_stored_filters: usize, max_past_logs: u32, @@ -49,7 +49,7 @@ pub struct EthFilter { impl EthFilter { pub fn new( client: Arc, - backend: Arc>, + backend: Arc + Send + Sync>, filter_pool: FilterPool, max_stored_filters: usize, max_past_logs: u32, @@ -69,7 +69,7 @@ impl EthFilter { impl EthFilter where - B: BlockT, + B: BlockT, C: HeaderBackend, { fn create_filter(&self, filter_type: FilterType) -> RpcResult { @@ -111,7 +111,7 @@ where #[async_trait] impl EthFilterApiServer for EthFilter where - B: BlockT, + B: BlockT, C: ProvideRuntimeApi, C::Api: EthereumRuntimeRPCApi, C: HeaderBackend + StorageProvider + 'static, @@ -233,6 +233,7 @@ where }; let client = Arc::clone(&self.client); + let backend = Arc::clone(&self.backend); let block_data_cache = Arc::clone(&self.block_data_cache); let max_past_logs = self.max_past_logs; @@ -262,16 +263,30 @@ where current_number, } => { let mut ret: Vec = Vec::new(); - let _ = filter_range_logs( - client.as_ref(), - &block_data_cache, - &mut ret, - max_past_logs, - &filter, - from_number, - current_number, - ) - .await?; + if backend.is_indexed() { + let _ = filter_range_logs_indexed( + client.as_ref(), + backend.as_ref(), + &block_data_cache, + &mut ret, + max_past_logs, + &filter, + from_number, + current_number, + ) + .await?; + } else { + let _ = filter_range_logs( + client.as_ref(), + &block_data_cache, + &mut ret, + max_past_logs, + &filter, + from_number, + current_number, + ) + .await?; + } Ok(FilterChanges::Logs(ret)) } @@ -303,6 +318,7 @@ where })(); let client = Arc::clone(&self.client); + let backend = Arc::clone(&self.backend); let block_data_cache = Arc::clone(&self.block_data_cache); let max_past_logs = self.max_past_logs; @@ -326,16 +342,30 @@ where .unwrap_or(best_number); let mut ret: Vec = Vec::new(); - let _ = filter_range_logs( - client.as_ref(), - &block_data_cache, - &mut ret, - max_past_logs, - &filter, - from_number, - current_number, - ) - .await?; + if backend.is_indexed() { + let _ = filter_range_logs_indexed( + client.as_ref(), + backend.as_ref(), + &block_data_cache, + &mut ret, + max_past_logs, + &filter, + from_number, + current_number, + ) + .await?; 
+ } else { + let _ = filter_range_logs( + client.as_ref(), + &block_data_cache, + &mut ret, + max_past_logs, + &filter, + from_number, + current_number, + ) + .await?; + } Ok(ret) } @@ -368,6 +398,7 @@ where backend.as_ref(), hash, ) + .await .map_err(|err| internal_err(format!("{:?}", err)))? { Some(hash) => hash, @@ -400,21 +431,181 @@ where .map(|s| s.unique_saturated_into()) .unwrap_or(best_number); - let _ = filter_range_logs( - client.as_ref(), - &block_data_cache, - &mut ret, - max_past_logs, - &filter, - from_number, - current_number, - ) - .await?; + if backend.is_indexed() { + let _ = filter_range_logs_indexed( + client.as_ref(), + backend.as_ref(), + &block_data_cache, + &mut ret, + max_past_logs, + &filter, + from_number, + current_number, + ) + .await?; + } else { + let _ = filter_range_logs( + client.as_ref(), + &block_data_cache, + &mut ret, + max_past_logs, + &filter, + from_number, + current_number, + ) + .await?; + } } Ok(ret) } } +async fn filter_range_logs_indexed( + _client: &C, + backend: &(dyn fc_db::BackendReader + Send + Sync), + block_data_cache: &EthBlockDataCacheTask, + ret: &mut Vec, + max_past_logs: u32, + filter: &Filter, + from: NumberFor, + to: NumberFor, +) -> RpcResult<()> +where + B: BlockT, + C: ProvideRuntimeApi, + C::Api: EthereumRuntimeRPCApi, + C: HeaderBackend + StorageProvider + 'static, + BE: Backend + 'static, +{ + use std::time::Instant; + let timer_start = Instant::now(); + let timer_prepare = Instant::now(); + + // Max request duration of 10 seconds. + let max_duration = time::Duration::from_secs(10); + let begin_request = time::Instant::now(); + + let topics_input = if filter.topics.is_some() { + let filtered_params = FilteredParams::new(Some(filter.clone())); + Some(filtered_params.flat_topics) + } else { + None + }; + + // Normalize filter data + let addresses = match &filter.address { + Some(VariadicValue::Single(item)) => vec![*item], + Some(VariadicValue::Multiple(items)) => items.clone(), + _ => vec![], + }; + let topics = topics_input + .unwrap_or_default() + .iter() + .map(|flat| match flat { + VariadicValue::Single(item) => vec![*item], + VariadicValue::Multiple(items) => items.clone(), + _ => vec![], + }) + .collect::>>>(); + + let time_prepare = timer_prepare.elapsed().as_millis(); + let timer_fetch = Instant::now(); + if let Ok(logs) = backend + .filter_logs( + UniqueSaturatedInto::::unique_saturated_into(from), + UniqueSaturatedInto::::unique_saturated_into(to), + addresses, + topics, + ) + .await + { + let time_fetch = timer_fetch.elapsed().as_millis(); + let timer_post = Instant::now(); + use std::collections::BTreeMap; + + let mut statuses_cache: BTreeMap>> = BTreeMap::new(); + + for log in logs.iter() { + let substrate_hash = log.substrate_block_hash; + + let schema = log.ethereum_storage_schema; + let ethereum_block_hash = log.ethereum_block_hash; + let block_number = log.block_number; + let db_transaction_index = log.transaction_index; + let db_log_index = log.log_index; + + let statuses = if let Some(statuses) = statuses_cache.get(&log.substrate_block_hash) { + statuses.clone() + } else { + let statuses = block_data_cache + .current_transaction_statuses(schema, substrate_hash) + .await; + statuses_cache.insert(log.substrate_block_hash, statuses.clone()); + statuses + }; + if let Some(statuses) = statuses { + let mut block_log_index: u32 = 0; + for status in statuses.iter() { + let mut transaction_log_index: u32 = 0; + let transaction_hash = status.transaction_hash; + let transaction_index = status.transaction_index; + for 
ethereum_log in &status.logs { + if transaction_index == db_transaction_index + && transaction_log_index == db_log_index + { + ret.push(Log { + address: ethereum_log.address, + topics: ethereum_log.topics.clone(), + data: Bytes(ethereum_log.data.clone()), + block_hash: Some(ethereum_block_hash), + block_number: Some(U256::from(block_number)), + transaction_hash: Some(transaction_hash), + transaction_index: Some(U256::from(transaction_index)), + log_index: Some(U256::from(block_log_index)), + transaction_log_index: Some(U256::from(transaction_log_index)), + removed: false, + }); + } + transaction_log_index += 1; + block_log_index += 1; + } + } + } + // Check for restrictions + if ret.len() as u32 > max_past_logs { + return Err(internal_err(format!( + "query returned more than {} results", + max_past_logs + ))); + } + if begin_request.elapsed() > max_duration { + return Err(internal_err(format!( + "query timeout of {} seconds exceeded", + max_duration.as_secs() + ))); + } + } + + let time_post = timer_post.elapsed().as_millis(); + + log::info!( + target: "frontier-sql", + "OUTER-TIMER fetch={}, post={}", + time_fetch, + time_post, + ); + } + + log::info!( + target: "frontier-sql", + "OUTER-TIMER start={}, prepare={}, all_fetch = {}", + timer_start.elapsed().as_millis(), + time_prepare, + timer_fetch.elapsed().as_millis(), + ); + Ok(()) +} + async fn filter_range_logs( client: &C, block_data_cache: &EthBlockDataCacheTask, @@ -501,10 +692,9 @@ fn filter_block_logs<'a>( let mut block_log_index: u32 = 0; let block_hash = H256::from(keccak_256(&rlp::encode(&block.header))); for status in transaction_statuses.iter() { - let logs = status.logs.clone(); let mut transaction_log_index: u32 = 0; let transaction_hash = status.transaction_hash; - for ethereum_log in logs { + for ethereum_log in &status.logs { let mut log = Log { address: ethereum_log.address, topics: ethereum_log.topics.clone(), diff --git a/client/rpc/src/eth/mod.rs b/client/rpc/src/eth/mod.rs index e74b92e219..db28d177e7 100644 --- a/client/rpc/src/eth/mod.rs +++ b/client/rpc/src/eth/mod.rs @@ -80,7 +80,7 @@ pub struct Eth> { is_authority: bool, signers: Vec>, overrides: Arc>, - backend: Arc>, + backend: Arc + Send + Sync>, block_data_cache: Arc>, fee_history_cache: FeeHistoryCache, fee_history_cache_limit: FeeHistoryCacheLimit, @@ -100,7 +100,7 @@ impl Eth { sync: Arc>, signers: Vec>, overrides: Arc>, - backend: Arc>, + backend: Arc + Send + Sync>, is_authority: bool, block_data_cache: Arc>, fee_history_cache: FeeHistoryCache, @@ -225,12 +225,15 @@ where self.block_by_number(number, full).await } - fn block_transaction_count_by_hash(&self, hash: H256) -> RpcResult> { - self.block_transaction_count_by_hash(hash) + async fn block_transaction_count_by_hash(&self, hash: H256) -> RpcResult> { + self.block_transaction_count_by_hash(hash).await } - fn block_transaction_count_by_number(&self, number: BlockNumber) -> RpcResult> { - self.block_transaction_count_by_number(number) + async fn block_transaction_count_by_number( + &self, + number: BlockNumber, + ) -> RpcResult> { + self.block_transaction_count_by_number(number).await } fn block_uncles_count_by_hash(&self, hash: H256) -> RpcResult { @@ -290,38 +293,42 @@ where // State // ######################################################################## - fn balance(&self, address: H160, number: Option) -> RpcResult { - self.balance(address, number) + async fn balance(&self, address: H160, number: Option) -> RpcResult { + self.balance(address, number).await } - fn storage_at( + async fn 
storage_at( &self, address: H160, index: U256, number: Option, ) -> RpcResult { - self.storage_at(address, index, number) + self.storage_at(address, index, number).await } - fn transaction_count(&self, address: H160, number: Option) -> RpcResult { - self.transaction_count(address, number) + async fn transaction_count( + &self, + address: H160, + number: Option, + ) -> RpcResult { + self.transaction_count(address, number).await } - fn code_at(&self, address: H160, number: Option) -> RpcResult { - self.code_at(address, number) + async fn code_at(&self, address: H160, number: Option) -> RpcResult { + self.code_at(address, number).await } // ######################################################################## // Execute // ######################################################################## - fn call( + async fn call( &self, request: CallRequest, number: Option, state_overrides: Option>, ) -> RpcResult { - self.call(request, number, state_overrides) + self.call(request, number, state_overrides).await } async fn estimate_gas( @@ -340,13 +347,14 @@ where self.gas_price() } - fn fee_history( + async fn fee_history( &self, block_count: U256, newest_block: BlockNumber, reward_percentiles: Option>, ) -> RpcResult { self.fee_history(block_count, newest_block, reward_percentiles) + .await } fn max_priority_fee_per_gas(&self) -> RpcResult { diff --git a/client/rpc/src/eth/state.rs b/client/rpc/src/eth/state.rs index 8a03f16da3..1f7f00ca5e 100644 --- a/client/rpc/src/eth/state.rs +++ b/client/rpc/src/eth/state.rs @@ -46,7 +46,7 @@ where P: TransactionPool + 'static, A: ChainApi + 'static, { - pub fn balance(&self, address: H160, number: Option) -> RpcResult { + pub async fn balance(&self, address: H160, number: Option) -> RpcResult { let number = number.unwrap_or(BlockNumber::Latest); if number == BlockNumber::Pending { let api = pending_runtime_api(self.client.as_ref(), self.graph.as_ref())?; @@ -58,11 +58,14 @@ where self.client.as_ref(), self.backend.as_ref(), Some(number), - ) { + ) + .await + { let substrate_hash = self .client .expect_block_hash_from_id(&id) .map_err(|_| internal_err(format!("Expect block number from id: {}", id)))?; + Ok(self .client .runtime_api() @@ -74,7 +77,7 @@ where } } - pub fn storage_at( + pub async fn storage_at( &self, address: H160, index: U256, @@ -90,7 +93,9 @@ where self.client.as_ref(), self.backend.as_ref(), Some(number), - ) { + ) + .await + { let substrate_hash = self .client .expect_block_hash_from_id(&id) @@ -108,7 +113,11 @@ where } } - pub fn transaction_count(&self, address: H160, number: Option) -> RpcResult { + pub async fn transaction_count( + &self, + address: H160, + number: Option, + ) -> RpcResult { if let Some(BlockNumber::Pending) = number { let substrate_hash = self.client.info().best_hash; @@ -139,7 +148,9 @@ where self.client.as_ref(), self.backend.as_ref(), number, - )? { + ) + .await? 
+ { Some(id) => id, None => return Ok(U256::zero()), }; @@ -157,7 +168,7 @@ where .nonce) } - pub fn code_at(&self, address: H160, number: Option) -> RpcResult { + pub async fn code_at(&self, address: H160, number: Option) -> RpcResult { let number = number.unwrap_or(BlockNumber::Latest); if number == BlockNumber::Pending { let api = pending_runtime_api(self.client.as_ref(), self.graph.as_ref())?; @@ -169,7 +180,9 @@ where self.client.as_ref(), self.backend.as_ref(), Some(number), - ) { + ) + .await + { let substrate_hash = self .client .expect_block_hash_from_id(&id) diff --git a/client/rpc/src/eth/submit.rs b/client/rpc/src/eth/submit.rs index 1d37bd9b5b..3e7eb3e81b 100644 --- a/client/rpc/src/eth/submit.rs +++ b/client/rpc/src/eth/submit.rs @@ -67,7 +67,7 @@ where let nonce = match request.nonce { Some(nonce) => nonce, - None => match self.transaction_count(from, None) { + None => match self.transaction_count(from, None).await { Ok(nonce) => nonce, Err(e) => return Err(e), }, diff --git a/client/rpc/src/eth/transaction.rs b/client/rpc/src/eth/transaction.rs index 0b47c37d85..f313260046 100644 --- a/client/rpc/src/eth/transaction.rs +++ b/client/rpc/src/eth/transaction.rs @@ -59,6 +59,7 @@ where hash, true, ) + .await .map_err(|err| internal_err(format!("{:?}", err)))? { Some((hash, index)) => (hash, index as usize), @@ -127,6 +128,7 @@ where backend.as_ref(), hash, ) + .await .map_err(|err| internal_err(format!("{:?}", err)))? { Some(hash) => hash, @@ -170,6 +172,7 @@ where backend.as_ref(), hash, ) + .await .map_err(|err| internal_err(format!("{:?}", err)))? { Some(hash) => hash, @@ -222,7 +225,9 @@ where client.as_ref(), backend.as_ref(), Some(number), - )? { + ) + .await? + { Some(id) => id, None => return Ok(None), }; @@ -274,6 +279,7 @@ where hash, true, ) + .await .map_err(|err| internal_err(format!("{:?}", err)))? { Some((hash, index)) => (hash, index as usize), @@ -285,6 +291,7 @@ where backend.as_ref(), hash, ) + .await .map_err(|err| internal_err(format!("{:?}", err)))? { Some(hash) => hash, diff --git a/client/rpc/src/lib.rs b/client/rpc/src/lib.rs index 677afbf23f..2cc91cc820 100644 --- a/client/rpc/src/lib.rs +++ b/client/rpc/src/lib.rs @@ -68,7 +68,6 @@ pub mod frontier_backend_client { use sp_state_machine::OverlayedChanges; // Frontier use fc_rpc_core::types::BlockNumber; - use fp_storage::EthereumStorageSchema; /// Implements a default runtime storage override. /// It assumes that the balances and nonces are stored in pallet `system.account`, and @@ -180,9 +179,9 @@ pub mod frontier_backend_client { } } - pub fn native_block_id( + pub async fn native_block_id( client: &C, - backend: &fc_db::Backend, + backend: &(dyn fc_db::BackendReader + Send + Sync), number: Option, ) -> RpcResult>> where @@ -191,7 +190,7 @@ pub mod frontier_backend_client { { Ok(match number.unwrap_or(BlockNumber::Latest) { BlockNumber::Hash { hash, .. 
} => { - if let Ok(Some(hash)) = load_hash::(client, backend, hash) { + if let Ok(Some(hash)) = load_hash::(client, backend, hash).await { Some(BlockId::Hash(hash)) } else { None @@ -206,9 +205,9 @@ pub mod frontier_backend_client { }) } - pub fn load_hash( + pub async fn load_hash( client: &C, - backend: &fc_db::Backend, + backend: &(dyn fc_db::BackendReader + Send + Sync), hash: H256, ) -> RpcResult> where @@ -216,8 +215,8 @@ pub mod frontier_backend_client { C: HeaderBackend + 'static, { let substrate_hashes = backend - .mapping() .block_hash(&hash) + .await .map_err(|err| internal_err(format!("fetch aux store failed: {:?}", err)))?; if let Some(substrate_hashes) = substrate_hashes { @@ -230,35 +229,6 @@ pub mod frontier_backend_client { Ok(None) } - pub fn load_cached_schema( - backend: &fc_db::Backend, - ) -> RpcResult>> - where - B: BlockT, - C: HeaderBackend + 'static, - { - let cache = backend - .meta() - .ethereum_schema() - .map_err(|err| internal_err(format!("fetch backend failed: {:?}", err)))?; - Ok(cache) - } - - pub fn write_cached_schema( - backend: &fc_db::Backend, - new_cache: Vec<(EthereumStorageSchema, H256)>, - ) -> RpcResult<()> - where - B: BlockT, - C: HeaderBackend + 'static, - { - backend - .meta() - .write_ethereum_schema(new_cache) - .map_err(|err| internal_err(format!("write backend failed: {:?}", err)))?; - Ok(()) - } - pub fn is_canon(client: &C, target_hash: B::Hash) -> bool where B: BlockT, @@ -272,9 +242,9 @@ pub mod frontier_backend_client { false } - pub fn load_transactions( + pub async fn load_transactions( client: &C, - backend: &fc_db::Backend, + backend: &(dyn fc_db::BackendReader + Send + Sync), transaction_hash: H256, only_canonical: bool, ) -> RpcResult> @@ -283,8 +253,8 @@ pub mod frontier_backend_client { C: HeaderBackend + 'static, { let transaction_metadata = backend - .mapping() .transaction_metadata(&transaction_hash) + .await .map_err(|err| internal_err(format!("fetch aux store failed: {:?}", err)))?; transaction_metadata @@ -380,10 +350,10 @@ mod tests { fn open_frontier_backend>( client: Arc, path: PathBuf, - ) -> Result>, String> { - Ok(Arc::new(fc_db::Backend::::new( + ) -> Result>, String> { + Ok(Arc::new(fc_db::kv::Backend::::new( client, - &fc_db::DatabaseSettings { + &fc_db::kv::DatabaseSettings { source: sc_client_db::DatabaseSource::RocksDb { path, cache_size: 0, @@ -426,7 +396,7 @@ mod tests { executor::block_on(client.import(BlockOrigin::Own, b1)).unwrap(); // Map B1 - let commitment = fc_db::MappingCommitment:: { + let commitment = fc_db::kv::MappingCommitment:: { block_hash: b1_hash, ethereum_block_hash, ethereum_transaction_hashes: vec![], @@ -435,11 +405,11 @@ mod tests { // Expect B1 to be canon assert_eq!( - super::frontier_backend_client::load_hash( + futures::executor::block_on(super::frontier_backend_client::load_hash( client.as_ref(), backend.as_ref(), ethereum_block_hash - ) + )) .unwrap() .unwrap(), b1_hash, @@ -455,7 +425,7 @@ mod tests { executor::block_on(client.import(BlockOrigin::Own, b2)).unwrap(); // Map B2 to same ethereum hash - let commitment = fc_db::MappingCommitment:: { + let commitment = fc_db::kv::MappingCommitment:: { block_hash: b2_hash, ethereum_block_hash, ethereum_transaction_hashes: vec![], @@ -464,11 +434,11 @@ mod tests { // Still expect B1 to be canon assert_eq!( - super::frontier_backend_client::load_hash( + futures::executor::block_on(super::frontier_backend_client::load_hash( client.as_ref(), backend.as_ref(), ethereum_block_hash - ) + )) .unwrap() .unwrap(), b1_hash, @@ -484,11 +454,11 @@ 
mod tests { // Expect B2 to be new canon assert_eq!( - super::frontier_backend_client::load_hash( + futures::executor::block_on(super::frontier_backend_client::load_hash( client.as_ref(), backend.as_ref(), ethereum_block_hash - ) + )) .unwrap() .unwrap(), b2_hash, diff --git a/template/node/src/command.rs b/template/node/src/command.rs index 2979d33f29..b4e26d4f6d 100644 --- a/template/node/src/command.rs +++ b/template/node/src/command.rs @@ -15,11 +15,12 @@ // See the License for the specific language governing permissions and // limitations under the License. +use futures::TryFutureExt; // Substrate use sc_cli::{ChainSpec, RuntimeVersion, SubstrateCli}; use sc_service::DatabaseSource; // Frontier -use fc_db::frontier_database_dir; +use fc_db::kv::frontier_database_dir; use crate::{ chain_spec, @@ -120,19 +121,45 @@ pub fn run() -> sc_cli::Result<()> { runner.sync_run(|config| { // Remove Frontier offchain db let db_config_dir = db_config_dir(&config); - let frontier_database_config = match config.database { - DatabaseSource::RocksDb { .. } => DatabaseSource::RocksDb { - path: frontier_database_dir(&db_config_dir, "db"), - cache_size: 0, - }, - DatabaseSource::ParityDb { .. } => DatabaseSource::ParityDb { - path: frontier_database_dir(&db_config_dir, "paritydb"), - }, - _ => { - return Err(format!("Cannot purge `{:?}` database", config.database).into()) + match cli.eth.frontier_backend_type { + crate::eth::BackendType::KeyValue => { + let frontier_database_config = match config.database { + DatabaseSource::RocksDb { .. } => DatabaseSource::RocksDb { + path: frontier_database_dir(&db_config_dir, "db"), + cache_size: 0, + }, + DatabaseSource::ParityDb { .. } => DatabaseSource::ParityDb { + path: frontier_database_dir(&db_config_dir, "paritydb"), + }, + _ => { + return Err(format!( + "Cannot purge `{:?}` database", + config.database + ) + .into()) + } + }; + cmd.run(frontier_database_config)?; + } + crate::eth::BackendType::Sql => { + let db_path = db_config_dir.join("sql"); + match std::fs::remove_dir_all(&db_path) { + Ok(_) => { + println!("{:?} removed.", &db_path); + } + Err(ref err) if err.kind() == std::io::ErrorKind::NotFound => { + eprintln!("{:?} did not exist.", &db_path); + } + Err(err) => { + return Err(format!( + "Cannot purge `{:?}` database: {:?}", + db_path, err, + ) + .into()) + } + }; } }; - cmd.run(frontier_database_config)?; cmd.run(config.database) }) } @@ -211,13 +238,19 @@ pub fn run() -> sc_cli::Result<()> { runner.sync_run(|mut config| { let (client, _, _, _, frontier_backend) = service::new_chain_ops(&mut config, &cli.eth)?; + let frontier_backend = match frontier_backend { + fc_db::Backend::KeyValue(kv) => std::sync::Arc::new(kv), + _ => panic!("Only fc_db::Backend::KeyValue supported"), + }; cmd.run(client, frontier_backend) }) } None => { let runner = cli.create_runner(&cli.run)?; runner.run_node_until_exit(|config| async move { - service::build_full(config, cli.eth, cli.sealing).map_err(Into::into) + service::build_full(config, cli.eth, cli.sealing) + .map_err(Into::into) + .await }) } } diff --git a/template/node/src/eth.rs b/template/node/src/eth.rs index 75a4a20961..8f28a4b637 100644 --- a/template/node/src/eth.rs +++ b/template/node/src/eth.rs @@ -15,8 +15,6 @@ use sp_api::ConstructRuntimeApi; use sp_runtime::traits::BlakeTwo256; // Frontier pub use fc_consensus::FrontierBlockImport; -pub use fc_db::frontier_database_dir; -use fc_mapping_sync::{MappingSyncWorker, SyncStrategy}; use fc_rpc::{EthTask, OverrideHandle}; pub use 
fc_rpc_core::types::{FeeHistoryCache, FeeHistoryCacheLimit, FilterPool}; // Local @@ -38,6 +36,21 @@ pub fn db_config_dir(config: &Configuration) -> PathBuf { }) } +/// Available frontier backend types. +#[derive(Debug, Copy, Clone, clap::ValueEnum)] +pub enum BackendType { + /// Either RocksDb or ParityDb, as inherited from the global backend settings. + KeyValue, + /// SQL database with custom log indexing. + Sql, +} + +impl Default for BackendType { + fn default() -> BackendType { + BackendType::KeyValue + } +} + /// The ethereum-compatibility configuration used to run a node. #[derive(Clone, Debug, clap::Parser)] pub struct EthConfiguration { @@ -68,6 +81,27 @@ pub struct EthConfiguration { /// Size in bytes of the LRU cache for transactions statuses data. #[arg(long, default_value = "50")] pub eth_statuses_cache: usize, + + /// Sets the frontier backend type (KeyValue or Sql). + #[arg(long, value_enum, ignore_case = true, default_value_t = BackendType::default())] + pub frontier_backend_type: BackendType, + + /// Sets the SQL backend's pool size. + #[arg(long, default_value = "100")] + pub frontier_sql_backend_pool_size: u32, + + /// Sets the SQL backend's query timeout in number of VM ops. + #[arg(long, default_value = "10000000")] + pub frontier_sql_backend_num_ops_timeout: u32, + + /// Sets the SQL backend's auxiliary thread limit. + #[arg(long, default_value = "4")] + pub frontier_sql_backend_thread_count: u32, + + /// Sets the SQL backend's cache size in bytes. + /// Default value is 200MB. + #[arg(long, default_value = "209715200")] + pub frontier_sql_backend_cache_size: u64, } pub struct FrontierPartialComponents { @@ -105,11 +139,11 @@ where { } -pub fn spawn_frontier_tasks( +pub async fn spawn_frontier_tasks( task_manager: &TaskManager, client: Arc>, backend: Arc, - frontier_backend: Arc, + frontier_backend: FrontierBackend, filter_pool: Option, overrides: Arc>, fee_history_cache: FeeHistoryCache, @@ -127,24 +161,48 @@ pub fn spawn_frontier_tasks( EthCompatRuntimeApiCollection>, Executor: NativeExecutionDispatch + 'static, { - task_manager.spawn_essential_handle().spawn( - "frontier-mapping-sync-worker", - Some("frontier"), - MappingSyncWorker::new( - client.import_notification_stream(), - Duration::new(6, 0), - client.clone(), - backend, - overrides.clone(), - frontier_backend, - 3, - 0, - SyncStrategy::Normal, - sync, - pubsub_notification_sinks, - ) - .for_each(|()| future::ready(())), - ); + // Spawn main mapping sync worker background task.
+ match frontier_backend { + fc_db::Backend::KeyValue(b) => { + task_manager.spawn_essential_handle().spawn( + "frontier-mapping-sync-worker", + Some("frontier"), + fc_mapping_sync::kv::MappingSyncWorker::new( + client.import_notification_stream(), + Duration::new(6, 0), + client.clone(), + backend, + overrides.clone(), + Arc::new(b), + 3, + 0, + fc_mapping_sync::SyncStrategy::Normal, + sync, + pubsub_notification_sinks, + ) + .for_each(|()| future::ready(())), + ); + } + fc_db::Backend::Sql(b) => { + task_manager.spawn_essential_handle().spawn_blocking( + "frontier-mapping-sync-worker", + Some("frontier"), + fc_mapping_sync::sql::SyncWorker::run( + client.clone(), + backend, + Arc::new(b), + client.import_notification_stream(), + fc_mapping_sync::sql::SyncWorkerConfig { + read_notification_timeout: Duration::from_secs(10), + check_indexed_blocks_interval: Duration::from_secs(60), + }, + fc_mapping_sync::SyncStrategy::Parachain, + sync, + pubsub_notification_sinks, + ), + ); + } + } // Spawn Frontier EthFilterApi maintenance task. if let Some(filter_pool) = filter_pool { diff --git a/template/node/src/rpc/eth.rs b/template/node/src/rpc/eth.rs index ac2a367d3c..4adcb199d9 100644 --- a/template/node/src/rpc/eth.rs +++ b/template/node/src/rpc/eth.rs @@ -17,7 +17,6 @@ use sp_blockchain::{Error as BlockChainError, HeaderBackend, HeaderMetadata}; use sp_core::H256; use sp_runtime::traits::Block as BlockT; // Frontier -use fc_db::Backend as FrontierBackend; pub use fc_rpc::{EthBlockDataCacheTask, EthConfig, OverrideHandle, StorageOverride}; pub use fc_rpc_core::types::{FeeHistoryCache, FeeHistoryCacheLimit, FilterPool}; pub use fc_storage::overrides_handle; @@ -42,7 +41,7 @@ pub struct EthDeps { /// Chain syncing service pub sync: Arc>, /// Frontier Backend. - pub frontier_backend: Arc>, + pub frontier_backend: Arc + Send + Sync>, /// Ethereum data access overrides. pub overrides: Arc>, /// Cache for Ethereum block data. @@ -98,7 +97,7 @@ pub fn create_eth>( >, ) -> Result, Box> where - B: BlockT, + B: BlockT, C: CallApiAt + ProvideRuntimeApi, C::Api: BlockBuilderApi + EthereumRuntimeRPCApi + ConvertTransactionRuntimeApi, C: BlockchainEvents + 'static, diff --git a/template/node/src/service.rs b/template/node/src/service.rs index bba74fd0ea..2faee4b6f0 100644 --- a/template/node/src/service.rs +++ b/template/node/src/service.rs @@ -1,6 +1,6 @@ //! Service and ServiceFactory implementation. Specialized wrapper over substrate service. 
-use std::{cell::RefCell, sync::Arc, time::Duration}; +use std::{cell::RefCell, path::Path, sync::Arc, time::Duration}; use futures::{channel::mpsc, prelude::*}; // Substrate @@ -23,8 +23,8 @@ use crate::{ cli::Sealing, client::{BaseRuntimeApiCollection, FullBackend, FullClient, RuntimeApiCollection}, eth::{ - new_frontier_partial, spawn_frontier_tasks, FrontierBackend, FrontierBlockImport, - FrontierPartialComponents, + new_frontier_partial, spawn_frontier_tasks, BackendType, EthCompatRuntimeApiCollection, + FrontierBackend, FrontierBlockImport, FrontierPartialComponents, }, }; pub use crate::{ @@ -56,7 +56,8 @@ pub fn new_partial( Option, BoxBlockImport>, GrandpaLinkHalf>, - Arc, + FrontierBackend, + Arc>, ), >, ServiceError, @@ -64,8 +65,8 @@ pub fn new_partial( where RuntimeApi: ConstructRuntimeApi>, RuntimeApi: Send + Sync + 'static, - RuntimeApi::RuntimeApi: - BaseRuntimeApiCollection>, + RuntimeApi::RuntimeApi: BaseRuntimeApiCollection> + + EthCompatRuntimeApiCollection>, Executor: NativeExecutionDispatch + 'static, BIQ: FnOnce( Arc>, @@ -74,7 +75,6 @@ where &TaskManager, Option, GrandpaBlockImport>, - Arc, ) -> Result< ( BasicImportQueue>, @@ -124,11 +124,36 @@ where telemetry.as_ref().map(|x| x.handle()), )?; - let frontier_backend = Arc::new(FrontierBackend::open( - client.clone(), - &config.database, - &db_config_dir(config), - )?); + let overrides = crate::rpc::overrides_handle(client.clone()); + let frontier_backend = match eth_config.frontier_backend_type { + BackendType::KeyValue => FrontierBackend::KeyValue(fc_db::kv::Backend::open( + Arc::clone(&client), + &config.database, + &db_config_dir(config), + )?), + BackendType::Sql => { + let db_path = db_config_dir(config).join("sql"); + std::fs::create_dir_all(&db_path).expect("failed creating sql db directory"); + let backend = futures::executor::block_on(fc_db::sql::Backend::new( + fc_db::sql::BackendConfig::Sqlite(fc_db::sql::SqliteBackendConfig { + path: Path::new("sqlite:///") + .join(db_path) + .join("frontier.db3") + .to_str() + .unwrap(), + create_if_missing: true, + thread_count: eth_config.frontier_sql_backend_thread_count, + cache_size: eth_config.frontier_sql_backend_cache_size, + }), + eth_config.frontier_sql_backend_pool_size, + std::num::NonZeroU32::new(eth_config.frontier_sql_backend_num_ops_timeout), + overrides.clone(), + )) + .unwrap_or_else(|err| panic!("failed creating sql backend: {:?}", err)); + FrontierBackend::Sql(backend) + } + }; + let (import_queue, block_import) = build_import_queue( client.clone(), config, @@ -136,7 +161,6 @@ where &task_manager, telemetry.as_ref().map(|x| x.handle()), grandpa_block_import, - frontier_backend.clone(), )?; let transaction_pool = sc_transaction_pool::BasicPool::new_full( @@ -155,7 +179,13 @@ where select_chain, import_queue, transaction_pool, - other: (telemetry, block_import, grandpa_link, frontier_backend), + other: ( + telemetry, + block_import, + grandpa_link, + frontier_backend, + overrides, + ), }) } @@ -167,7 +197,6 @@ pub fn build_aura_grandpa_import_queue( task_manager: &TaskManager, telemetry: Option, grandpa_block_import: GrandpaBlockImport>, - frontier_backend: Arc, ) -> Result< ( BasicImportQueue>, @@ -182,11 +211,8 @@ where RuntimeApiCollection>, Executor: NativeExecutionDispatch + 'static, { - let frontier_block_import = FrontierBlockImport::new( - grandpa_block_import.clone(), - client.clone(), - frontier_backend, - ); + let frontier_block_import = + FrontierBlockImport::new(grandpa_block_import.clone(), client.clone()); let slot_duration = 
sc_consensus_aura::slot_duration(&*client)?; let target_gas_price = eth_config.target_gas_price; @@ -227,7 +253,6 @@ pub fn build_manual_seal_import_queue( task_manager: &TaskManager, _telemetry: Option, _grandpa_block_import: GrandpaBlockImport>, - frontier_backend: Arc, ) -> Result< ( BasicImportQueue>, @@ -242,7 +267,7 @@ where RuntimeApiCollection>, Executor: NativeExecutionDispatch + 'static, { - let frontier_block_import = FrontierBlockImport::new(client.clone(), client, frontier_backend); + let frontier_block_import = FrontierBlockImport::new(client.clone(), client); Ok(( sc_consensus_manual_seal::import_queue( Box::new(frontier_block_import.clone()), @@ -254,7 +279,7 @@ where } /// Builds a new service for a full client. -pub fn new_full( +pub async fn new_full( mut config: Configuration, eth_config: EthConfiguration, sealing: Option, @@ -280,7 +305,7 @@ where keystore_container, select_chain, transaction_pool, - other: (mut telemetry, block_import, grandpa_link, frontier_backend), + other: (mut telemetry, block_import, grandpa_link, frontier_backend, overrides), } = new_partial(&config, ð_config, build_import_queue)?; let FrontierPartialComponents { @@ -352,7 +377,6 @@ where // for ethereum-compatibility rpc. config.rpc_id_provider = Some(Box::new(fc_rpc::EthereumSubIdProvider)); - let overrides = crate::rpc::overrides_handle(client.clone()); let eth_rpc_params = crate::rpc::EthDeps { client: client.clone(), pool: transaction_pool.clone(), @@ -362,7 +386,10 @@ where enable_dev_signer: eth_config.enable_dev_signer, network: network.clone(), sync: sync_service.clone(), - frontier_backend: frontier_backend.clone(), + frontier_backend: match frontier_backend.clone() { + fc_db::Backend::KeyValue(b) => Arc::new(b), + fc_db::Backend::Sql(b) => Arc::new(b), + }, overrides: overrides.clone(), block_data_cache: Arc::new(fc_rpc::EthBlockDataCacheTask::new( task_manager.spawn_handle(), @@ -432,7 +459,8 @@ where fee_history_cache_limit, sync_service.clone(), pubsub_notification_sinks, - ); + ) + .await; if role.is_authority() { // manual-seal authorship @@ -646,7 +674,7 @@ where Ok(()) } -pub fn build_full( +pub async fn build_full( config: Configuration, eth_config: EthConfiguration, sealing: Option, @@ -654,6 +682,7 @@ pub fn build_full( new_full::( config, eth_config, sealing, ) + .await } pub fn new_chain_ops( @@ -665,7 +694,7 @@ pub fn new_chain_ops( Arc, BasicQueue>, TaskManager, - Arc, + FrontierBackend, ), ServiceError, > { diff --git a/ts-tests/package.json b/ts-tests/package.json index 431eac1cd6..7b59a7ed00 100644 --- a/ts-tests/package.json +++ b/ts-tests/package.json @@ -7,7 +7,8 @@ "fmt-check": "prettier ./tests --check", "fmt": "prettier ./tests --write", "build": "truffle compile", - "test": "mocha -r ts-node/register 'tests/**/*.ts'" + "test": "mocha -r ts-node/register 'tests/**/*.ts'", + "test-sql": "FRONTIER_BACKEND_TYPE='sql' mocha -r ts-node/register 'tests/**/*.ts'" }, "author": "", "license": "ISC", diff --git a/ts-tests/tests/util.ts b/ts-tests/tests/util.ts index bd5609d30a..34a7f54c93 100644 --- a/ts-tests/tests/util.ts +++ b/ts-tests/tests/util.ts @@ -12,6 +12,7 @@ export const WS_PORT = 19933; export const DISPLAY_LOG = process.env.FRONTIER_LOG || false; export const FRONTIER_LOG = process.env.FRONTIER_LOG || "info"; export const FRONTIER_BUILD = process.env.FRONTIER_BUILD || "release"; +export const FRONTIER_BACKEND_TYPE = process.env.FRONTIER_BACKEND_TYPE || "key-value"; export const BINARY_PATH = `../target/${FRONTIER_BUILD}/${NODE_BINARY_NAME}`; export const 
SPAWNING_TIME = 60000; @@ -82,6 +83,7 @@ export async function startFrontierNode(provider?: string): Promise<{ `--port=${PORT}`, `--rpc-port=${RPC_PORT}`, `--ws-port=${WS_PORT}`, + `--frontier-backend-type=${FRONTIER_BACKEND_TYPE}`, `--tmp`, ]; const binary = spawn(cmd, args); From 5d8abdbd9902ddb5c582d560bc311f88b4f4197c Mon Sep 17 00:00:00 2001 From: Qinxuan Chen Date: Tue, 6 Jun 2023 15:24:27 +0800 Subject: [PATCH 3/7] chore(deps): use specific version of sqlx instead of git branch (#1071) --- Cargo.lock | 174 ++++++++++++++++++++++++--------- Cargo.toml | 1 + client/db/Cargo.toml | 2 +- client/mapping-sync/Cargo.toml | 2 +- 4 files changed, 132 insertions(+), 47 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 510444e5aa..9dc7aeb3f4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -457,7 +457,7 @@ dependencies = [ "slab", "socket2", "waker-fn", - "windows-sys", + "windows-sys 0.42.0", ] [[package]] @@ -1698,15 +1698,6 @@ dependencies = [ "dirs-sys-next", ] -[[package]] -name = "dirs" -version = "4.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca3aa72a6f96ea37bbc5aa912f6788242832f75369bdfdadcb0e38423f100059" -dependencies = [ - "dirs-sys", -] - [[package]] name = "dirs-sys" version = "0.3.7" @@ -1923,6 +1914,17 @@ dependencies = [ "libc", ] +[[package]] +name = "etcetera" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "136d1b5283a1ab77bd9257427ffd09d8667ced0570b6f938942bc7568ed5b943" +dependencies = [ + "cfg-if", + "home", + "windows-sys 0.48.0", +] + [[package]] name = "ethbloom" version = "0.13.0" @@ -2308,7 +2310,7 @@ dependencies = [ "cfg-if", "libc", "redox_syscall", - "windows-sys", + "windows-sys 0.42.0", ] [[package]] @@ -3323,6 +3325,15 @@ dependencies = [ "hmac 0.8.1", ] +[[package]] +name = "home" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5444c27eef6923071f7ebcc33e3444508466a76f7a2b93da00ed6e19f30c1ddb" +dependencies = [ + "windows-sys 0.48.0", +] + [[package]] name = "hostname" version = "0.3.1" @@ -3598,7 +3609,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e7d6c6f8c91b4b9ed43484ad1a938e393caf35960fce7f82a040497207bd8e9e" dependencies = [ "libc", - "windows-sys", + "windows-sys 0.42.0", ] [[package]] @@ -3634,7 +3645,7 @@ dependencies = [ "hermit-abi 0.2.6", "io-lifetimes 1.0.4", "rustix 0.36.7", - "windows-sys", + "windows-sys 0.42.0", ] [[package]] @@ -4320,9 +4331,9 @@ dependencies = [ [[package]] name = "libsqlite3-sys" -version = "0.25.2" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29f835d03d717946d28b1d1ed632eb6f0e24a299388ee623d0c23118d3e8a7fa" +checksum = "afc22eff61b133b115c6e8c74e818c628d6d5e7a502afea6f64dee076dd94326" dependencies = [ "cc", "pkg-config", @@ -4589,7 +4600,7 @@ dependencies = [ "libc", "log", "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys", + "windows-sys 0.42.0", ] [[package]] @@ -5724,7 +5735,7 @@ dependencies = [ "libc", "redox_syscall", "smallvec", - "windows-sys", + "windows-sys 0.42.0", ] [[package]] @@ -5918,7 +5929,7 @@ dependencies = [ "libc", "log", "wepoll-ffi", - "windows-sys", + "windows-sys 0.42.0", ] [[package]] @@ -6659,7 +6670,7 @@ dependencies = [ "io-lifetimes 0.7.5", "libc", "linux-raw-sys 0.0.46", - "windows-sys", + "windows-sys 0.42.0", ] [[package]] @@ -6673,7 +6684,7 @@ dependencies = [ "io-lifetimes 1.0.4", "libc", "linux-raw-sys 0.1.4", - "windows-sys", + "windows-sys 0.42.0", ] 
[[package]] @@ -7827,7 +7838,7 @@ version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "713cfb06c7059f3588fb8044c0fad1d09e3c01d225e25b9220dbfdcf16dbb1b3" dependencies = [ - "windows-sys", + "windows-sys 0.42.0", ] [[package]] @@ -8942,8 +8953,9 @@ dependencies = [ [[package]] name = "sqlx" -version = "0.7.0-alpha.2" -source = "git+https://github.com/launchbadge/sqlx?branch=main#4f1ac1d6060ee73edf83c8365fafb12df44deecc" +version = "0.7.0-alpha.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "afd8985c8822235a9ebeedf0bff971462470162759663d3184593c807ab6e898" dependencies = [ "sqlx-core", "sqlx-macros", @@ -8954,8 +8966,9 @@ dependencies = [ [[package]] name = "sqlx-core" -version = "0.7.0-alpha.2" -source = "git+https://github.com/launchbadge/sqlx?branch=main#4f1ac1d6060ee73edf83c8365fafb12df44deecc" +version = "0.7.0-alpha.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c12403de02d88e6808de30eb2153c6997d39cc9511a446b510d5944a3ea6727" dependencies = [ "ahash 0.7.6", "atoi", @@ -8995,8 +9008,9 @@ dependencies = [ [[package]] name = "sqlx-macros" -version = "0.7.0-alpha.2" -source = "git+https://github.com/launchbadge/sqlx?branch=main#4f1ac1d6060ee73edf83c8365fafb12df44deecc" +version = "0.7.0-alpha.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2be74801a0852ace9d86bc8cc8ac36241e7dc712fea26b8f32bd80ce29c98a10" dependencies = [ "proc-macro2", "quote", @@ -9007,8 +9021,9 @@ dependencies = [ [[package]] name = "sqlx-macros-core" -version = "0.7.0-alpha.2" -source = "git+https://github.com/launchbadge/sqlx?branch=main#4f1ac1d6060ee73edf83c8365fafb12df44deecc" +version = "0.7.0-alpha.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ce71dd8afc7ad2aeff001bb6affa7128c9087bbdcab07fa97a7952e8ee3d1da" dependencies = [ "dotenvy", "either", @@ -9031,8 +9046,9 @@ dependencies = [ [[package]] name = "sqlx-mysql" -version = "0.7.0-alpha.2" -source = "git+https://github.com/launchbadge/sqlx?branch=main#4f1ac1d6060ee73edf83c8365fafb12df44deecc" +version = "0.7.0-alpha.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9c430536df19e8b5b048a9ae19b266aba77f9f3e2255b7195f465d678cb2d0a" dependencies = [ "atoi", "base64 0.21.0", @@ -9041,7 +9057,6 @@ dependencies = [ "bytes", "crc", "digest 0.10.6", - "dirs", "dotenvy", "either", "futures-channel", @@ -9073,16 +9088,17 @@ dependencies = [ [[package]] name = "sqlx-postgres" -version = "0.7.0-alpha.2" -source = "git+https://github.com/launchbadge/sqlx?branch=main#4f1ac1d6060ee73edf83c8365fafb12df44deecc" +version = "0.7.0-alpha.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "210e0a1523b6d46ca73db1c5197a233a8e14787596910ce88ff5d47a00da0241" dependencies = [ "atoi", "base64 0.21.0", "bitflags", "byteorder", "crc", - "dirs", "dotenvy", + "etcetera", "futures-channel", "futures-core", "futures-io", @@ -9090,6 +9106,7 @@ dependencies = [ "hex", "hkdf", "hmac 0.12.1", + "home", "itoa", "log", "md-5", @@ -9110,8 +9127,9 @@ dependencies = [ [[package]] name = "sqlx-sqlite" -version = "0.7.0-alpha.2" -source = "git+https://github.com/launchbadge/sqlx?branch=main#4f1ac1d6060ee73edf83c8365fafb12df44deecc" +version = "0.7.0-alpha.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f446c04b2d2d06b49b905e33c877b282e0f70b1b60a22513eacee8bf56d8afbe" dependencies = [ "atoi", "flume", @@ -9675,7 +9693,7 @@ 
dependencies = [ "signal-hook-registry", "socket2", "tokio-macros", - "windows-sys", + "windows-sys 0.42.0", ] [[package]] @@ -10381,7 +10399,7 @@ dependencies = [ "wasmtime-environ", "wasmtime-jit", "wasmtime-runtime", - "windows-sys", + "windows-sys 0.42.0", ] [[package]] @@ -10409,7 +10427,7 @@ dependencies = [ "serde", "sha2 0.10.6", "toml", - "windows-sys", + "windows-sys 0.42.0", "zstd", ] @@ -10474,7 +10492,7 @@ dependencies = [ "wasmtime-jit-debug", "wasmtime-jit-icache-coherence", "wasmtime-runtime", - "windows-sys", + "windows-sys 0.42.0", ] [[package]] @@ -10496,7 +10514,7 @@ checksum = "67d412e9340ab1c83867051d8d1d7c90aa8c9afc91da086088068e2734e25064" dependencies = [ "cfg-if", "libc", - "windows-sys", + "windows-sys 0.42.0", ] [[package]] @@ -10520,7 +10538,7 @@ dependencies = [ "wasmtime-asm-macros", "wasmtime-environ", "wasmtime-jit-debug", - "windows-sys", + "windows-sys 0.42.0", ] [[package]] @@ -10878,21 +10896,51 @@ version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" dependencies = [ - "windows_aarch64_gnullvm", + "windows_aarch64_gnullvm 0.42.1", "windows_aarch64_msvc 0.42.1", "windows_i686_gnu 0.42.1", "windows_i686_msvc 0.42.1", "windows_x86_64_gnu 0.42.1", - "windows_x86_64_gnullvm", + "windows_x86_64_gnullvm 0.42.1", "windows_x86_64_msvc 0.42.1", ] +[[package]] +name = "windows-sys" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +dependencies = [ + "windows-targets", +] + +[[package]] +name = "windows-targets" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b1eb6f0cd7c80c79759c929114ef071b87354ce476d9d94271031c0497adfd5" +dependencies = [ + "windows_aarch64_gnullvm 0.48.0", + "windows_aarch64_msvc 0.48.0", + "windows_i686_gnu 0.48.0", + "windows_i686_msvc 0.48.0", + "windows_x86_64_gnu 0.48.0", + "windows_x86_64_gnullvm 0.48.0", + "windows_x86_64_msvc 0.48.0", +] + [[package]] name = "windows_aarch64_gnullvm" version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8c9864e83243fdec7fc9c5444389dcbbfd258f745e7853198f365e3c4968a608" +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" + [[package]] name = "windows_aarch64_msvc" version = "0.34.0" @@ -10905,6 +10953,12 @@ version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4c8b1b673ffc16c47a9ff48570a9d85e25d265735c503681332589af6253c6c7" +[[package]] +name = "windows_aarch64_msvc" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" + [[package]] name = "windows_i686_gnu" version = "0.34.0" @@ -10917,6 +10971,12 @@ version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "de3887528ad530ba7bdbb1faa8275ec7a1155a45ffa57c37993960277145d640" +[[package]] +name = "windows_i686_gnu" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" + [[package]] name = "windows_i686_msvc" version = "0.34.0" @@ -10929,6 +10989,12 @@ version = "0.42.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "bf4d1122317eddd6ff351aa852118a2418ad4214e6613a50e0191f7004372605" +[[package]] +name = "windows_i686_msvc" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" + [[package]] name = "windows_x86_64_gnu" version = "0.34.0" @@ -10941,12 +11007,24 @@ version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c1040f221285e17ebccbc2591ffdc2d44ee1f9186324dd3e84e99ac68d699c45" +[[package]] +name = "windows_x86_64_gnu" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" + [[package]] name = "windows_x86_64_gnullvm" version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "628bfdf232daa22b0d64fdb62b09fcc36bb01f05a3939e20ab73aaf9470d0463" +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" + [[package]] name = "windows_x86_64_msvc" version = "0.34.0" @@ -10959,6 +11037,12 @@ version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "447660ad36a13288b1db4d4248e857b510e8c3a225c822ba4fb748c0aafecffd" +[[package]] +name = "windows_x86_64_msvc" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" + [[package]] name = "winreg" version = "0.10.1" diff --git a/Cargo.toml b/Cargo.toml index 6767780a4b..33c27f6102 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -56,6 +56,7 @@ scale-codec = { package = "parity-scale-codec", version = "3.2.1", default-featu scale-info = { version = "2.3.1", default-features = false, features = ["derive"] } serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" +sqlx = "0.7.0-alpha.3" # Substrate Client sc-basic-authorship = { version = "0.10.0-dev", git = "https://github.com/paritytech/substrate", branch = "master" } sc-block-builder = { version = "0.10.0-dev", git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/client/db/Cargo.toml b/client/db/Cargo.toml index c21d87fd94..c834b39e8c 100644 --- a/client/db/Cargo.toml +++ b/client/db/Cargo.toml @@ -17,7 +17,7 @@ futures = "0.3.25" log = "0.4.17" parking_lot = "0.12.1" smallvec = "1.10" -sqlx = { features = ["runtime-tokio-native-tls", "sqlite"], git = "https://github.com/launchbadge/sqlx", branch = "main" } +sqlx = { workspace = true, features = ["runtime-tokio-native-tls", "sqlite"] } tokio = { version = "1.19", features = ["macros", "sync"] } # Parity diff --git a/client/mapping-sync/Cargo.toml b/client/mapping-sync/Cargo.toml index d9af3c7f16..86f1dd3dab 100644 --- a/client/mapping-sync/Cargo.toml +++ b/client/mapping-sync/Cargo.toml @@ -34,7 +34,7 @@ fp-rpc = { workspace = true, features = ["default"] } [dev-dependencies] ethereum = { workspace = true } scale-codec = { package = "parity-scale-codec", workspace = true } -sqlx = { features = ["runtime-tokio-native-tls", "sqlite"], git = "https://github.com/launchbadge/sqlx", branch = "main" } +sqlx = { workspace = true, features = ["runtime-tokio-native-tls", "sqlite"] } tempfile = "3.3.0" tokio = { version = "1.24", features = ["sync"] } # Frontier From 
e87817021250d1e28b4e73b2b67d0e85c9079421 Mon Sep 17 00:00:00 2001 From: tgmichel Date: Wed, 7 Jun 2023 16:26:55 +0200 Subject: [PATCH 4/7] Update evm config to shanghai (#1068) --- docs/frame/evm.md | 2 +- frame/ethereum/src/tests/eip1559.rs | 2 +- frame/ethereum/src/tests/eip2930.rs | 2 +- frame/ethereum/src/tests/legacy.rs | 2 +- frame/evm/src/lib.rs | 4 ++-- primitives/evm/src/validation.rs | 4 ++-- ts-tests/tests/test-execute.ts | 4 ++-- ts-tests/tests/test-fee-history.ts | 2 +- ts-tests/tests/test-gas.ts | 8 ++++---- 9 files changed, 15 insertions(+), 15 deletions(-) diff --git a/docs/frame/evm.md b/docs/frame/evm.md index fc87908a29..0133e387c3 100644 --- a/docs/frame/evm.md +++ b/docs/frame/evm.md @@ -42,4 +42,4 @@ any Ethereum transaction into a transaction compatible with this module. The gas configurations are configurable. Right now, a pre-defined -London hard fork configuration option is provided. +Shanghai hard fork configuration option is provided. diff --git a/frame/ethereum/src/tests/eip1559.rs b/frame/ethereum/src/tests/eip1559.rs index 7f99cbf5a9..8a55e33033 100644 --- a/frame/ethereum/src/tests/eip1559.rs +++ b/frame/ethereum/src/tests/eip1559.rs @@ -327,7 +327,7 @@ fn transaction_should_generate_correct_gas_used() { let (pairs, mut ext) = new_test_ext(1); let alice = &pairs[0]; - let expected_gas = U256::from(893928); + let expected_gas = U256::from(894198); ext.execute_with(|| { let t = eip1559_erc20_creation_transaction(alice); diff --git a/frame/ethereum/src/tests/eip2930.rs b/frame/ethereum/src/tests/eip2930.rs index 644052cd97..56543f62dc 100644 --- a/frame/ethereum/src/tests/eip2930.rs +++ b/frame/ethereum/src/tests/eip2930.rs @@ -259,7 +259,7 @@ fn transaction_should_generate_correct_gas_used() { let (pairs, mut ext) = new_test_ext(1); let alice = &pairs[0]; - let expected_gas = U256::from(893928); + let expected_gas = U256::from(894198); ext.execute_with(|| { let t = eip2930_erc20_creation_transaction(alice); diff --git a/frame/ethereum/src/tests/legacy.rs b/frame/ethereum/src/tests/legacy.rs index 648f418012..777ff1bb88 100644 --- a/frame/ethereum/src/tests/legacy.rs +++ b/frame/ethereum/src/tests/legacy.rs @@ -259,7 +259,7 @@ fn transaction_should_generate_correct_gas_used() { let (pairs, mut ext) = new_test_ext(1); let alice = &pairs[0]; - let expected_gas = U256::from(893928); + let expected_gas = U256::from(894198); ext.execute_with(|| { let t = legacy_erc20_creation_transaction(alice); diff --git a/frame/evm/src/lib.rs b/frame/evm/src/lib.rs index 2100a29ce7..ca84889b1e 100644 --- a/frame/evm/src/lib.rs +++ b/frame/evm/src/lib.rs @@ -167,7 +167,7 @@ pub mod pallet { /// EVM config used in the module. fn config() -> &'static EvmConfig { - &LONDON_CONFIG + &SHANGHAI_CONFIG } } @@ -714,7 +714,7 @@ impl GasWeightMapping for FixedGasWeightMapping { } } -static LONDON_CONFIG: EvmConfig = EvmConfig::london(); +static SHANGHAI_CONFIG: EvmConfig = EvmConfig::shanghai(); impl Pallet { /// Check whether an account is empty. 
diff --git a/primitives/evm/src/validation.rs b/primitives/evm/src/validation.rs index 046d60be5b..3627f1d64d 100644 --- a/primitives/evm/src/validation.rs +++ b/primitives/evm/src/validation.rs @@ -227,7 +227,7 @@ mod tests { InvalidChainId, } - static LONDON_CONFIG: evm::Config = evm::Config::london(); + static SHANGHAI_CONFIG: evm::Config = evm::Config::shanghai(); impl From for TestError { fn from(e: InvalidEvmTransactionError) -> Self { @@ -293,7 +293,7 @@ mod tests { } = input; CheckEvmTransaction::::new( CheckEvmTransactionConfig { - evm_config: &LONDON_CONFIG, + evm_config: &SHANGHAI_CONFIG, block_gas_limit: blockchain_gas_limit, base_fee: blockchain_base_fee, chain_id: blockchain_chain_id, diff --git a/ts-tests/tests/test-execute.ts b/ts-tests/tests/test-execute.ts index ce0700e214..43bf103d41 100644 --- a/ts-tests/tests/test-execute.ts +++ b/ts-tests/tests/test-execute.ts @@ -158,7 +158,7 @@ describeWithFrontier("Frontier RPC (RPC execution)", (context) => { }, ]); - expect(result.result).to.be.equal("0x3043a"); + expect(result.result).to.be.equal("0x30464"); }); step("should estimateGas with gas limit up to 10x block gas limit", async function () { @@ -170,7 +170,7 @@ describeWithFrontier("Frontier RPC (RPC execution)", (context) => { }, ]); - expect(result.result).to.be.equal("0x3043a"); + expect(result.result).to.be.equal("0x30464"); }); step("shouldn't estimateGas with gas limit up higher than 10x block gas limit", async function () { diff --git a/ts-tests/tests/test-fee-history.ts b/ts-tests/tests/test-fee-history.ts index 2677d39ab7..e75edf9f97 100644 --- a/ts-tests/tests/test-fee-history.ts +++ b/ts-tests/tests/test-fee-history.ts @@ -74,7 +74,7 @@ describeWithFrontier("Frontier RPC (Fee History)", (context) => { // baseFeePerGas is always the requested block range + 1 (the next derived base fee). expect(result.baseFeePerGas.length).to.be.eq(blockCount + 1); // gasUsedRatio for the requested block range. - expect(result.gasUsedRatio).to.be.deep.eq(Array(blockCount).fill(0.03575712)); + expect(result.gasUsedRatio).to.be.deep.eq(Array(blockCount).fill(0.03576792)); // two-dimensional reward list for the requested block range. expect(result.reward.length).to.be.eq(blockCount); // each block has a reward list which's size is the requested percentile list. diff --git a/ts-tests/tests/test-gas.ts b/ts-tests/tests/test-gas.ts index 7d98982581..c3a6481b4f 100644 --- a/ts-tests/tests/test-gas.ts +++ b/ts-tests/tests/test-gas.ts @@ -42,7 +42,7 @@ describeWithFrontier("Frontier RPC (Gas)", (context) => { it("eth_estimateGas for contract creation", async function () { // The value returned as an estimation by the evm with estimate mode ON. - let oneOffEstimation = 196657; + let oneOffEstimation = 196701; let binarySearchEstimation = binarySearch(oneOffEstimation); // Sanity check expect a variance of 10%. expect(estimationVariance(binarySearchEstimation, oneOffEstimation)).to.be.lessThan(1); @@ -105,7 +105,7 @@ describeWithFrontier("Frontier RPC (Gas)", (context) => { it("eth_estimateGas should handle AccessList alias", async function () { // The value returned as an estimation by the evm with estimate mode ON. // 4300 == 1900 for one key and 2400 for one storage. - let oneOffEstimation = 196657 + 4300; + let oneOffEstimation = 196701 + 4300; let binarySearchEstimation = binarySearch(oneOffEstimation); // Sanity check expect a variance of 10%. 
expect(estimationVariance(binarySearchEstimation, oneOffEstimation)).to.be.lessThan(1); @@ -132,12 +132,12 @@ describeWithFrontier("Frontier RPC (Gas)", (context) => { data: Test.bytecode, gasPrice: "0x0", }); - expect(result).to.equal(197690); + expect(result).to.equal(197732); result = await context.web3.eth.estimateGas({ from: GENESIS_ACCOUNT, data: Test.bytecode, }); - expect(result).to.equal(197690); + expect(result).to.equal(197732); }); it("tx gas limit below ETH_BLOCK_GAS_LIMIT", async function () { From 77f6966bb150c7046efdedba5dcc766bd1e0ce32 Mon Sep 17 00:00:00 2001 From: Abhishek Shah Date: Wed, 7 Jun 2023 19:58:24 +0530 Subject: [PATCH 5/7] pass signers to Eth constructor (#1064) --- template/node/src/rpc/eth.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/template/node/src/rpc/eth.rs b/template/node/src/rpc/eth.rs index 4adcb199d9..67db9a8161 100644 --- a/template/node/src/rpc/eth.rs +++ b/template/node/src/rpc/eth.rs @@ -144,7 +144,7 @@ where graph, converter, sync.clone(), - vec![], + signers, overrides.clone(), frontier_backend.clone(), is_authority, From 5a7ac989336e70e33356a629839923964a45092e Mon Sep 17 00:00:00 2001 From: Nadimpalli Susruth Date: Wed, 7 Jun 2023 19:58:51 +0530 Subject: [PATCH 6/7] Fixed Dockerfile (#1063) --- template/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/template/Dockerfile b/template/Dockerfile index 82ccc53378..12d0d987c7 100644 --- a/template/Dockerfile +++ b/template/Dockerfile @@ -9,7 +9,7 @@ WORKDIR /frontier # Upcd dates core parts RUN apt-get update -y && \ - apt-get install -y cmake pkg-config libssl-dev git gcc build-essential clang libclang-dev + apt-get install -y cmake pkg-config libssl-dev git gcc build-essential clang libclang-dev protobuf-compiler # Install rust wasm. 
Needed for substrate wasm engine RUN rustup target add wasm32-unknown-unknown From fba84577bf6507d76d40af891d2db38f4e385c11 Mon Sep 17 00:00:00 2001 From: Qinxuan Chen Date: Wed, 7 Jun 2023 22:35:42 +0800 Subject: [PATCH 7/7] chore: adjust some imports and log of frontier-sql (#1072) --- client/db/Cargo.toml | 16 ++- client/db/src/kv/mod.rs | 3 +- client/db/src/kv/utils.rs | 1 + client/db/src/sql/mod.rs | 170 +++++++++---------------- client/mapping-sync/Cargo.toml | 13 +- client/mapping-sync/src/kv/mod.rs | 3 +- client/mapping-sync/src/kv/worker.rs | 3 +- client/mapping-sync/src/lib.rs | 1 + client/mapping-sync/src/sql/mod.rs | 177 +++++++++------------------ 9 files changed, 133 insertions(+), 254 deletions(-) diff --git a/client/db/Cargo.toml b/client/db/Cargo.toml index c834b39e8c..14d62a9ab1 100644 --- a/client/db/Cargo.toml +++ b/client/db/Cargo.toml @@ -14,17 +14,14 @@ targets = ["x86_64-unknown-linux-gnu"] async-trait = "0.1" ethereum = { workspace = true, features = ["with-codec"] } futures = "0.3.25" +kvdb-rocksdb = { workspace = true, optional = true } log = "0.4.17" +parity-db = { workspace = true, optional = true } parking_lot = "0.12.1" +scale-codec = { package = "parity-scale-codec", workspace = true } smallvec = "1.10" sqlx = { workspace = true, features = ["runtime-tokio-native-tls", "sqlite"] } tokio = { version = "1.19", features = ["macros", "sync"] } - -# Parity -kvdb-rocksdb = { workspace = true, optional = true } -parity-db = { workspace = true, optional = true } -scale-codec = { package = "parity-scale-codec", workspace = true } - # Substrate sc-client-api = { workspace = true } sc-client-db = { workspace = true, features = ["rocksdb"] } @@ -34,7 +31,6 @@ sp-core = { workspace = true } sp-database = { workspace = true } sp-runtime = { workspace = true } sp-storage = { workspace = true } - # Frontier fc-storage = { workspace = true } fp-consensus = { workspace = true, features = ["default"] } @@ -45,9 +41,11 @@ fp-storage = { workspace = true, features = ["default"] } default = ["kvdb-rocksdb", "parity-db"] [dev-dependencies] -fc-rpc = { workspace = true } maplit = "1.0.2" +tempfile = "3.3.0" +# Substrate sc-block-builder = { workspace = true } sp-consensus = { workspace = true } substrate-test-runtime-client = { workspace = true } -tempfile = "3.3.0" +# Frontier +fc-rpc = { workspace = true } diff --git a/client/db/src/kv/mod.rs b/client/db/src/kv/mod.rs index 4fda22a4f6..74d3c10ed3 100644 --- a/client/db/src/kv/mod.rs +++ b/client/db/src/kv/mod.rs @@ -36,9 +36,10 @@ use sp_core::H256; pub use sp_database::Database; use sp_runtime::traits::Block as BlockT; // Frontier -use crate::TransactionMetadata; use fp_storage::{EthereumStorageSchema, PALLET_ETHEREUM_SCHEMA_CACHE}; +use crate::TransactionMetadata; + const DB_HASH_LEN: usize = 32; /// Hash type that this backend uses for the database. pub type DbHash = [u8; DB_HASH_LEN]; diff --git a/client/db/src/kv/utils.rs b/client/db/src/kv/utils.rs index 18fc760e5c..a6c4da2dfb 100644 --- a/client/db/src/kv/utils.rs +++ b/client/db/src/kv/utils.rs @@ -18,6 +18,7 @@ use std::{path::Path, sync::Arc}; +// Substrate use sp_blockchain::HeaderBackend; use sp_runtime::traits::Block as BlockT; diff --git a/client/db/src/sql/mod.rs b/client/db/src/sql/mod.rs index 3057cabe31..c81df93fcf 100644 --- a/client/db/src/sql/mod.rs +++ b/client/db/src/sql/mod.rs @@ -16,20 +16,10 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-use fc_storage::OverrideHandle; -use fp_consensus::{FindLogError, Hashes, Log as ConsensusLog, PostLog, PreLog}; -use fp_rpc::EthereumRuntimeRPCApi; -use fp_storage::{EthereumStorageSchema, PALLET_ETHEREUM_SCHEMA}; +use std::{cmp::Ordering, collections::HashSet, num::NonZeroU32, str::FromStr, sync::Arc}; + use futures::TryStreamExt; -use sc_client_api::backend::{Backend as BackendT, StateBackend, StorageProvider}; use scale_codec::{Decode, Encode}; -use sp_api::{ApiExt, ProvideRuntimeApi}; -use sp_blockchain::HeaderBackend; -use sp_core::{H160, H256}; -use sp_runtime::{ - generic::BlockId, - traits::{BlakeTwo256, Block as BlockT, Header as HeaderT, UniqueSaturatedInto, Zero}, -}; use sqlx::{ query::Query, sqlite::{ @@ -37,11 +27,22 @@ use sqlx::{ }, ConnectOptions, Error, Execute, QueryBuilder, Row, Sqlite, }; -use std::num::NonZeroU32; - -use std::{cmp::Ordering, collections::HashSet, str::FromStr, sync::Arc}; +// Substrate +use sc_client_api::backend::{Backend as BackendT, StateBackend, StorageProvider}; +use sp_api::{ApiExt, ProvideRuntimeApi}; +use sp_blockchain::HeaderBackend; +use sp_core::{H160, H256}; +use sp_runtime::{ + generic::BlockId, + traits::{BlakeTwo256, Block as BlockT, Header as HeaderT, UniqueSaturatedInto, Zero}, +}; +// Frontier +use fc_storage::OverrideHandle; +use fp_consensus::{FindLogError, Hashes, Log as ConsensusLog, PostLog, PreLog}; +use fp_rpc::EthereumRuntimeRPCApi; +use fp_storage::{EthereumStorageSchema, PALLET_ETHEREUM_SCHEMA}; -use crate::FilteredLog; +use crate::{BackendReader, FilteredLog}; /// Maximum number to topics allowed to be filtered upon const MAX_TOPIC_COUNT: u16 = 4; @@ -135,11 +136,7 @@ where fn connect_options(config: &BackendConfig) -> Result { match config { BackendConfig::Sqlite(config) => { - log::info!( - target: "frontier-sql", - "📑 Connection configuration: {:?}", - config, - ); + log::info!(target: "frontier-sql", "📑 Connection configuration: {config:?}"); let config = sqlx::sqlite::SqliteConnectOptions::from_str(config.path)? .create_if_missing(config.create_if_missing) // https://www.sqlite.org/pragma.html#pragma_busy_timeout @@ -225,12 +222,7 @@ where ) .expect("runtime api reachable"); - log::debug!( - target: "frontier-sql", - "Index genesis block, has_api={}, hash={:?}", - has_api, - substrate_genesis_hash, - ); + log::debug!(target: "frontier-sql", "Index genesis block, has_api={has_api}, hash={substrate_genesis_hash:?}"); if has_api { // The chain has frontier support from genesis. 
@@ -282,11 +274,7 @@ where BE: BackendT + 'static, BE::State: StateBackend, { - log::trace!( - target: "frontier-sql", - "🛠️ [Metadata] Retrieving digest data for block {:?}", - hash, - ); + log::trace!(target: "frontier-sql", "🛠️ [Metadata] Retrieving digest data for block {hash:?}"); if let Ok(Some(header)) = client.header(hash) { match fp_consensus::find_log(header.digest()) { Ok(log) => { @@ -326,32 +314,24 @@ where let header_number = *header.number(); let block_number = UniqueSaturatedInto::::unique_saturated_into(header_number) as i32; - let is_canon = - match client.hash(header_number) { - Ok(Some(inner_hash)) => (inner_hash == hash) as i32, - Ok(None) => { - log::debug!( - target: "frontier-sql", - "[Metadata] Missing header for block #{} ({:?})", - block_number, hash, - ); - 0 - } - Err(err) => { - log::debug!( - "[Metadata] Failed to retrieve header for block #{} ({:?}): {:?}", - block_number, hash, err, + let is_canon = match client.hash(header_number) { + Ok(Some(inner_hash)) => (inner_hash == hash) as i32, + Ok(None) => { + log::debug!(target: "frontier-sql", "[Metadata] Missing header for block #{block_number} ({hash:?})"); + 0 + } + Err(err) => { + log::debug!( + target: "frontier-sql", + "[Metadata] Failed to retrieve header for block #{block_number} ({hash:?}): {err:?}", ); - 0 - } - }; + 0 + } + }; log::trace!( target: "frontier-sql", - "[Metadata] Prepared block metadata for #{} ({:?}) canon={}", - block_number, - hash, - is_canon, + "[Metadata] Prepared block metadata for #{block_number} ({hash:?}) canon={is_canon}", ); Ok(BlockMetadata { substrate_block_hash: hash, @@ -435,10 +415,7 @@ where let ethereum_transaction_index = i as i32; log::trace!( target: "frontier-sql", - "[Metadata] Inserting TX for block #{} - {:?} index {}", - block_number, - transaction_hash, - ethereum_transaction_index, + "[Metadata] Inserting TX for block #{block_number} - {transaction_hash:?} index {ethereum_transaction_index}", ); let _ = sqlx::query( "INSERT OR IGNORE INTO transactions( @@ -461,10 +438,7 @@ where .execute(&mut *tx) .await?; - log::debug!( - target: "frontier-sql", - "[Metadata] Ready to commit", - ); + log::debug!(target: "frontier-sql", "[Metadata] Ready to commit"); tx.commit().await } @@ -539,18 +513,11 @@ where } .await .map_err(|e| { - log::error!( - target: "frontier-sql", - "{}", - e - ) + log::error!(target: "frontier-sql", "{e}"); }); // https://www.sqlite.org/pragma.html#pragma_optimize let _ = sqlx::query("PRAGMA optimize").execute(&pool).await; - log::debug!( - target: "frontier-sql", - "Batch commited" - ); + log::debug!(target: "frontier-sql", "Batch committed"); } fn get_logs( @@ -600,9 +567,7 @@ where } log::debug!( target: "frontier-sql", - "Ready to commit {} logs from {} transactions", - log_count, - transaction_count + "Ready to commit {log_count} logs from {transaction_count} transactions" ); logs } @@ -692,11 +657,7 @@ where } } Err(err) => { - log::debug!( - target: "frontier-sql", - "Failed retrieving missing block {:?}", - err - ); + log::debug!(target: "frontier-sql", "Failed retrieving missing block {err:?}"); } } @@ -725,11 +686,7 @@ where } } Err(err) => { - log::debug!( - target: "frontier-sql", - "Failed retrieving missing block {:?}", - err - ); + log::debug!(target: "frontier-sql", "Failed retrieving missing block {err:?}"); } } @@ -847,7 +804,7 @@ where } #[async_trait::async_trait] -impl> crate::BackendReader for Backend { +impl> BackendReader for Backend { async fn block_hash( &self, ethereum_block_hash: &H256, @@ -868,6 +825,7 @@ impl> 
crate::BackendReader for Backend }); Ok(res) } + async fn transaction_metadata( &self, ethereum_transaction_hash: &H256, @@ -943,19 +901,10 @@ impl> crate::BackendReader for Backend .await .map_err(|err| format!("{:?}", err))? .set_progress_handler(self.num_ops_timeout, move || { - log::debug!( - target: "frontier-sql", - "Sqlite progress_handler triggered for {}", - log_key2, - ); + log::debug!(target: "frontier-sql", "Sqlite progress_handler triggered for {log_key2}"); false }); - log::debug!( - target: "frontier-sql", - "Query: {:?} - {}", - sql, - log_key, - ); + log::debug!(target: "frontier-sql", "Query: {sql:?} - {log_key}"); let mut out: Vec = vec![]; let mut rows = query.fetch(&mut *conn); @@ -1000,20 +949,11 @@ impl> crate::BackendReader for Backend .remove_progress_handler(); if let Some(err) = maybe_err { - log::error!( - target: "frontier-sql", - "Failed to query sql db: {:?} - {}", - err, - log_key, - ); + log::error!(target: "frontier-sql", "Failed to query sql db: {err:?} - {log_key}"); return Err("Failed to query sql db with statement".to_string()); } - log::info!( - target: "frontier-sql", - "FILTER remove handler - {}", - log_key, - ); + log::info!(target: "frontier-sql", "FILTER remove handler - {log_key}"); Ok(out) } @@ -1095,24 +1035,26 @@ LIMIT 10001", #[cfg(test)] mod test { - use super::FilteredLog; + use super::*; + + use std::{collections::BTreeMap, path::Path}; - use crate::BackendReader; - use fc_rpc::{OverrideHandle, SchemaV3Override, StorageOverride}; - use fp_storage::{EthereumStorageSchema, PALLET_ETHEREUM_SCHEMA}; use maplit::hashset; use scale_codec::Encode; + use sqlx::{sqlite::SqliteRow, QueryBuilder, Row, SqlitePool}; + use tempfile::tempdir; + // Substrate use sp_core::{H160, H256}; use sp_runtime::{ generic::{Block, Header}, traits::BlakeTwo256, }; - use sqlx::{sqlite::SqliteRow, QueryBuilder, Row, SqlitePool}; - use std::{collections::BTreeMap, path::Path, sync::Arc}; use substrate_test_runtime_client::{ DefaultTestClientBuilderExt, TestClientBuilder, TestClientBuilderExt, }; - use tempfile::tempdir; + // Frontier + use fc_rpc::{OverrideHandle, SchemaV3Override, StorageOverride}; + use fp_storage::{EthereumStorageSchema, PALLET_ETHEREUM_SCHEMA}; type OpaqueBlock = Block, substrate_test_runtime_client::runtime::Extrinsic>; diff --git a/client/mapping-sync/Cargo.toml b/client/mapping-sync/Cargo.toml index 86f1dd3dab..f3d6ca061b 100644 --- a/client/mapping-sync/Cargo.toml +++ b/client/mapping-sync/Cargo.toml @@ -24,7 +24,6 @@ sp-blockchain = { workspace = true } sp-consensus = { workspace = true, features = ["default"] } sp-core = { workspace = true } sp-runtime = { workspace = true } - # Frontier fc-db = { workspace = true } fc-storage = { workspace = true } @@ -33,20 +32,20 @@ fp-rpc = { workspace = true, features = ["default"] } [dev-dependencies] ethereum = { workspace = true } +ethereum-types = { workspace = true } scale-codec = { package = "parity-scale-codec", workspace = true } sqlx = { workspace = true, features = ["runtime-tokio-native-tls", "sqlite"] } tempfile = "3.3.0" tokio = { version = "1.24", features = ["sync"] } -# Frontier -fp-consensus = { workspace = true, features = ["std"] } -fp-storage = { workspace = true, features = ["std"] } -frontier-template-runtime = { workspace = true, features = ["default"] } # Substrate -ethereum-types = { workspace = true } -fc-rpc = { workspace = true } sc-block-builder = { workspace = true } sc-client-db = { workspace = true } sp-consensus = { workspace = true } sp-core = { workspace = true, 
features = ["default"] } sp-io = { workspace = true } substrate-test-runtime-client = { workspace = true } +# Frontier +fc-rpc = { workspace = true } +fp-consensus = { workspace = true, features = ["default"] } +fp-storage = { workspace = true, features = ["default"] } +frontier-template-runtime = { workspace = true, features = ["default"] } diff --git a/client/mapping-sync/src/kv/mod.rs b/client/mapping-sync/src/kv/mod.rs index cb0bcad529..5aea3c7a5b 100644 --- a/client/mapping-sync/src/kv/mod.rs +++ b/client/mapping-sync/src/kv/mod.rs @@ -31,11 +31,12 @@ use sp_blockchain::{Backend as _, HeaderBackend}; use sp_consensus::SyncOracle; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Zero}; // Frontier -use crate::{EthereumBlockNotification, EthereumBlockNotificationSinks, SyncStrategy}; use fc_storage::OverrideHandle; use fp_consensus::{FindLogError, Hashes, Log, PostLog, PreLog}; use fp_rpc::EthereumRuntimeRPCApi; +use crate::{EthereumBlockNotification, EthereumBlockNotificationSinks, SyncStrategy}; + pub fn sync_block( client: &C, overrides: Arc>, diff --git a/client/mapping-sync/src/kv/worker.rs b/client/mapping-sync/src/kv/worker.rs index 1f6229b6af..aeda795769 100644 --- a/client/mapping-sync/src/kv/worker.rs +++ b/client/mapping-sync/src/kv/worker.rs @@ -34,10 +34,11 @@ use sp_blockchain::HeaderBackend; use sp_consensus::SyncOracle; use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; // Frontier -use crate::SyncStrategy; use fc_storage::OverrideHandle; use fp_rpc::EthereumRuntimeRPCApi; +use crate::SyncStrategy; + pub struct MappingSyncWorker { import_notifications: ImportNotifications, timeout: Duration, diff --git a/client/mapping-sync/src/lib.rs b/client/mapping-sync/src/lib.rs index 951978effd..50872529e9 100644 --- a/client/mapping-sync/src/lib.rs +++ b/client/mapping-sync/src/lib.rs @@ -17,6 +17,7 @@ // along with this program. If not, see . #![deny(unused_crate_dependencies)] +#![allow(clippy::too_many_arguments)] pub mod kv; pub mod sql; diff --git a/client/mapping-sync/src/sql/mod.rs b/client/mapping-sync/src/sql/mod.rs index 39fab6a41b..213d7a0f61 100644 --- a/client/mapping-sync/src/sql/mod.rs +++ b/client/mapping-sync/src/sql/mod.rs @@ -16,18 +16,20 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -#![allow(clippy::too_many_arguments)] +use std::{ops::DerefMut, sync::Arc, time::Duration}; -use crate::EthereumBlockNotification; -use fp_rpc::EthereumRuntimeRPCApi; use futures::prelude::*; +// Substrate use sc_client_api::backend::{Backend as BackendT, StateBackend, StorageProvider}; use sp_api::{HeaderT, ProvideRuntimeApi}; use sp_blockchain::{Backend, HeaderBackend}; use sp_consensus::SyncOracle; use sp_core::H256; use sp_runtime::traits::{BlakeTwo256, Block as BlockT, UniqueSaturatedInto}; -use std::{ops::DerefMut, sync::Arc, time::Duration}; +// Frontier +use fp_rpc::EthereumRuntimeRPCApi; + +use crate::{EthereumBlockNotification, EthereumBlockNotificationSinks, SyncStrategy}; /// Defines the commands for the sync worker. 
#[derive(Debug)] @@ -63,10 +65,10 @@ pub struct SyncWorker { impl SyncWorker where - Block: BlockT + Send + Sync, - Client: StorageProvider + HeaderBackend + Send + Sync + 'static, + Block: BlockT, Client: ProvideRuntimeApi, Client::Api: EthereumRuntimeRPCApi, + Client: HeaderBackend + StorageProvider + 'static, Backend: BackendT + 'static, Backend::State: StateBackend, { @@ -77,28 +79,20 @@ where substrate_backend: Arc, indexer_backend: Arc>, pubsub_notification_sinks: Arc< - crate::EthereumBlockNotificationSinks>, + EthereumBlockNotificationSinks>, >, ) -> tokio::sync::mpsc::Sender { let (tx, mut rx) = tokio::sync::mpsc::channel(100); tokio::task::spawn(async move { while let Some(cmd) = rx.recv().await { - log::debug!( - target: "frontier-sql", - "💬 Recv Worker Command {:?}", - cmd, - ); - println!("💬 Recv Worker Command {:?}", cmd,); + log::debug!(target: "frontier-sql", "💬 Recv Worker Command {cmd:?}"); + println!("💬 Recv Worker Command {cmd:?}"); match cmd { WorkerCommand::ResumeSync => { // Attempt to resume from last indexed block. If there is no data in the db, sync genesis. match indexer_backend.get_last_indexed_canon_block().await.ok() { Some(last_block_hash) => { - log::debug!( - target: "frontier-sql", - "Resume from last block {:?}", - last_block_hash, - ); + log::debug!(target: "frontier-sql", "Resume from last block {last_block_hash:?}"); if let Some(parent_hash) = client .header(last_block_hash) .ok() @@ -159,11 +153,7 @@ where if let Some(block_hash) = indexer_backend.get_first_pending_canon_block().await { - log::debug!( - target: "frontier-sql", - "Indexing pending canonical block {:?}", - block_hash, - ); + log::debug!(target: "frontier-sql", "Indexing pending canonical block {block_hash:?}"); indexer_backend .index_block_logs(client.clone(), block_hash) .await; @@ -191,14 +181,14 @@ where indexer_backend: Arc>, import_notifications: sc_client_api::ImportNotifications, worker_config: SyncWorkerConfig, - sync_strategy: crate::SyncStrategy, + sync_strategy: SyncStrategy, sync_oracle: Arc, pubsub_notification_sinks: Arc< - crate::EthereumBlockNotificationSinks>, + EthereumBlockNotificationSinks>, >, ) { // work in progress for `SyncStrategy::Normal` to also index non-best blocks. 
- if sync_strategy == crate::SyncStrategy::Normal { + if sync_strategy == SyncStrategy::Normal { panic!("'SyncStrategy::Normal' is not supported") } @@ -292,10 +282,10 @@ async fn index_block_and_ancestors( indexer_backend: Arc>, hash: H256, ) where - Block: BlockT + Send + Sync, - Client: StorageProvider + HeaderBackend + Send + Sync + 'static, + Block: BlockT, Client: ProvideRuntimeApi, Client::Api: EthereumRuntimeRPCApi, + Client: HeaderBackend + StorageProvider + 'static, Backend: BackendT + 'static, Backend::State: StateBackend, { @@ -309,33 +299,18 @@ async fn index_block_and_ancestors( // exit if block is already imported if indexer_backend.is_block_indexed(hash).await { - log::debug!( - target: "frontier-sql", - "🔴 Block {:?} already imported", - hash, - ); + log::debug!(target: "frontier-sql", "🔴 Block {hash:?} already imported"); break; } - log::debug!( - target: "frontier-sql", - "🛠️ Importing {:?}", - hash, - ); + log::debug!(target: "frontier-sql", "🛠️ Importing {hash:?}"); let _ = indexer_backend .insert_block_metadata(client.clone(), hash) .await .map_err(|e| { - log::error!( - target: "frontier-sql", - "{}", - e, - ); + log::error!(target: "frontier-sql", "{e}"); }); - log::debug!( - target: "frontier-sql", - "Inserted block metadata" - ); + log::debug!(target: "frontier-sql", "Inserted block metadata"); indexer_backend.index_block_logs(client.clone(), hash).await; if let Ok(Some(header)) = blockchain_backend.header(hash) { @@ -355,10 +330,10 @@ async fn index_canonical_block_and_ancestors( indexer_backend: Arc>, hash: H256, ) where - Block: BlockT + Send + Sync, - Client: StorageProvider + HeaderBackend + Send + Sync + 'static, + Block: BlockT, Client: ProvideRuntimeApi, Client::Api: EthereumRuntimeRPCApi, + Client: HeaderBackend + StorageProvider + 'static, Backend: BackendT + 'static, Backend::State: StateBackend, { @@ -374,31 +349,18 @@ async fn index_canonical_block_and_ancestors( // exit if canonical block is already imported if status.indexed && status.canon { - log::debug!( - target: "frontier-sql", - "🔴 Block {:?} already imported", - hash, - ); + log::debug!(target: "frontier-sql", "🔴 Block {hash:?} already imported"); break; } // If block was previously indexed as non-canon then mark it as canon if status.indexed && !status.canon { if let Err(err) = indexer_backend.set_block_as_canon(hash).await { - log::error!( - target: "frontier-sql", - "Failed setting block {:?} as canon: {:?}", - hash, - err, - ); + log::error!(target: "frontier-sql", "Failed setting block {hash:?} as canon: {err:?}"); continue; } - log::debug!( - target: "frontier-sql", - "🛠️ Marked block as canon {:?}", - hash, - ); + log::debug!(target: "frontier-sql", "🛠️ Marked block as canon {hash:?}"); // Check parent block if let Ok(Some(header)) = blockchain_backend.header(hash) { @@ -409,26 +371,14 @@ async fn index_canonical_block_and_ancestors( } // Else, import the new block - log::debug!( - target: "frontier-sql", - "🛠️ Importing {:?}", - hash, - ); + log::debug!(target: "frontier-sql", "🛠️ Importing {hash:?}"); let _ = indexer_backend .insert_block_metadata(client.clone(), hash) .await .map_err(|e| { - log::error!( - target: "frontier-sql", - "{}", - e, - ); + log::error!(target: "frontier-sql", "{e}"); }); - log::debug!( - target: "frontier-sql", - "Inserted block metadata {:?}", - hash - ); + log::debug!(target: "frontier-sql", "Inserted block metadata {hash:?}"); indexer_backend.index_block_logs(client.clone(), hash).await; if let Ok(Some(header)) = blockchain_backend.header(hash) { @@ 
-440,14 +390,12 @@ async fn index_canonical_block_and_ancestors( /// Canonicalizes the database by setting the `is_canon` field for the retracted blocks to `0`, /// and `1` if they are enacted. -async fn canonicalize_blocks( +async fn canonicalize_blocks>( indexer_backend: Arc>, common: H256, enacted: Vec, retracted: Vec, -) where - Block: BlockT + Send + Sync, -{ +) { if (indexer_backend.canonicalize(&retracted, &enacted).await).is_err() { log::error!( target: "frontier-sql", @@ -467,19 +415,15 @@ async fn index_missing_blocks( substrate_backend: Arc, indexer_backend: Arc>, ) where - Block: BlockT + Send + Sync, - Client: StorageProvider + HeaderBackend + Send + Sync + 'static, + Block: BlockT, Client: ProvideRuntimeApi, Client::Api: EthereumRuntimeRPCApi, + Client: HeaderBackend + StorageProvider + 'static, Backend: BackendT + 'static, Backend::State: StateBackend, { if let Some(block_number) = indexer_backend.get_first_missing_canon_block().await { - log::debug!( - target: "frontier-sql", - "Missing {:?}", - block_number, - ); + log::debug!(target: "frontier-sql", "Missing {block_number:?}"); if block_number == 0 { index_genesis_block(client.clone(), indexer_backend.clone()).await; } else if let Ok(Some(block_hash)) = client.hash(block_number.unique_saturated_into()) { @@ -497,11 +441,7 @@ async fn index_missing_blocks( ) .await; } else { - log::debug!( - target: "frontier-sql", - "Failed retrieving hash for block #{}", - block_number, - ); + log::debug!(target: "frontier-sql", "Failed retrieving hash for block #{block_number}"); } } } @@ -513,10 +453,10 @@ async fn index_genesis_block( client: Arc, indexer_backend: Arc>, ) where - Block: BlockT + Send + Sync, - Client: StorageProvider + HeaderBackend + Send + Sync + 'static, + Block: BlockT, Client: ProvideRuntimeApi, Client::Api: EthereumRuntimeRPCApi, + Client: HeaderBackend + StorageProvider + 'static, Backend: BackendT + 'static, Backend::State: StateBackend, { @@ -528,32 +468,25 @@ async fn index_genesis_block( .insert_genesis_block_metadata(client.clone()) .await .map_err(|e| { - log::error!( - target: "frontier-sql", - "💔 Cannot sync genesis block: {}", - e, - ) + log::error!(target: "frontier-sql", "💔 Cannot sync genesis block: {e}"); }) { - log::debug!( - target: "frontier-sql", - "Imported genesis block {:?}", - substrate_genesis_hash, - ); + log::debug!(target: "frontier-sql", "Imported genesis block {substrate_genesis_hash:?}"); } } #[cfg(test)] mod test { use super::*; - use crate::{EthereumBlockNotification, EthereumBlockNotificationSinks}; - use fc_rpc::{OverrideHandle, SchemaV3Override, StorageOverride}; - use fp_storage::{ - EthereumStorageSchema, ETHEREUM_CURRENT_RECEIPTS, PALLET_ETHEREUM, PALLET_ETHEREUM_SCHEMA, - }; + + use std::{collections::BTreeMap, path::Path, sync::Arc}; + use futures::executor; + use scale_codec::Encode; + use sqlx::Row; + use tempfile::tempdir; + // Substrate use sc_block_builder::BlockBuilderProvider; use sc_client_api::{BlockchainEvents, HeaderBackend}; - use scale_codec::Encode; use sp_consensus::BlockOrigin; use sp_core::{H160, H256, U256}; use sp_io::hashing::twox_128; @@ -561,12 +494,14 @@ mod test { generic::{Digest, Header}, traits::BlakeTwo256, }; - use sqlx::Row; - use std::{collections::BTreeMap, path::Path, sync::Arc}; use substrate_test_runtime_client::{ prelude::*, DefaultTestClientBuilderExt, TestClientBuilder, TestClientBuilderExt, }; - use tempfile::tempdir; + // Frontier + use fc_rpc::{OverrideHandle, SchemaV3Override, StorageOverride}; + use fp_storage::{ + 
EthereumStorageSchema, ETHEREUM_CURRENT_RECEIPTS, PALLET_ETHEREUM, PALLET_ETHEREUM_SCHEMA, + }; type OpaqueBlock = sp_runtime::generic::Block< Header, @@ -755,7 +690,7 @@ mod test { read_notification_timeout: Duration::from_secs(1), check_indexed_blocks_interval: Duration::from_secs(60), }, - crate::SyncStrategy::Parachain, + SyncStrategy::Parachain, Arc::new(test_sync_oracle), pubsub_notification_sinks_inner, ) @@ -885,7 +820,7 @@ mod test { read_notification_timeout: Duration::from_secs(10), check_indexed_blocks_interval: Duration::from_secs(60), }, - crate::SyncStrategy::Parachain, + SyncStrategy::Parachain, Arc::new(test_sync_oracle), pubsub_notification_sinks_inner, ) @@ -1088,7 +1023,7 @@ mod test { read_notification_timeout: Duration::from_secs(10), check_indexed_blocks_interval: Duration::from_secs(60), }, - crate::SyncStrategy::Parachain, + SyncStrategy::Parachain, Arc::new(test_sync_oracle), pubsub_notification_sinks_inner, ) @@ -1282,7 +1217,7 @@ mod test { read_notification_timeout: Duration::from_secs(10), check_indexed_blocks_interval: Duration::from_secs(60), }, - crate::SyncStrategy::Parachain, + SyncStrategy::Parachain, Arc::new(test_sync_oracle), pubsub_notification_sinks_inner, )
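
The frontier-sql changes in this patch standardize on single-line `log` macro calls that keep the `target:` key and use captured format arguments ("{hash:?}") instead of multi-line positional ones. A minimal sketch of that convention, assuming a plain `log` + `env_logger` setup outside Frontier; `report_import` and its argument types are placeholders, not Frontier APIs:

// Minimal sketch of the single-line, target-scoped logging style adopted in
// the frontier-sql changes above. `report_import` and its arguments are
// hypothetical; only the `log` macro usage mirrors the patch.
fn report_import(block_number: u32, hash: [u8; 32], err: Option<&str>) {
    // Captured identifiers (Rust 1.58+) keep each event on one line.
    log::debug!(target: "frontier-sql", "🛠️ Importing #{block_number} ({hash:?})");

    if let Some(err) = err {
        // Errors share the same target so they can be filtered together.
        log::error!(target: "frontier-sql", "Failed to index block #{block_number}: {err:?}");
    }
}

fn main() {
    // Any `log` backend works; `env_logger` is assumed here purely for the demo.
    env_logger::init();
    report_import(1, [0u8; 32], Some("connection reset"));
}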