diff --git a/core/bin/external_node/src/main.rs b/core/bin/external_node/src/main.rs index 0d954e6a91a..7fa0ea9b5a6 100644 --- a/core/bin/external_node/src/main.rs +++ b/core/bin/external_node/src/main.rs @@ -142,7 +142,9 @@ async fn init_tasks( let metadata_calculator = MetadataCalculator::new(&MetadataCalculatorConfig { db_path: &config.required.merkle_tree_path, - mode: MetadataCalculatorModeConfig::Lightweight, + mode: MetadataCalculatorModeConfig::Full { + store_factory: None, + }, delay_interval: config.optional.metadata_calculator_delay(), max_l1_batches_per_iter: config.optional.max_l1_batches_per_tree_iter, multi_get_chunk_size: config.optional.merkle_tree_multi_get_chunk_size, diff --git a/core/lib/circuit_breaker/src/facet_selectors.rs b/core/lib/circuit_breaker/src/facet_selectors.rs deleted file mode 100644 index 3fe2594e70a..00000000000 --- a/core/lib/circuit_breaker/src/facet_selectors.rs +++ /dev/null @@ -1,158 +0,0 @@ -use backon::{ConstantBuilder, Retryable}; -use convert_case::{Case, Casing}; -use std::{collections::BTreeMap, env, fmt, fs, path::Path, str::FromStr}; - -use zksync_config::configs::chain::CircuitBreakerConfig; -use zksync_contracts::zksync_contract; -use zksync_eth_client::{types::Error as EthClientError, EthInterface}; -use zksync_types::{ethabi::Token, Address, H160}; - -// local imports -use crate::{utils::unwrap_tuple, CircuitBreaker, CircuitBreakerError}; - -#[derive(Debug)] -pub struct MismatchedFacetSelectorsError { - pub server_selectors: String, - pub contract_selectors: String, -} - -impl fmt::Display for MismatchedFacetSelectorsError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!( - f, - "server: {}, contract: {}", - self.server_selectors, self.contract_selectors - ) - } -} - -#[derive(Debug)] -pub struct FacetSelectorsChecker { - eth_client: E, - // BTreeMap is used to have fixed order of elements when printing error. - server_selectors: BTreeMap>, - config: CircuitBreakerConfig, - main_contract: H160, -} - -impl FacetSelectorsChecker { - pub fn new(config: &CircuitBreakerConfig, eth_client: E, main_contract: H160) -> Self { - let zksync_home = env::var("ZKSYNC_HOME").unwrap_or_else(|_| ".".into()); - let path_str = "contracts/ethereum/artifacts/cache/solpp-generated-contracts/zksync/facets"; - let facets_path = Path::new(&zksync_home).join(path_str); - let paths = fs::read_dir(facets_path).unwrap(); - let server_selectors = paths - .into_iter() - .filter_map(|path| { - let file_name: String = path.unwrap().file_name().into_string().unwrap(); - let facet_name: &str = file_name.as_str().split('.').next().unwrap(); - // Exclude `Base` contract. - if facet_name == "Base" { - return None; - } - let env_name = format!( - "CONTRACTS_{}_FACET_ADDR", - facet_name.to_case(Case::ScreamingSnake) - ); - let address = Address::from_str(&env::var(env_name).unwrap()).unwrap(); - - let contract = zksync_contracts::load_contract( - format!("{0}/{1}.sol/{1}Facet.json", path_str, facet_name).as_str(), - ); - // Filter out `getName` function. 
Because it's a part of the common interface and it could messed up the selectors - let selectors = contract - .functions - .into_values() - .filter(|func| { - let func = func.first().cloned().unwrap(); - func.name != "getName" - }) - .map(|func| { - let func = func.first().cloned().unwrap(); - format!("0x{}", hex::encode(func.short_signature())) - }) - .collect(); - - Some((address, selectors)) - }) - .collect(); - - Self { - eth_client, - server_selectors, - config: config.clone(), - main_contract, - } - } -} - -impl FacetSelectorsChecker { - async fn get_contract_facet_selectors(&self) -> BTreeMap> { - let facets = self.get_facets_token_with_retry().await.unwrap(); - - parse_faucets_token(facets) - } - - pub(super) async fn get_facets_token_with_retry(&self) -> Result { - (|| async { - let result: Result = self - .eth_client - .call_contract_function( - "facets", - (), - None, - Default::default(), - None, - self.main_contract, - zksync_contract(), - ) - .await; - - result - }) - .retry( - &ConstantBuilder::default() - .with_max_times(self.config.http_req_max_retry_number) - .with_delay(self.config.http_req_retry_interval()), - ) - .await - } -} - -#[async_trait::async_trait] -impl CircuitBreaker for FacetSelectorsChecker { - async fn check(&self) -> Result<(), CircuitBreakerError> { - let contract_selectors = self.get_contract_facet_selectors().await; - if self.server_selectors != contract_selectors { - return Err(CircuitBreakerError::MismatchedFacetSelectors( - MismatchedFacetSelectorsError { - server_selectors: serde_json::to_string_pretty(&self.server_selectors).unwrap(), - contract_selectors: serde_json::to_string_pretty(&contract_selectors).unwrap(), - }, - )); - } - - Ok(()) - } -} - -fn parse_faucets_token(facets: Token) -> BTreeMap> { - let facets = facets.into_array().unwrap(); - facets - .into_iter() - .map(|facet| { - let tokens = unwrap_tuple(facet); - let address = tokens[0].clone().into_address().unwrap(); - let selectors = tokens[1] - .clone() - .into_array() - .unwrap() - .into_iter() - .map(|token| { - "0x".to_string() + hex::encode(token.into_fixed_bytes().unwrap()).as_str() - }) - .collect(); - (address, selectors) - }) - .collect() -} diff --git a/core/lib/circuit_breaker/src/lib.rs b/core/lib/circuit_breaker/src/lib.rs index d92869839b3..878114f0d04 100644 --- a/core/lib/circuit_breaker/src/lib.rs +++ b/core/lib/circuit_breaker/src/lib.rs @@ -7,22 +7,14 @@ use tokio::sync::watch; use zksync_config::configs::chain::CircuitBreakerConfig; -use crate::facet_selectors::MismatchedFacetSelectorsError; - -pub mod facet_selectors; pub mod l1_txs; pub mod replication_lag; pub mod utils; -#[cfg(test)] -mod tests; - #[derive(Debug, Error)] pub enum CircuitBreakerError { #[error("System has failed L1 transaction")] FailedL1Transaction, - #[error("Mismatched facet selectors: {0}")] - MismatchedFacetSelectors(MismatchedFacetSelectorsError), #[error("Replication lag ({0:?}) is above the threshold ({1:?})")] ReplicationLag(u32, u32), } diff --git a/core/lib/circuit_breaker/src/tests/mod.rs b/core/lib/circuit_breaker/src/tests/mod.rs deleted file mode 100644 index 9ff2c3c05fd..00000000000 --- a/core/lib/circuit_breaker/src/tests/mod.rs +++ /dev/null @@ -1,287 +0,0 @@ -use std::sync::Mutex; - -use assert_matches::assert_matches; -use async_trait::async_trait; - -use zksync_config::configs::{chain::CircuitBreakerConfig, ContractsConfig}; -use zksync_eth_client::{ - types::{Error, ExecutedTxStatus, FailureInfo, SignedCallResult}, - BoundEthInterface, EthInterface, -}; -use 
zksync_types::web3::types::Block; -use zksync_types::{ - ethabi::Token, - web3::{ - self, - contract::{ - tokens::{Detokenize, Tokenize}, - Options, - }, - error::TransportError, - ethabi, - types::{ - Address, BlockId, BlockNumber, Filter, Log, Transaction, TransactionReceipt, H160, - H256, U256, - }, - }, - L1ChainId, U64, -}; - -#[derive(Debug)] -pub struct ETHDirectClientMock { - contract: ethabi::Contract, - // next 2 are needed for simulation of the few ZksInterface functions, - // to test retries - circuit_breaker_config: CircuitBreakerConfig, - counter: Mutex, -} - -impl ETHDirectClientMock { - pub fn new() -> Self { - Self { - contract: Default::default(), - circuit_breaker_config: get_test_circuit_breaker_config(), - counter: Mutex::new(0), - } - } - - fn inc_counter(&self) { - let mut current = self.counter.lock().unwrap(); - *current += 1; - } - - fn get_counter_cur_val(&self) -> u8 { - let current = self.counter.lock().unwrap(); - *current - } - - fn reset_counter(&self) { - let mut current = self.counter.lock().unwrap(); - *current = 0; - } - - // The idea of this function is to simulate the behavior when function call fails all the time, - // and when the current attempt is the last one it succeeds and returns Ok() - pub fn simulate_get_contract_behavior(&self) -> Result - where - R: Detokenize + Unpin, - { - self.inc_counter(); - - let cur_val = self.get_counter_cur_val(); - // If the condition returns `true`, it means that its the last attempt of the retry() wrapper function. - // Otherwise we pretend that there are some eth_client issues and return Err() - if cur_val as usize == self.circuit_breaker_config.http_req_max_retry_number { - self.reset_counter(); - Ok( - Detokenize::from_tokens(vec![Token::Array(vec![Token::Tuple(vec![ - Token::Address(H160::zero()), - Token::Array(vec![Token::FixedBytes(vec![0, 0, 0, 0, 0, 0])]), - ])])]) - .unwrap(), - ) - } else { - Err(Error::EthereumGateway(web3::error::Error::Transport( - TransportError::Code(503), - ))) - } - } -} - -fn get_test_circuit_breaker_config() -> CircuitBreakerConfig { - CircuitBreakerConfig { - sync_interval_ms: 1000, - http_req_max_retry_number: 5, - http_req_retry_interval_sec: 2, - replication_lag_limit_sec: Some(10), - } -} -#[async_trait] -impl EthInterface for ETHDirectClientMock { - /// Note: The only really implemented method! Other ones are just stubs. 
- #[allow(clippy::too_many_arguments)] - async fn call_contract_function( - &self, - _func: &str, - _params: P, - _from: A, - _options: Options, - _block: B, - _contract_address: Address, - _contract_abi: ethabi::Contract, - ) -> Result - where - R: Detokenize + Unpin, - A: Into> + Send, - B: Into> + Send, - P: Tokenize + Send, - { - self.simulate_get_contract_behavior() - } - - async fn get_tx_status( - &self, - _hash: H256, - _: &'static str, - ) -> Result, Error> { - Ok(None) - } - - async fn block_number(&self, _: &'static str) -> Result { - Ok(Default::default()) - } - - async fn send_raw_tx(&self, _tx: Vec) -> Result { - Ok(Default::default()) - } - - async fn nonce_at_for_account( - &self, - _account: Address, - _block: BlockNumber, - _: &'static str, - ) -> Result { - Ok(Default::default()) - } - - async fn get_gas_price(&self, _: &'static str) -> Result { - Ok(Default::default()) - } - - async fn base_fee_history( - &self, - _from_block: usize, - _block_count: usize, - _component: &'static str, - ) -> Result, Error> { - Ok(Default::default()) - } - - async fn get_pending_block_base_fee_per_gas( - &self, - _component: &'static str, - ) -> Result { - Ok(Default::default()) - } - - async fn failure_reason(&self, _tx_hash: H256) -> Result, Error> { - Ok(Default::default()) - } - - async fn get_tx( - &self, - _hash: H256, - _component: &'static str, - ) -> Result, Error> { - Ok(Default::default()) - } - - async fn tx_receipt( - &self, - _tx_hash: H256, - _component: &'static str, - ) -> Result, Error> { - Ok(Default::default()) - } - - async fn eth_balance( - &self, - _address: Address, - _component: &'static str, - ) -> Result { - Ok(Default::default()) - } - - async fn logs(&self, _filter: Filter, _component: &'static str) -> Result, Error> { - Ok(Default::default()) - } - - async fn block( - &self, - _block_id: String, - _component: &'static str, - ) -> Result>, Error> { - Ok(Default::default()) - } -} - -#[async_trait] -impl BoundEthInterface for ETHDirectClientMock { - fn contract(&self) -> ðabi::Contract { - &self.contract - } - - fn contract_addr(&self) -> H160 { - Default::default() - } - - fn chain_id(&self) -> L1ChainId { - L1ChainId(0) - } - - fn sender_account(&self) -> Address { - Default::default() - } - - async fn sign_prepared_tx_for_addr( - &self, - _data: Vec, - _contract_addr: H160, - _options: Options, - _component: &'static str, - ) -> Result { - Ok(SignedCallResult { - raw_tx: vec![], - max_priority_fee_per_gas: U256::zero(), - max_fee_per_gas: U256::zero(), - nonce: U256::zero(), - hash: H256::zero(), - }) - } - - async fn allowance_on_account( - &self, - _token_address: Address, - _contract_address: Address, - _erc20_abi: ethabi::Contract, - ) -> Result { - Ok(Default::default()) - } -} - -#[tokio::test] -async fn retries_for_facet_selectors() { - let eth_client = ETHDirectClientMock::new(); - - let result: Result = eth_client - .call_contract_function( - "get_verification_key", - (), - None, - Default::default(), - None, - Address::default(), - eth_client.contract().clone(), - ) - .await; - - assert_matches!( - result, - Err(Error::EthereumGateway(web3::error::Error::Transport( - TransportError::Code(503), - ))) - ); - - let contracts = ContractsConfig::from_env().unwrap(); - let config = get_test_circuit_breaker_config(); - let facet_selectors_checker = crate::facet_selectors::FacetSelectorsChecker::new( - &config, - eth_client, - contracts.diamond_proxy_addr, - ); - - assert_matches!( - facet_selectors_checker.get_facets_token_with_retry().await, - Ok(_) - ); 
-} diff --git a/core/lib/commitment_utils/src/lib.rs b/core/lib/commitment_utils/src/lib.rs index c99332009d2..49b8ee7ef50 100644 --- a/core/lib/commitment_utils/src/lib.rs +++ b/core/lib/commitment_utils/src/lib.rs @@ -10,7 +10,7 @@ pub fn events_queue_commitment( protocol_version: ProtocolVersionId, ) -> Option { match protocol_version { - id if id < ProtocolVersionId::Version17 => None, + id if id.is_pre_boojum() => None, ProtocolVersionId::Version17 => Some(H256(events_queue_commitment_fixed(events_queue))), id => unimplemented!("events_queue_commitment is not implemented for {id:?}"), } @@ -21,7 +21,7 @@ pub fn bootloader_initial_content_commitment( protocol_version: ProtocolVersionId, ) -> Option { match protocol_version { - id if id < ProtocolVersionId::Version17 => None, + id if id.is_pre_boojum() => None, ProtocolVersionId::Version17 => { let full_bootloader_memory = expand_memory_contents(initial_bootloader_contents, USED_BOOTLOADER_MEMORY_BYTES); diff --git a/core/lib/config/src/configs/database.rs b/core/lib/config/src/configs/database.rs index 465a493172a..0628d8d4565 100644 --- a/core/lib/config/src/configs/database.rs +++ b/core/lib/config/src/configs/database.rs @@ -10,8 +10,8 @@ use super::envy_load; #[derive(Debug, Clone, Copy, Default, PartialEq, Eq, Serialize, Deserialize)] #[serde(rename_all = "snake_case")] pub enum MerkleTreeMode { - /// In this mode, `MetadataCalculator` will compute witness inputs for all storage operations - /// and put them into the object store as provided by `store_factory` (e.g., GCS). + /// In this mode, `MetadataCalculator` will compute commitments and witness inputs for all storage operations + /// and optionally put witness inputs into the object store as provided by `store_factory` (e.g., GCS). #[default] Full, /// In this mode, `MetadataCalculator` computes Merkle tree root hashes and some auxiliary information diff --git a/core/lib/contracts/src/lib.rs b/core/lib/contracts/src/lib.rs index cdc5b8b0e6a..a64aef1c916 100644 --- a/core/lib/contracts/src/lib.rs +++ b/core/lib/contracts/src/lib.rs @@ -5,7 +5,7 @@ #![allow(clippy::derive_partial_eq_without_eq)] use ethabi::{ ethereum_types::{H256, U256}, - Contract, + Contract, Function, }; use once_cell::sync::Lazy; use serde::{Deserialize, Serialize}; @@ -393,3 +393,314 @@ impl BaseSystemContracts { } } } + +pub static PRE_BOOJUM_COMMIT_FUNCTION: Lazy = Lazy::new(|| { + let abi = r#" + { + "inputs": [ + { + "components": [ + { + "internalType": "uint64", + "name": "blockNumber", + "type": "uint64" + }, + { + "internalType": "bytes32", + "name": "blockHash", + "type": "bytes32" + }, + { + "internalType": "uint64", + "name": "indexRepeatedStorageChanges", + "type": "uint64" + }, + { + "internalType": "uint256", + "name": "numberOfLayer1Txs", + "type": "uint256" + }, + { + "internalType": "bytes32", + "name": "priorityOperationsHash", + "type": "bytes32" + }, + { + "internalType": "bytes32", + "name": "l2LogsTreeRoot", + "type": "bytes32" + }, + { + "internalType": "uint256", + "name": "timestamp", + "type": "uint256" + }, + { + "internalType": "bytes32", + "name": "commitment", + "type": "bytes32" + } + ], + "internalType": "struct IExecutor.StoredBlockInfo", + "name": "_lastCommittedBlockData", + "type": "tuple" + }, + { + "components": [ + { + "internalType": "uint64", + "name": "blockNumber", + "type": "uint64" + }, + { + "internalType": "uint64", + "name": "timestamp", + "type": "uint64" + }, + { + "internalType": "uint64", + "name": "indexRepeatedStorageChanges", + "type": "uint64" + }, + 
{ + "internalType": "bytes32", + "name": "newStateRoot", + "type": "bytes32" + }, + { + "internalType": "uint256", + "name": "numberOfLayer1Txs", + "type": "uint256" + }, + { + "internalType": "bytes32", + "name": "l2LogsTreeRoot", + "type": "bytes32" + }, + { + "internalType": "bytes32", + "name": "priorityOperationsHash", + "type": "bytes32" + }, + { + "internalType": "bytes", + "name": "initialStorageChanges", + "type": "bytes" + }, + { + "internalType": "bytes", + "name": "repeatedStorageChanges", + "type": "bytes" + }, + { + "internalType": "bytes", + "name": "l2Logs", + "type": "bytes" + }, + { + "internalType": "bytes[]", + "name": "l2ArbitraryLengthMessages", + "type": "bytes[]" + }, + { + "internalType": "bytes[]", + "name": "factoryDeps", + "type": "bytes[]" + } + ], + "internalType": "struct IExecutor.CommitBlockInfo[]", + "name": "_newBlocksData", + "type": "tuple[]" + } + ], + "name": "commitBlocks", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }"#; + serde_json::from_str(abi).unwrap() +}); + +pub static PRE_BOOJUM_PROVE_FUNCTION: Lazy = Lazy::new(|| { + let abi = r#" + { + "inputs": [ + { + "components": [ + { + "internalType": "uint64", + "name": "blockNumber", + "type": "uint64" + }, + { + "internalType": "bytes32", + "name": "blockHash", + "type": "bytes32" + }, + { + "internalType": "uint64", + "name": "indexRepeatedStorageChanges", + "type": "uint64" + }, + { + "internalType": "uint256", + "name": "numberOfLayer1Txs", + "type": "uint256" + }, + { + "internalType": "bytes32", + "name": "priorityOperationsHash", + "type": "bytes32" + }, + { + "internalType": "bytes32", + "name": "l2LogsTreeRoot", + "type": "bytes32" + }, + { + "internalType": "uint256", + "name": "timestamp", + "type": "uint256" + }, + { + "internalType": "bytes32", + "name": "commitment", + "type": "bytes32" + } + ], + "internalType": "struct IExecutor.StoredBlockInfo", + "name": "_prevBlock", + "type": "tuple" + }, + { + "components": [ + { + "internalType": "uint64", + "name": "blockNumber", + "type": "uint64" + }, + { + "internalType": "bytes32", + "name": "blockHash", + "type": "bytes32" + }, + { + "internalType": "uint64", + "name": "indexRepeatedStorageChanges", + "type": "uint64" + }, + { + "internalType": "uint256", + "name": "numberOfLayer1Txs", + "type": "uint256" + }, + { + "internalType": "bytes32", + "name": "priorityOperationsHash", + "type": "bytes32" + }, + { + "internalType": "bytes32", + "name": "l2LogsTreeRoot", + "type": "bytes32" + }, + { + "internalType": "uint256", + "name": "timestamp", + "type": "uint256" + }, + { + "internalType": "bytes32", + "name": "commitment", + "type": "bytes32" + } + ], + "internalType": "struct IExecutor.StoredBlockInfo[]", + "name": "_committedBlocks", + "type": "tuple[]" + }, + { + "components": [ + { + "internalType": "uint256[]", + "name": "recursiveAggregationInput", + "type": "uint256[]" + }, + { + "internalType": "uint256[]", + "name": "serializedProof", + "type": "uint256[]" + } + ], + "internalType": "struct IExecutor.ProofInput", + "name": "_proof", + "type": "tuple" + } + ], + "name": "proveBlocks", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }"#; + serde_json::from_str(abi).unwrap() +}); + +pub static PRE_BOOJUM_EXECUTE_FUNCTION: Lazy = Lazy::new(|| { + let abi = r#" + { + "inputs": [ + { + "components": [ + { + "internalType": "uint64", + "name": "blockNumber", + "type": "uint64" + }, + { + "internalType": "bytes32", + "name": "blockHash", + "type": "bytes32" + }, + { + 
"internalType": "uint64", + "name": "indexRepeatedStorageChanges", + "type": "uint64" + }, + { + "internalType": "uint256", + "name": "numberOfLayer1Txs", + "type": "uint256" + }, + { + "internalType": "bytes32", + "name": "priorityOperationsHash", + "type": "bytes32" + }, + { + "internalType": "bytes32", + "name": "l2LogsTreeRoot", + "type": "bytes32" + }, + { + "internalType": "uint256", + "name": "timestamp", + "type": "uint256" + }, + { + "internalType": "bytes32", + "name": "commitment", + "type": "bytes32" + } + ], + "internalType": "struct IExecutor.StoredBlockInfo[]", + "name": "_blocksData", + "type": "tuple[]" + } + ], + "name": "executeBlocks", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }"#; + serde_json::from_str(abi).unwrap() +}); diff --git a/core/lib/types/src/aggregated_operations.rs b/core/lib/types/src/aggregated_operations.rs index fea96585271..8819460f269 100644 --- a/core/lib/types/src/aggregated_operations.rs +++ b/core/lib/types/src/aggregated_operations.rs @@ -9,7 +9,7 @@ use zkevm_test_harness::bellman::plonk::better_better_cs::proof::Proof; use zkevm_test_harness::witness::oracle::VmWitnessOracle; use zksync_basic_types::{ethabi::Token, L1BatchNumber}; -use crate::{commitment::L1BatchWithMetadata, U256}; +use crate::{commitment::L1BatchWithMetadata, ProtocolVersionId, U256}; fn l1_batch_range_from_batches( batches: &[L1BatchWithMetadata], @@ -99,13 +99,23 @@ impl L1BatchProofOperation { let (_, proof) = serialize_proof(scheduler_proof); - let proof_input = Token::Tuple(vec![ + let aggregation_result_coords = if self.l1_batches[0] + .header + .protocol_version + .unwrap() + .is_pre_boojum() + { Token::Array( aggregation_result_coords .iter() .map(|bytes| Token::Uint(U256::from_big_endian(bytes))) .collect(), - ), + ) + } else { + Token::Array(Vec::new()) + }; + let proof_input = Token::Tuple(vec![ + aggregation_result_coords, Token::Array(proof.into_iter().map(Token::Uint).collect()), ]); @@ -216,4 +226,12 @@ impl AggregatedOperation { Self::Execute(_) => "execute", } } + + pub fn protocol_version(&self) -> ProtocolVersionId { + match self { + Self::Commit(op) => op.l1_batches[0].header.protocol_version.unwrap(), + Self::PublishProofOnchain(op) => op.l1_batches[0].header.protocol_version.unwrap(), + Self::Execute(op) => op.l1_batches[0].header.protocol_version.unwrap(), + } + } } diff --git a/core/lib/types/src/commitment.rs b/core/lib/types/src/commitment.rs index d3a4eb6e550..049560277af 100644 --- a/core/lib/types/src/commitment.rs +++ b/core/lib/types/src/commitment.rs @@ -135,36 +135,68 @@ impl L1BatchWithMetadata { } pub fn l1_commit_data(&self) -> Token { - Token::Tuple(vec![ - Token::Uint(U256::from(self.header.number.0)), - Token::Uint(U256::from(self.header.timestamp)), - Token::Uint(U256::from(self.metadata.rollup_last_leaf_index)), - Token::FixedBytes(self.metadata.merkle_root_hash.as_bytes().to_vec()), - Token::Uint(U256::from(self.header.l1_tx_count)), - Token::FixedBytes(self.metadata.l2_l1_merkle_root.as_bytes().to_vec()), - Token::FixedBytes( - self.header - .priority_ops_onchain_data_hash() - .as_bytes() - .to_vec(), - ), - Token::Bytes(self.metadata.initial_writes_compressed.clone()), - Token::Bytes(self.metadata.repeated_writes_compressed.clone()), - Token::Bytes(self.metadata.l2_l1_messages_compressed.clone()), - Token::Array( - self.header - .l2_to_l1_messages - .iter() - .map(|message| Token::Bytes(message.to_vec())) - .collect(), - ), - Token::Array( - self.factory_deps - .iter() - .map(|bytecode| 
Token::Bytes(bytecode.to_vec())) - .collect(), - ), - ]) + if self.header.protocol_version.unwrap().is_pre_boojum() { + Token::Tuple(vec![ + Token::Uint(U256::from(self.header.number.0)), + Token::Uint(U256::from(self.header.timestamp)), + Token::Uint(U256::from(self.metadata.rollup_last_leaf_index)), + Token::FixedBytes(self.metadata.merkle_root_hash.as_bytes().to_vec()), + Token::Uint(U256::from(self.header.l1_tx_count)), + Token::FixedBytes(self.metadata.l2_l1_merkle_root.as_bytes().to_vec()), + Token::FixedBytes( + self.header + .priority_ops_onchain_data_hash() + .as_bytes() + .to_vec(), + ), + Token::Bytes(self.metadata.initial_writes_compressed.clone()), + Token::Bytes(self.metadata.repeated_writes_compressed.clone()), + Token::Bytes(self.metadata.l2_l1_messages_compressed.clone()), + Token::Array( + self.header + .l2_to_l1_messages + .iter() + .map(|message| Token::Bytes(message.to_vec())) + .collect(), + ), + Token::Array( + self.factory_deps + .iter() + .map(|bytecode| Token::Bytes(bytecode.to_vec())) + .collect(), + ), + ]) + } else { + Token::Tuple(vec![ + Token::Uint(U256::from(self.header.number.0)), + Token::Uint(U256::from(self.header.timestamp)), + Token::Uint(U256::from(self.metadata.rollup_last_leaf_index)), + Token::FixedBytes(self.metadata.merkle_root_hash.as_bytes().to_vec()), + Token::Uint(U256::from(self.header.l1_tx_count)), + Token::FixedBytes( + self.header + .priority_ops_onchain_data_hash() + .as_bytes() + .to_vec(), + ), + Token::FixedBytes( + self.metadata + .bootloader_initial_content_commitment + .unwrap() + .as_bytes() + .to_vec(), + ), + Token::FixedBytes( + self.metadata + .events_queue_commitment + .unwrap() + .as_bytes() + .to_vec(), + ), + Token::Bytes(self.metadata.l2_l1_messages_compressed.clone()), + Token::Bytes(self.construct_pubdata()), + ]) + } } pub fn l1_commit_data_size(&self) -> usize { diff --git a/core/lib/types/src/protocol_version.rs b/core/lib/types/src/protocol_version.rs index 8faa998636c..05d25d6db93 100644 --- a/core/lib/types/src/protocol_version.rs +++ b/core/lib/types/src/protocol_version.rs @@ -72,6 +72,10 @@ impl ProtocolVersionId { ProtocolVersionId::Version17 => VmVersion::VmVirtualBlocksRefundsEnhancement, } } + + pub fn is_pre_boojum(&self) -> bool { + self <= &ProtocolVersionId::Version17 + } } impl Default for ProtocolVersionId { diff --git a/core/lib/zksync_core/src/block_reverter/mod.rs b/core/lib/zksync_core/src/block_reverter/mod.rs index 7888df4a0f3..a2b716ad550 100644 --- a/core/lib/zksync_core/src/block_reverter/mod.rs +++ b/core/lib/zksync_core/src/block_reverter/mod.rs @@ -300,9 +300,13 @@ impl BlockReverter { let signer = PrivateKeySigner::new(eth_config.reverter_private_key); let chain_id = web3.eth().chain_id().await.unwrap().as_u64(); - let data = contract + let revert_function = contract .function("revertBlocks") - .unwrap() + .or_else(|_| contract.function("revertBatches")) + .expect( + "Either `revertBlocks` or `revertBatches` function must be present in contract", + ); + let data = revert_function .encode_input(&[Token::Uint(last_l1_batch_to_keep.0.into())]) .unwrap(); diff --git a/core/lib/zksync_core/src/consistency_checker/mod.rs b/core/lib/zksync_core/src/consistency_checker/mod.rs index ac0a9865cf0..3613cd56898 100644 --- a/core/lib/zksync_core/src/consistency_checker/mod.rs +++ b/core/lib/zksync_core/src/consistency_checker/mod.rs @@ -1,5 +1,6 @@ use std::time::Duration; +use zksync_contracts::PRE_BOOJUM_COMMIT_FUNCTION; use zksync_dal::ConnectionPool; use zksync_types::{ web3::{error, ethabi, 
transports::Http, types::TransactionId, Web3}, @@ -97,11 +98,18 @@ impl ConsistencyChecker { Some(1.into()), "Main node gave us a failed commit tx" ); - - let commitments = self - .contract - .function("commitBlocks") + let commit_function = if block_metadata + .header + .protocol_version .unwrap() + .is_pre_boojum() + { + PRE_BOOJUM_COMMIT_FUNCTION.clone() + } else { + self.contract.function("commitBatches").unwrap().clone() + }; + + let commitments = commit_function .decode_input(&commit_tx.input.0[4..]) .unwrap() .pop() @@ -113,7 +121,7 @@ impl ConsistencyChecker { // the one that corresponds to the batch we're checking. let first_batch_number = match &commitments[0] { ethabi::Token::Tuple(tuple) => tuple[0].clone().into_uint().unwrap().as_usize(), - _ => panic!("ABI does not match the commitBlocks() function on the zkSync contract"), + _ => panic!("ABI does not match the expected one"), }; let commitment = &commitments[batch_number.0 as usize - first_batch_number]; @@ -152,7 +160,7 @@ impl ConsistencyChecker { break; } - let batch_has_metadata = self + let metadata = self .db .access_storage() .await @@ -160,8 +168,13 @@ impl ConsistencyChecker { .blocks_dal() .get_l1_batch_metadata(batch_number) .await - .unwrap() - .is_some(); + .unwrap(); + let batch_has_metadata = metadata + .map(|m| { + m.metadata.bootloader_initial_content_commitment.is_some() + && m.metadata.events_queue_commitment.is_some() + }) + .unwrap_or(false); // The batch might be already committed but not yet processed by the external node's tree // OR the batch might be processed by the external node's tree but not yet committed. diff --git a/core/lib/zksync_core/src/eth_sender/eth_tx_aggregator.rs b/core/lib/zksync_core/src/eth_sender/eth_tx_aggregator.rs index 257b89e913f..9e55de888a1 100644 --- a/core/lib/zksync_core/src/eth_sender/eth_tx_aggregator.rs +++ b/core/lib/zksync_core/src/eth_sender/eth_tx_aggregator.rs @@ -304,21 +304,41 @@ impl EthTxAggregator { &mut self, eth_client: &E, verifier_address: Address, + contracts_are_pre_boojum: bool, ) -> Result { - let token: Token = eth_client - .call_contract_function( - &self.functions.get_verification_key.name, - (), - None, - Default::default(), - None, - verifier_address, - self.functions.verifier_contract.clone(), - ) - .await?; - let recursion_scheduler_level_vk_hash = l1_vk_commitment(token); - - Ok(recursion_scheduler_level_vk_hash) + // This is here for backward compatibility with the old verifier: + // Pre-boojum verifier returns the full verification key; + // New verifier returns the hash of the verification key + tracing::debug!("Calling get_verification_key"); + if contracts_are_pre_boojum { + let vk = eth_client + .call_contract_function( + &self.functions.get_verification_key.name, + (), + None, + Default::default(), + None, + verifier_address, + self.functions.verifier_contract.clone(), + ) + .await?; + Ok(l1_vk_commitment(vk)) + } else { + let get_vk_hash = self.functions.verification_key_hash.as_ref(); + tracing::debug!("Calling verificationKeyHash"); + let vk_hash = eth_client + .call_contract_function( + &get_vk_hash.unwrap().name, + (), + None, + Default::default(), + None, + verifier_address, + self.functions.verifier_contract.clone(), + ) + .await?; + Ok(vk_hash) + } } #[tracing::instrument(skip(self, storage, eth_client))] @@ -333,11 +353,23 @@ impl EthTxAggregator { verifier_params, verifier_address, protocol_version_id, - } = self.get_multicall_data(eth_client).await?; + } = self.get_multicall_data(eth_client).await.map_err(|err| { + 
tracing::error!("Failed to get multicall data {err:?}"); + err + })?; + let contracts_are_pre_boojum = protocol_version_id.is_pre_boojum(); let recursion_scheduler_level_vk_hash = self - .get_recursion_scheduler_level_vk_hash(eth_client, verifier_address) - .await?; + .get_recursion_scheduler_level_vk_hash( + eth_client, + verifier_address, + contracts_are_pre_boojum, + ) + .await + .map_err(|err| { + tracing::error!("Failed to get VK hash from the Verifier {err:?}"); + err + })?; let l1_verifier_config = L1VerifierConfig { params: verifier_params, recursion_scheduler_level_vk_hash, @@ -353,7 +385,9 @@ impl EthTxAggregator { ) .await { - let tx = self.save_eth_tx(storage, &agg_op).await?; + let tx = self + .save_eth_tx(storage, &agg_op, contracts_are_pre_boojum) + .await?; Self::report_eth_tx_saving(storage, agg_op, &tx).await; } Ok(()) @@ -390,20 +424,51 @@ impl EthTxAggregator { .await; } - fn encode_aggregated_op(&self, op: &AggregatedOperation) -> Vec { + fn encode_aggregated_op( + &self, + op: &AggregatedOperation, + contracts_are_pre_boojum: bool, + ) -> Vec { + let operation_is_pre_boojum = op.protocol_version().is_pre_boojum(); + + // For "commit" and "prove" operations it's necessary that the contracts are of the same version as L1 batches are. + // For "execute" it's not required, i.e. we can "execute" pre-boojum batches with post-boojum contracts. match &op { - AggregatedOperation::Commit(op) => self - .functions - .commit_blocks - .encode_input(&op.get_eth_tx_args()), - AggregatedOperation::PublishProofOnchain(op) => self - .functions - .prove_blocks - .encode_input(&op.get_eth_tx_args()), - AggregatedOperation::Execute(op) => self - .functions - .execute_blocks - .encode_input(&op.get_eth_tx_args()), + AggregatedOperation::Commit(op) => { + assert_eq!(contracts_are_pre_boojum, operation_is_pre_boojum); + let f = if contracts_are_pre_boojum { + &self.functions.pre_boojum_commit + } else { + self.functions + .post_boojum_commit + .as_ref() + .expect("Missing ABI for commitBatches") + }; + f.encode_input(&op.get_eth_tx_args()) + } + AggregatedOperation::PublishProofOnchain(op) => { + assert_eq!(contracts_are_pre_boojum, operation_is_pre_boojum); + let f = if contracts_are_pre_boojum { + &self.functions.pre_boojum_prove + } else { + self.functions + .post_boojum_prove + .as_ref() + .expect("Missing ABI for proveBatches") + }; + f.encode_input(&op.get_eth_tx_args()) + } + AggregatedOperation::Execute(op) => { + let f = if contracts_are_pre_boojum { + &self.functions.pre_boojum_execute + } else { + self.functions + .post_boojum_execute + .as_ref() + .expect("Missing ABI for executeBatches") + }; + f.encode_input(&op.get_eth_tx_args()) + } } .expect("Failed to encode transaction data") } @@ -412,10 +477,11 @@ impl EthTxAggregator { &self, storage: &mut StorageProcessor<'_>, aggregated_op: &AggregatedOperation, + contracts_are_pre_boojum: bool, ) -> Result { let mut transaction = storage.start_transaction().await.unwrap(); let nonce = self.get_next_nonce(&mut transaction).await?; - let calldata = self.encode_aggregated_op(aggregated_op); + let calldata = self.encode_aggregated_op(aggregated_op, contracts_are_pre_boojum); let l1_batch_number_range = aggregated_op.l1_batch_range(); let op_type = aggregated_op.get_action_type(); diff --git a/core/lib/zksync_core/src/eth_sender/tests.rs b/core/lib/zksync_core/src/eth_sender/tests.rs index ce1977512a5..81837e58099 100644 --- a/core/lib/zksync_core/src/eth_sender/tests.rs +++ b/core/lib/zksync_core/src/eth_sender/tests.rs @@ -1,6 +1,8 @@ 
use assert_matches::assert_matches; use std::sync::{atomic::Ordering, Arc}; +use once_cell::sync::Lazy; + use zksync_config::{ configs::eth_sender::{ProofSendingMode, SenderConfig}, ContractsConfig, ETHSenderConfig, GasAdjusterConfig, @@ -29,8 +31,21 @@ use crate::l1_gas_price::GasAdjuster; // Alias to conveniently call static methods of ETHSender. type MockEthTxManager = EthTxManager, GasAdjuster>>; -const DUMMY_OPERATION: AggregatedOperation = - AggregatedOperation::Execute(L1BatchExecuteOperation { l1_batches: vec![] }); +static DUMMY_OPERATION: Lazy = Lazy::new(|| { + AggregatedOperation::Execute(L1BatchExecuteOperation { + l1_batches: vec![L1BatchWithMetadata { + header: L1BatchHeader::new( + L1BatchNumber(1), + 1, + Address::default(), + BaseSystemContractsHashes::default(), + ProtocolVersionId::latest(), + ), + metadata: default_l1_batch_metadata(), + factory_deps: Vec::new(), + }], + }) +}); #[derive(Debug)] struct EthSenderTester { @@ -143,6 +158,7 @@ async fn confirm_many() -> anyhow::Result<()> { .save_eth_tx( &mut tester.conn.access_storage().await.unwrap(), &DUMMY_OPERATION, + true, ) .await?; let hash = tester @@ -218,6 +234,7 @@ async fn resend_each_block() -> anyhow::Result<()> { .save_eth_tx( &mut tester.conn.access_storage().await.unwrap(), &DUMMY_OPERATION, + true, ) .await?; @@ -304,6 +321,7 @@ async fn dont_resend_already_mined() -> anyhow::Result<()> { .save_eth_tx( &mut tester.conn.access_storage().await.unwrap(), &DUMMY_OPERATION, + true, ) .await .unwrap(); @@ -375,6 +393,7 @@ async fn three_scenarios() -> anyhow::Result<()> { .save_eth_tx( &mut tester.conn.access_storage().await.unwrap(), &DUMMY_OPERATION, + true, ) .await .unwrap(); @@ -444,6 +463,7 @@ async fn failed_eth_tx() { .save_eth_tx( &mut tester.conn.access_storage().await.unwrap(), &DUMMY_OPERATION, + true, ) .await .unwrap(); @@ -925,6 +945,7 @@ async fn send_operation( .save_eth_tx( &mut tester.conn.access_storage().await.unwrap(), &aggregated_operation, + true, ) .await .unwrap(); diff --git a/core/lib/zksync_core/src/eth_sender/zksync_functions.rs b/core/lib/zksync_core/src/eth_sender/zksync_functions.rs index 1dadbd142df..ba20054b880 100644 --- a/core/lib/zksync_core/src/eth_sender/zksync_functions.rs +++ b/core/lib/zksync_core/src/eth_sender/zksync_functions.rs @@ -1,11 +1,17 @@ -use zksync_contracts::{multicall_contract, verifier_contract, zksync_contract}; +use zksync_contracts::{ + multicall_contract, verifier_contract, zksync_contract, PRE_BOOJUM_COMMIT_FUNCTION, + PRE_BOOJUM_EXECUTE_FUNCTION, PRE_BOOJUM_PROVE_FUNCTION, +}; use zksync_types::ethabi::{Contract, Function}; #[derive(Debug)] pub(super) struct ZkSyncFunctions { - pub(super) commit_blocks: Function, - pub(super) prove_blocks: Function, - pub(super) execute_blocks: Function, + pub(super) pre_boojum_commit: Function, + pub(super) post_boojum_commit: Option, + pub(super) pre_boojum_prove: Function, + pub(super) post_boojum_prove: Option, + pub(super) pre_boojum_execute: Function, + pub(super) post_boojum_execute: Option, pub(super) get_l2_bootloader_bytecode_hash: Function, pub(super) get_l2_default_account_bytecode_hash: Function, pub(super) get_verifier: Function, @@ -14,6 +20,7 @@ pub(super) struct ZkSyncFunctions { pub(super) verifier_contract: Contract, pub(super) get_verification_key: Function, + pub(super) verification_key_hash: Option, pub(super) multicall_contract: Contract, pub(super) aggregate3: Function, @@ -29,15 +36,26 @@ fn get_function(contract: &Contract, name: &str) -> Function { .unwrap_or_else(|| panic!("{} function 
entry not found", name)) } +fn get_optional_function(contract: &Contract, name: &str) -> Option { + contract + .functions + .get(name) + .cloned() + .map(|mut functions| functions.pop().unwrap()) +} + impl Default for ZkSyncFunctions { fn default() -> Self { let zksync_contract = zksync_contract(); let verifier_contract = verifier_contract(); let multicall_contract = multicall_contract(); - let commit_blocks = get_function(&zksync_contract, "commitBlocks"); - let prove_blocks = get_function(&zksync_contract, "proveBlocks"); - let execute_blocks = get_function(&zksync_contract, "executeBlocks"); + let pre_boojum_commit = PRE_BOOJUM_COMMIT_FUNCTION.clone(); + let post_boojum_commit = get_optional_function(&zksync_contract, "commitBatches"); + let pre_boojum_prove = PRE_BOOJUM_PROVE_FUNCTION.clone(); + let post_boojum_prove = get_optional_function(&zksync_contract, "proveBatches"); + let pre_boojum_execute = PRE_BOOJUM_EXECUTE_FUNCTION.clone(); + let post_boojum_execute = get_optional_function(&zksync_contract, "executeBatches"); let get_l2_bootloader_bytecode_hash = get_function(&zksync_contract, "getL2BootloaderBytecodeHash"); let get_l2_default_account_bytecode_hash = @@ -47,11 +65,16 @@ impl Default for ZkSyncFunctions { let get_protocol_version = get_function(&zksync_contract, "getProtocolVersion"); let get_verification_key = get_function(&verifier_contract, "get_verification_key"); let aggregate3 = get_function(&multicall_contract, "aggregate3"); + let verification_key_hash = + get_optional_function(&verifier_contract, "verificationKeyHash"); ZkSyncFunctions { - commit_blocks, - prove_blocks, - execute_blocks, + pre_boojum_commit, + post_boojum_commit, + pre_boojum_prove, + post_boojum_prove, + pre_boojum_execute, + post_boojum_execute, get_l2_bootloader_bytecode_hash, get_l2_default_account_bytecode_hash, get_verifier, @@ -59,6 +82,7 @@ impl Default for ZkSyncFunctions { get_protocol_version, verifier_contract, get_verification_key, + verification_key_hash, multicall_contract, aggregate3, } diff --git a/core/lib/zksync_core/src/lib.rs b/core/lib/zksync_core/src/lib.rs index b52a0784563..6e01de149df 100644 --- a/core/lib/zksync_core/src/lib.rs +++ b/core/lib/zksync_core/src/lib.rs @@ -8,9 +8,8 @@ use prometheus_exporter::PrometheusExporterConfig; use tokio::{sync::watch, task::JoinHandle}; use zksync_circuit_breaker::{ - facet_selectors::FacetSelectorsChecker, l1_txs::FailedL1TransactionChecker, - replication_lag::ReplicationLagChecker, CircuitBreaker, CircuitBreakerChecker, - CircuitBreakerError, + l1_txs::FailedL1TransactionChecker, replication_lag::ReplicationLagChecker, CircuitBreaker, + CircuitBreakerChecker, CircuitBreakerError, }; use zksync_config::configs::api::MerkleTreeApiConfig; use zksync_config::configs::{ @@ -43,7 +42,7 @@ use zksync_types::{ proofs::AggregationRound, protocol_version::{L1VerifierConfig, VerifierParams}, system_contracts::get_system_smart_contracts, - Address, L2ChainId, PackedEthSignature, ProtocolVersionId, + L2ChainId, PackedEthSignature, ProtocolVersionId, }; use zksync_verification_key_server::get_cached_commitments; @@ -319,16 +318,10 @@ pub async fn initialize_components( let circuit_breaker_config = CircuitBreakerConfig::from_env().context("CircuitBreakerConfig::from_env()")?; - let main_zksync_contract_address = contracts_config.diamond_proxy_addr; let circuit_breaker_checker = CircuitBreakerChecker::new( - circuit_breakers_for_components( - &components, - ð_client_config.web3_url, - &circuit_breaker_config, - main_zksync_contract_address, - ) - 
.await - .context("circuit_breakers_for_components")?, + circuit_breakers_for_components(&components, &circuit_breaker_config) + .await + .context("circuit_breakers_for_components")?, &circuit_breaker_config, ); circuit_breaker_checker.check().await.unwrap_or_else(|err| { @@ -507,6 +500,7 @@ pub async fn initialize_components( tracing::info!("initialized State Keeper in {elapsed:?}"); } + let main_zksync_contract_address = contracts_config.diamond_proxy_addr; if components.contains(&Component::EthWatcher) { let started_at = Instant::now(); tracing::info!("initializing ETH-Watcher"); @@ -786,7 +780,9 @@ async fn add_trees_to_task_futures( (false, true) => MetadataCalculatorModeConfig::Lightweight, (true, false) => match db_config.merkle_tree.mode { MerkleTreeMode::Lightweight => MetadataCalculatorModeConfig::Lightweight, - MerkleTreeMode::Full => MetadataCalculatorModeConfig::Full { store_factory }, + MerkleTreeMode::Full => MetadataCalculatorModeConfig::Full { + store_factory: Some(store_factory), + }, }, (false, false) => { anyhow::ensure!( @@ -1258,9 +1254,7 @@ async fn run_ws_api( async fn circuit_breakers_for_components( components: &[Component], - web3_url: &str, circuit_breaker_config: &CircuitBreakerConfig, - main_contract: Address, ) -> anyhow::Result>> { let mut circuit_breakers: Vec> = Vec::new(); @@ -1277,18 +1271,6 @@ async fn circuit_breakers_for_components( circuit_breakers.push(Box::new(FailedL1TransactionChecker { pool })); } - if components - .iter() - .any(|c| matches!(c, Component::EthTxAggregator | Component::EthTxManager)) - { - let eth_client = QueryClient::new(web3_url).unwrap(); - circuit_breakers.push(Box::new(FacetSelectorsChecker::new( - circuit_breaker_config, - eth_client, - main_contract, - ))); - } - if components.iter().any(|c| { matches!( c, diff --git a/core/lib/zksync_core/src/metadata_calculator/mod.rs b/core/lib/zksync_core/src/metadata_calculator/mod.rs index 956db116593..b278b094ac9 100644 --- a/core/lib/zksync_core/src/metadata_calculator/mod.rs +++ b/core/lib/zksync_core/src/metadata_calculator/mod.rs @@ -39,10 +39,10 @@ pub enum MetadataCalculatorModeConfig<'a> { /// In this mode, `MetadataCalculator` computes Merkle tree root hashes and some auxiliary information /// for L1 batches, but not witness inputs. Lightweight, - /// In this mode, `MetadataCalculator` will compute witness inputs for all storage operations - /// and put them into the object store as provided by `store_factory` (e.g., GCS). + /// In this mode, `MetadataCalculator` will compute commitments and witness inputs for all storage operations + /// and optionally put witness inputs into the object store as provided by `store_factory` (e.g., GCS). 
Full { - store_factory: &'a ObjectStoreFactory, + store_factory: Option<&'a ObjectStoreFactory>, }, } @@ -112,9 +112,10 @@ impl MetadataCalculator { let mode = config.mode.to_mode(); let object_store = match config.mode { - MetadataCalculatorModeConfig::Full { store_factory } => { - Some(store_factory.create_store().await) - } + MetadataCalculatorModeConfig::Full { store_factory } => match store_factory { + Some(f) => Some(f.create_store().await), + None => None, + }, MetadataCalculatorModeConfig::Lightweight => None, }; let updater = TreeUpdater::new(mode, config, object_store).await; diff --git a/core/lib/zksync_core/src/metadata_calculator/tests.rs b/core/lib/zksync_core/src/metadata_calculator/tests.rs index 490022b5d5d..dca85e1f7d2 100644 --- a/core/lib/zksync_core/src/metadata_calculator/tests.rs +++ b/core/lib/zksync_core/src/metadata_calculator/tests.rs @@ -372,7 +372,9 @@ pub(crate) async fn setup_calculator( ) -> (MetadataCalculator, Box) { let store_factory = &ObjectStoreFactory::mock(); let (db_config, operation_manager) = create_config(db_path); - let mode = MetadataCalculatorModeConfig::Full { store_factory }; + let mode = MetadataCalculatorModeConfig::Full { + store_factory: Some(store_factory), + }; let calculator = setup_calculator_with_options(&db_config, &operation_manager, pool, mode).await; (calculator, store_factory.create_store().await)
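
The recurring pattern in this diff is gating L1 interactions on `ProtocolVersionId::is_pre_boojum()`: pre-boojum batches are encoded with the legacy `commitBlocks`/`proveBlocks`/`executeBlocks` ABI entries bundled as static `Function`s, while post-boojum batches use `commitBatches`/`proveBatches`/`executeBatches` looked up from the deployed contract, which may be absent in older artifacts. Below is a minimal, standalone Rust sketch of that selection logic for the commit case; `ProtocolVersionId`, `Function`, and `ZkSyncFunctions` here are simplified stand-ins for the real types, and the snippet is illustrative only, not code from this PR.

// Sketch of the pre-/post-boojum ABI selection used by `encode_aggregated_op`.
// All types below are simplified stand-ins, not the real zksync definitions.

#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
enum ProtocolVersionId {
    Version16,
    Version17,
    Version18,
}

impl ProtocolVersionId {
    // Mirrors the helper added in `protocol_version.rs`: everything up to and
    // including Version17 is treated as pre-boojum in this diff.
    fn is_pre_boojum(self) -> bool {
        self <= ProtocolVersionId::Version17
    }
}

// Stand-in for `ethabi::Function`; only the name matters for the illustration.
#[derive(Clone, Debug)]
struct Function {
    name: &'static str,
}

// Stand-in for the `ZkSyncFunctions` table in `zksync_functions.rs`: the legacy
// entry is always present, the post-boojum one is optional.
struct ZkSyncFunctions {
    pre_boojum_commit: Function,
    post_boojum_commit: Option<Function>,
}

impl ZkSyncFunctions {
    // Pre-boojum batches must go through the old `commitBlocks` ABI; post-boojum
    // batches use `commitBatches`, which may be missing from older contract artifacts.
    fn commit_function(&self, contracts_are_pre_boojum: bool) -> &Function {
        if contracts_are_pre_boojum {
            &self.pre_boojum_commit
        } else {
            self.post_boojum_commit
                .as_ref()
                .expect("Missing ABI for commitBatches")
        }
    }
}

fn main() {
    let functions = ZkSyncFunctions {
        pre_boojum_commit: Function { name: "commitBlocks" },
        post_boojum_commit: Some(Function { name: "commitBatches" }),
    };

    for version in [ProtocolVersionId::Version17, ProtocolVersionId::Version18] {
        let commit = functions.commit_function(version.is_pre_boojum());
        println!("{version:?} commits via {}", commit.name);
    }
}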
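
A second pattern worth illustrating is the relaxed `MetadataCalculatorModeConfig::Full` variant, whose `store_factory` is now `Option<&ObjectStoreFactory>`, so the external node can run the full tree without an object store. The sketch below mirrors the explicit match used in `MetadataCalculator::new` (an `Option::map` would need an async closure); `ObjectStoreFactory`, `ObjectStore`, and the tokio entry point are simplified assumptions, not the real zksync types.

// Simplified stand-ins for the object-store types; only the shape matters here.
struct ObjectStore;
struct ObjectStoreFactory;

impl ObjectStoreFactory {
    async fn create_store(&self) -> ObjectStore {
        ObjectStore
    }
}

enum MetadataCalculatorModeConfig<'a> {
    Lightweight,
    Full {
        store_factory: Option<&'a ObjectStoreFactory>,
    },
}

// Mirrors the object-store selection in `MetadataCalculator::new`: a store is created
// only in Full mode and only when a factory was actually supplied.
async fn object_store_for(mode: MetadataCalculatorModeConfig<'_>) -> Option<ObjectStore> {
    match mode {
        MetadataCalculatorModeConfig::Full { store_factory } => match store_factory {
            Some(factory) => Some(factory.create_store().await),
            None => None,
        },
        MetadataCalculatorModeConfig::Lightweight => None,
    }
}

#[tokio::main]
async fn main() {
    let factory = ObjectStoreFactory;
    let with_store = object_store_for(MetadataCalculatorModeConfig::Full {
        store_factory: Some(&factory),
    })
    .await;
    let without_store = object_store_for(MetadataCalculatorModeConfig::Full {
        store_factory: None,
    })
    .await;
    println!(
        "full mode with factory: {}, without factory: {}",
        with_store.is_some(),
        without_store.is_some()
    );
}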