diff --git a/core/bin/block_reverter/src/main.rs b/core/bin/block_reverter/src/main.rs
index 1be006cfb3d..f7a9ca9f9de 100644
--- a/core/bin/block_reverter/src/main.rs
+++ b/core/bin/block_reverter/src/main.rs
@@ -2,7 +2,10 @@ use std::path::PathBuf;

 use anyhow::Context as _;
 use clap::{Parser, Subcommand};
-use tokio::io::{self, AsyncReadExt};
+use tokio::{
+    fs,
+    io::{self, AsyncReadExt},
+};
 use zksync_block_reverter::{
     eth_client::{
         clients::{Client, PKSigningClient},
@@ -12,8 +15,8 @@ use zksync_block_reverter::{
 };
 use zksync_config::{
     configs::{
-        chain::NetworkConfig, wallets::Wallets, DatabaseSecrets, GeneralConfig, L1Secrets,
-        ObservabilityConfig,
+        chain::NetworkConfig, wallets::Wallets, BasicWitnessInputProducerConfig, DatabaseSecrets,
+        GeneralConfig, L1Secrets, ObservabilityConfig, ProtectiveReadsWriterConfig,
     },
     ContractsConfig, DBConfig, EthConfig, GenesisConfig, PostgresConfig,
 };
@@ -88,6 +91,9 @@ enum Command {
         /// Flag that specifies if RocksDB with state keeper cache should be rolled back.
         #[arg(long)]
         rollback_sk_cache: bool,
+        /// Flag that specifies if RocksDBs with vm runners' caches should be rolled back.
+        #[arg(long)]
+        rollback_vm_runners_cache: bool,
         /// Flag that specifies if snapshot files in GCS should be rolled back.
         #[arg(long, requires = "rollback_postgres")]
         rollback_snapshots: bool,
@@ -160,6 +166,22 @@ async fn main() -> anyhow::Result<()> {
             .context("Failed to find eth config")?,
         None => DBConfig::from_env().context("DBConfig::from_env()")?,
     };
+    let protective_reads_writer_config = match &general_config {
+        Some(general_config) => general_config
+            .protective_reads_writer_config
+            .clone()
+            .context("Failed to find protective reads writer config")?,
+        None => ProtectiveReadsWriterConfig::from_env()
+            .context("ProtectiveReadsWriterConfig::from_env()")?,
+    };
+    let basic_witness_input_producer_config = match &general_config {
+        Some(general_config) => general_config
+            .basic_witness_input_producer_config
+            .clone()
+            .context("Failed to find basic witness input producer config")?,
+        None => BasicWitnessInputProducerConfig::from_env()
+            .context("BasicWitnessInputProducerConfig::from_env()")?,
+    };
     let contracts = match opts.contracts_config_path {
         Some(path) => {
             let yaml =
@@ -294,6 +316,7 @@ async fn main() -> anyhow::Result<()> {
             rollback_postgres,
             rollback_tree,
             rollback_sk_cache,
+            rollback_vm_runners_cache,
             rollback_snapshots,
             allow_executed_block_reversion,
         } => {
@@ -341,8 +364,37 @@ async fn main() -> anyhow::Result<()> {
                 block_reverter.enable_rolling_back_merkle_tree(db_config.merkle_tree.path);
             }
             if rollback_sk_cache {
-                block_reverter
-                    .enable_rolling_back_state_keeper_cache(db_config.state_keeper_db_path);
+                block_reverter.add_rocksdb_storage_path_to_rollback(db_config.state_keeper_db_path);
+            }
+
+            if rollback_vm_runners_cache {
+                let cache_exists = fs::try_exists(&protective_reads_writer_config.db_path)
+                    .await
+                    .with_context(|| {
+                        format!(
+                            "cannot check whether storage cache path `{}` exists",
+                            protective_reads_writer_config.db_path
+                        )
+                    })?;
+                if cache_exists {
+                    block_reverter.add_rocksdb_storage_path_to_rollback(
+                        protective_reads_writer_config.db_path,
+                    );
+                }
+
+                let cache_exists = fs::try_exists(&basic_witness_input_producer_config.db_path)
+                    .await
+                    .with_context(|| {
+                        format!(
+                            "cannot check whether storage cache path `{}` exists",
+                            basic_witness_input_producer_config.db_path
+                        )
+                    })?;
+                if cache_exists {
+                    block_reverter.add_rocksdb_storage_path_to_rollback(
+                        basic_witness_input_producer_config.db_path,
+                    );
+                }
             }

             block_reverter
diff --git a/core/bin/external_node/src/main.rs b/core/bin/external_node/src/main.rs
index 1060938ef3b..2a219222d1b 100644
--- a/core/bin/external_node/src/main.rs
+++ b/core/bin/external_node/src/main.rs
@@ -1038,7 +1038,7 @@ async fn run_node(
         .allow_rolling_back_executed_batches()
         .enable_rolling_back_postgres()
         .enable_rolling_back_merkle_tree(config.required.merkle_tree_path.clone())
-        .enable_rolling_back_state_keeper_cache(config.required.state_cache_path.clone());
+        .add_rocksdb_storage_path_to_rollback(config.required.state_cache_path.clone());
     let mut reorg_detector = ReorgDetector::new(main_node_client.clone(), connection_pool.clone());

     // We're checking for the reorg in the beginning because we expect that if reorg is detected during
diff --git a/core/bin/zksync_server/src/main.rs b/core/bin/zksync_server/src/main.rs
index ea732a5c143..a7fee61c8a0 100644
--- a/core/bin/zksync_server/src/main.rs
+++ b/core/bin/zksync_server/src/main.rs
@@ -44,7 +44,7 @@ struct Cli {
     /// Comma-separated list of components to launch.
     #[arg(
         long,
-        default_value = "api,tree,eth,state_keeper,housekeeper,tee_verifier_input_producer,commitment_generator,da_dispatcher"
+        default_value = "api,tree,eth,state_keeper,housekeeper,tee_verifier_input_producer,commitment_generator,da_dispatcher,vm_runner_protective_reads"
     )]
     components: ComponentsToRun,
     /// Path to the yaml config. If set, it will be used instead of env vars.
diff --git a/core/lib/dal/.sqlx/query-ce0b54e6f5a805144ace2f0f9fba6b3e9bca996fa633005bc8eeb77341c8c893.json b/core/lib/dal/.sqlx/query-ce0b54e6f5a805144ace2f0f9fba6b3e9bca996fa633005bc8eeb77341c8c893.json
new file mode 100644
index 00000000000..77f5849bac6
--- /dev/null
+++ b/core/lib/dal/.sqlx/query-ce0b54e6f5a805144ace2f0f9fba6b3e9bca996fa633005bc8eeb77341c8c893.json
@@ -0,0 +1,14 @@
+{
+  "db_name": "PostgreSQL",
+  "query": "\n            DELETE FROM vm_runner_bwip\n            WHERE\n                l1_batch_number > $1\n            ",
+  "describe": {
+    "columns": [],
+    "parameters": {
+      "Left": [
+        "Int8"
+      ]
+    },
+    "nullable": []
+  },
+  "hash": "ce0b54e6f5a805144ace2f0f9fba6b3e9bca996fa633005bc8eeb77341c8c893"
+}
diff --git a/core/lib/dal/src/vm_runner_dal.rs b/core/lib/dal/src/vm_runner_dal.rs
index 64e37892657..37ef1517d54 100644
--- a/core/lib/dal/src/vm_runner_dal.rs
+++ b/core/lib/dal/src/vm_runner_dal.rs
@@ -146,6 +146,30 @@ impl VmRunnerDal<'_, '_> {
         Ok(())
     }

+    pub async fn delete_bwip_data(&mut self, last_batch_to_keep: L1BatchNumber) -> DalResult<()> {
+        self.delete_bwip_data_inner(Some(last_batch_to_keep)).await
+    }
+
+    async fn delete_bwip_data_inner(
+        &mut self,
+        last_batch_to_keep: Option<L1BatchNumber>,
+    ) -> DalResult<()> {
+        let l1_batch_number = last_batch_to_keep.map_or(-1, |number| i64::from(number.0));
+        sqlx::query!(
+            r#"
+            DELETE FROM vm_runner_bwip
+            WHERE
+                l1_batch_number > $1
+            "#,
+            l1_batch_number
+        )
+        .instrument("delete_bwip_data")
+        .with_arg("l1_batch_number", &l1_batch_number)
+        .execute(self.storage)
+        .await?;
+        Ok(())
+    }
+
     pub async fn get_bwip_latest_processed_batch(&mut self) -> DalResult<Option<L1BatchNumber>> {
         let row = sqlx::query!(
             r#"
diff --git a/core/lib/env_config/src/chain.rs b/core/lib/env_config/src/chain.rs
index 441fcc4159c..f62f8b859ca 100644
--- a/core/lib/env_config/src/chain.rs
+++ b/core/lib/env_config/src/chain.rs
@@ -136,6 +136,7 @@ mod tests {
             CHAIN_STATE_KEEPER_SAVE_CALL_TRACES="false"
             CHAIN_STATE_KEEPER_BOOTLOADER_HASH=0x010007ede999d096c84553fb514d3d6ca76fbf39789dda76bfeda9f3ae06236e
             CHAIN_STATE_KEEPER_DEFAULT_AA_HASH=0x0100055b041eb28aff6e3a6e0f37c31fd053fc9ef142683b05e5f0aee6934066
+            CHAIN_STATE_KEEPER_PROTECTIVE_READS_PERSISTENCE_ENABLED=true
             CHAIN_STATE_KEEPER_L1_BATCH_COMMIT_DATA_GENERATOR_MODE="{l1_batch_commit_data_generator_mode}"
         "#
         )
diff --git a/core/node/block_reverter/src/lib.rs b/core/node/block_reverter/src/lib.rs
index da1bf091ea3..466b5f3c69f 100644
--- a/core/node/block_reverter/src/lib.rs
+++ b/core/node/block_reverter/src/lib.rs
@@ -79,7 +79,7 @@ pub enum NodeRole {
 ///
 /// - State of the Postgres database
 /// - State of the Merkle tree
-/// - State of the state keeper cache
+/// - State of the RocksDB storage cache
 /// - Object store for protocol snapshots
 ///
 /// In addition, it can revert the state of the Ethereum contract (if the reverted L1 batches were committed).
@@ -91,7 +91,7 @@ pub struct BlockReverter {
     allow_rolling_back_executed_batches: bool,
     connection_pool: ConnectionPool<Core>,
     should_roll_back_postgres: bool,
-    state_keeper_cache_path: Option<String>,
+    storage_cache_paths: Vec<String>,
     merkle_tree_path: Option<String>,
     snapshots_object_store: Option<Arc<dyn ObjectStore>>,
 }
@@ -103,7 +103,7 @@ impl BlockReverter {
             allow_rolling_back_executed_batches: false,
             connection_pool,
             should_roll_back_postgres: false,
-            state_keeper_cache_path: None,
+            storage_cache_paths: Vec::new(),
             merkle_tree_path: None,
             snapshots_object_store: None,
         }
@@ -129,8 +129,8 @@ impl BlockReverter {
         self
     }

-    pub fn enable_rolling_back_state_keeper_cache(&mut self, path: String) -> &mut Self {
-        self.state_keeper_cache_path = Some(path);
+    pub fn add_rocksdb_storage_path_to_rollback(&mut self, path: String) -> &mut Self {
+        self.storage_cache_paths.push(path);
         self
     }

@@ -221,19 +221,15 @@ impl BlockReverter {
             }
         }
-        if let Some(state_keeper_cache_path) = &self.state_keeper_cache_path {
-            let sk_cache_exists = fs::try_exists(state_keeper_cache_path)
-                .await
-                .with_context(|| {
-                    format!(
-                        "cannot check whether state keeper cache path `{state_keeper_cache_path}` exists"
-                    )
-                })?;
+        for storage_cache_path in &self.storage_cache_paths {
+            let sk_cache_exists = fs::try_exists(storage_cache_path).await.with_context(|| {
+                format!("cannot check whether storage cache path `{storage_cache_path}` exists")
+            })?;
             anyhow::ensure!(
                 sk_cache_exists,
-                "Path with state keeper cache DB doesn't exist at `{state_keeper_cache_path}`"
+                "Path with storage cache DB doesn't exist at `{storage_cache_path}`"
             );
-            self.roll_back_state_keeper_cache(last_l1_batch_to_keep, state_keeper_cache_path)
+            self.roll_back_storage_cache(last_l1_batch_to_keep, storage_cache_path)
                 .await?;
         }
         Ok(())
     }
@@ -266,26 +262,26 @@ impl BlockReverter {
         Ok(())
     }

-    /// Rolls back changes in the state keeper cache.
-    async fn roll_back_state_keeper_cache(
+    /// Rolls back changes in the storage cache.
+    async fn roll_back_storage_cache(
         &self,
         last_l1_batch_to_keep: L1BatchNumber,
-        state_keeper_cache_path: &str,
+        storage_cache_path: &str,
     ) -> anyhow::Result<()> {
-        tracing::info!("Opening DB with state keeper cache at `{state_keeper_cache_path}`");
-        let sk_cache = RocksdbStorage::builder(state_keeper_cache_path.as_ref())
+        tracing::info!("Opening DB with storage cache at `{storage_cache_path}`");
+        let sk_cache = RocksdbStorage::builder(storage_cache_path.as_ref())
             .await
-            .context("failed initializing state keeper cache")?;
+            .context("failed initializing storage cache")?;
         if sk_cache.l1_batch_number().await > Some(last_l1_batch_to_keep + 1) {
             let mut storage = self.connection_pool.connection().await?;
-            tracing::info!("Rolling back state keeper cache");
+            tracing::info!("Rolling back storage cache");
             sk_cache
                 .roll_back(&mut storage, last_l1_batch_to_keep)
                 .await
-                .context("failed rolling back state keeper cache")?;
+                .context("failed rolling back storage cache")?;
         } else {
-            tracing::info!("Nothing to roll back in state keeper cache");
+            tracing::info!("Nothing to roll back in storage cache");
         }
         Ok(())
     }
@@ -356,10 +352,21 @@ impl BlockReverter {
             .blocks_dal()
             .delete_l1_batches(last_l1_batch_to_keep)
             .await?;
+        tracing::info!("Rolling back initial writes");
         transaction
             .blocks_dal()
             .delete_initial_writes(last_l1_batch_to_keep)
             .await?;
+        tracing::info!("Rolling back vm_runner_protective_reads");
+        transaction
+            .vm_runner_dal()
+            .delete_protective_reads(last_l1_batch_to_keep)
+            .await?;
+        tracing::info!("Rolling back vm_runner_bwip");
+        transaction
+            .vm_runner_dal()
+            .delete_bwip_data(last_l1_batch_to_keep)
+            .await?;
         tracing::info!("Rolling back L2 blocks");
         transaction
             .blocks_dal()
diff --git a/core/node/block_reverter/src/tests.rs b/core/node/block_reverter/src/tests.rs
index 161ac3ed00c..1881de1e2cb 100644
--- a/core/node/block_reverter/src/tests.rs
+++ b/core/node/block_reverter/src/tests.rs
@@ -146,7 +146,7 @@ async fn block_reverter_basics(sync_merkle_tree: bool) {
     BlockReverter::new(NodeRole::External, pool.clone())
         .enable_rolling_back_postgres()
         .enable_rolling_back_merkle_tree(merkle_tree_path.to_str().unwrap().to_owned())
-        .enable_rolling_back_state_keeper_cache(sk_cache_path.to_str().unwrap().to_owned())
+        .add_rocksdb_storage_path_to_rollback(sk_cache_path.to_str().unwrap().to_owned())
         .roll_back(L1BatchNumber(5))
         .await
         .unwrap();
diff --git a/core/node/node_framework/src/implementations/layers/block_reverter.rs b/core/node/node_framework/src/implementations/layers/block_reverter.rs
index 4cfe4212e4d..8f03911bfa1 100644
--- a/core/node/node_framework/src/implementations/layers/block_reverter.rs
+++ b/core/node/node_framework/src/implementations/layers/block_reverter.rs
@@ -85,7 +85,7 @@ impl WiringLayer for BlockReverterLayer {
             block_reverter.enable_rolling_back_merkle_tree(path);
         }
         if let Some(path) = self.state_keeper_cache_path {
-            block_reverter.enable_rolling_back_state_keeper_cache(path);
+            block_reverter.add_rocksdb_storage_path_to_rollback(path);
         }

         Ok(Output {
diff --git a/core/tests/revert-test/tests/revert-and-restart-en.test.ts b/core/tests/revert-test/tests/revert-and-restart-en.test.ts
index 2fee9c7be88..952f8865f84 100644
--- a/core/tests/revert-test/tests/revert-and-restart-en.test.ts
+++ b/core/tests/revert-test/tests/revert-and-restart-en.test.ts
@@ -180,7 +180,7 @@ class MainNode {
             replaceAggregatedBlockExecuteDeadline(pathToHome, fileConfig, enableExecute ? 1 : 10000);
         }

-        let components = 'api,tree,eth,state_keeper,commitment_generator,da_dispatcher';
+        let components = 'api,tree,eth,state_keeper,commitment_generator,da_dispatcher,vm_runner_protective_reads';
         if (enableConsensus) {
             components += ',consensus';
         }
@@ -445,7 +445,8 @@ describe('Block reverting test', function () {
             values.lastExecutedL1BatchNumber.toString(),
             '--rollback-postgres',
             '--rollback-tree',
-            '--rollback-sk-cache'
+            '--rollback-sk-cache',
+            '--rollback-vm-runners-cache'
         ]);

         console.log('Start main node.');
diff --git a/core/tests/revert-test/tests/revert-and-restart.test.ts b/core/tests/revert-test/tests/revert-and-restart.test.ts
index 25ed90ea72e..bea912d3305 100644
--- a/core/tests/revert-test/tests/revert-and-restart.test.ts
+++ b/core/tests/revert-test/tests/revert-and-restart.test.ts
@@ -80,7 +80,7 @@ describe('Block reverting test', function () {
     const pathToHome = path.join(__dirname, '../../../..');
     const enableConsensus = process.env.ENABLE_CONSENSUS == 'true';

-    let components = 'api,tree,eth,state_keeper,commitment_generator,da_dispatcher';
+    let components = 'api,tree,eth,state_keeper,commitment_generator,da_dispatcher,vm_runner_protective_reads';
     if (enableConsensus) {
         components += ',consensus';
     }
@@ -237,7 +237,7 @@ describe('Block reverting test', function () {

         console.log('Rolling back DB..');
         await utils.spawn(
-            `cd ${pathToHome} && cargo run --bin block_reverter --release -- rollback-db --l1-batch-number ${lastL1BatchNumber} --rollback-postgres --rollback-tree --rollback-sk-cache ${fileConfigFlags}`
+            `cd ${pathToHome} && cargo run --bin block_reverter --release -- rollback-db --l1-batch-number ${lastL1BatchNumber} --rollback-postgres --rollback-tree --rollback-sk-cache --rollback-vm-runners-cache ${fileConfigFlags}`
         );

         let blocksCommitted = await mainContract.getTotalBatchesCommitted();
diff --git a/core/tests/ts-integration/tests/fees.test.ts b/core/tests/ts-integration/tests/fees.test.ts
index 522a9d8fd5b..8d5b7a23a94 100644
--- a/core/tests/ts-integration/tests/fees.test.ts
+++ b/core/tests/ts-integration/tests/fees.test.ts
@@ -305,7 +305,7 @@ async function setInternalL1GasPrice(
     } catch (_) {}

     // Run server in background.
-    let command = 'zk server --components api,tree,eth,state_keeper,da_dispatcher';
+    let command = 'zk server --components api,tree,eth,state_keeper,da_dispatcher,vm_runner_protective_reads';
     command = `DATABASE_MERKLE_TREE_MODE=full ${command}`;

     if (newPubdataPrice) {
diff --git a/core/tests/upgrade-test/tests/upgrade.test.ts b/core/tests/upgrade-test/tests/upgrade.test.ts
index deeb69462fb..1af5e0727db 100644
--- a/core/tests/upgrade-test/tests/upgrade.test.ts
+++ b/core/tests/upgrade-test/tests/upgrade.test.ts
@@ -24,7 +24,15 @@ const fileConfig = shouldLoadConfigFromFile();

 const contracts: Contracts = initContracts(pathToHome, fileConfig.loadFromFile);

-let serverComponents = ['api', 'tree', 'eth', 'state_keeper', 'commitment_generator', 'da_dispatcher'];
+let serverComponents = [
+    'api',
+    'tree',
+    'eth',
+    'state_keeper',
+    'commitment_generator',
+    'da_dispatcher',
+    'vm_runner_protective_reads'
+];

 const depositAmount = ethers.parseEther('0.001');
diff --git a/etc/env/base/chain.toml b/etc/env/base/chain.toml
index 0cb8213119b..6cfacb3c72c 100644
--- a/etc/env/base/chain.toml
+++ b/etc/env/base/chain.toml
@@ -93,6 +93,8 @@ save_call_traces = true
 bootloader_hash = "0x010008e742608b21bf7eb23c1a9d0602047e3618b464c9b59c0fba3b3d7ab66e"
 default_aa_hash = "0x01000563374c277a2c1e34659a2a1e87371bb6d852ce142022d497bfb50b9e32"

+protective_reads_persistence_enabled = false
+
 [chain.operations_manager]
 # Sleep time when there is no new input data
 delay_interval = 100
diff --git a/etc/env/file_based/general.yaml b/etc/env/file_based/general.yaml
index b6ba932d63c..300138e9a86 100644
--- a/etc/env/file_based/general.yaml
+++ b/etc/env/file_based/general.yaml
@@ -66,7 +66,7 @@ state_keeper:
   validation_computational_gas_limit: 300000
   save_call_traces: true
   max_circuits_per_batch: 31100
-  protective_reads_persistence_enabled: true
+  protective_reads_persistence_enabled: false
 mempool:
   delay_interval: 100
   sync_interval_ms: 10
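
Usage sketch (not part of the patch above): after this change, callers queue any number of RocksDB caches for rollback by calling `add_rocksdb_storage_path_to_rollback` repeatedly, instead of the former single-shot `enable_rolling_back_state_keeper_cache`. The builder calls mirror the ones visible in `external_node/src/main.rs` and `block_reverter/src/tests.rs`; the import paths, the `NodeRole::Main` variant, the function name, and the variable names below are assumptions for illustration only.

use anyhow::Context as _;
use zksync_block_reverter::{BlockReverter, NodeRole};
use zksync_dal::{ConnectionPool, Core};
use zksync_types::L1BatchNumber;

// Hypothetical helper showing how a node could register all of its RocksDB
// caches (state keeper, protective reads writer, BWIP) for rollback.
async fn roll_back_node_state(
    pool: ConnectionPool<Core>,
    merkle_tree_path: String,
    state_keeper_cache_path: String,
    protective_reads_db_path: String,
    bwip_db_path: String,
    last_l1_batch_to_keep: L1BatchNumber,
) -> anyhow::Result<()> {
    let mut block_reverter = BlockReverter::new(NodeRole::Main, pool);
    block_reverter
        .enable_rolling_back_postgres()
        .enable_rolling_back_merkle_tree(merkle_tree_path)
        // Each call queues one RocksDB cache; the reverter iterates over all of them.
        .add_rocksdb_storage_path_to_rollback(state_keeper_cache_path)
        .add_rocksdb_storage_path_to_rollback(protective_reads_db_path)
        .add_rocksdb_storage_path_to_rollback(bwip_db_path);
    block_reverter
        .roll_back(last_l1_batch_to_keep)
        .await
        .context("block reverter failed")?;
    Ok(())
}

On the CLI, the same set of caches is covered by the new `--rollback-vm-runners-cache` flag next to `--rollback-sk-cache`, as the revert tests in this patch show.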