Skip to content

Commit

Permalink
chore: Disable protective_reads_persistence in SK in default local config (matter-labs#2573)
Browse files Browse the repository at this point in the history

## What ❔

Disable protective_reads_persistence in SK in default local config

## Why ❔

It's not required

## Checklist

<!-- Check your PR fulfills the following items. -->
<!-- For draft PRs check the boxes as you complete them. -->

- [ ] PR title corresponds to the body of PR (we generate changelog
entries from PRs).
- [ ] Tests for the changes have been added / updated.
- [ ] Documentation comments have been added / updated.
- [ ] Code has been formatted via `zk fmt` and `zk lint`.
  • Loading branch information
perekopskiy authored Aug 5, 2024
1 parent e44bfff commit 0891244
Show file tree
Hide file tree
Showing 15 changed files with 149 additions and 40 deletions.
62 changes: 57 additions & 5 deletions core/bin/block_reverter/src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,10 @@ use std::path::PathBuf;

use anyhow::Context as _;
use clap::{Parser, Subcommand};
use tokio::io::{self, AsyncReadExt};
use tokio::{
fs,
io::{self, AsyncReadExt},
};
use zksync_block_reverter::{
eth_client::{
clients::{Client, PKSigningClient},
Expand All @@ -12,8 +15,8 @@ use zksync_block_reverter::{
};
use zksync_config::{
configs::{
chain::NetworkConfig, wallets::Wallets, DatabaseSecrets, GeneralConfig, L1Secrets,
ObservabilityConfig,
chain::NetworkConfig, wallets::Wallets, BasicWitnessInputProducerConfig, DatabaseSecrets,
GeneralConfig, L1Secrets, ObservabilityConfig, ProtectiveReadsWriterConfig,
},
ContractsConfig, DBConfig, EthConfig, GenesisConfig, PostgresConfig,
};
Expand Down Expand Up @@ -88,6 +91,9 @@ enum Command {
/// Flag that specifies if RocksDB with state keeper cache should be rolled back.
#[arg(long)]
rollback_sk_cache: bool,
/// Flag that specifies if RocksDBs with vm runners' caches should be rolled back.
#[arg(long)]
rollback_vm_runners_cache: bool,
/// Flag that specifies if snapshot files in GCS should be rolled back.
#[arg(long, requires = "rollback_postgres")]
rollback_snapshots: bool,
Expand Down Expand Up @@ -160,6 +166,22 @@ async fn main() -> anyhow::Result<()> {
.context("Failed to find eth config")?,
None => DBConfig::from_env().context("DBConfig::from_env()")?,
};
let protective_reads_writer_config = match &general_config {
Some(general_config) => general_config
.protective_reads_writer_config
.clone()
.context("Failed to find eth config")?,
None => ProtectiveReadsWriterConfig::from_env()
.context("ProtectiveReadsWriterConfig::from_env()")?,
};
let basic_witness_input_producer_config = match &general_config {
Some(general_config) => general_config
.basic_witness_input_producer_config
.clone()
.context("Failed to find eth config")?,
None => BasicWitnessInputProducerConfig::from_env()
.context("BasicWitnessInputProducerConfig::from_env()")?,
};
let contracts = match opts.contracts_config_path {
Some(path) => {
let yaml =
Expand Down Expand Up @@ -294,6 +316,7 @@ async fn main() -> anyhow::Result<()> {
rollback_postgres,
rollback_tree,
rollback_sk_cache,
rollback_vm_runners_cache,
rollback_snapshots,
allow_executed_block_reversion,
} => {
Expand Down Expand Up @@ -341,8 +364,37 @@ async fn main() -> anyhow::Result<()> {
block_reverter.enable_rolling_back_merkle_tree(db_config.merkle_tree.path);
}
if rollback_sk_cache {
block_reverter
.enable_rolling_back_state_keeper_cache(db_config.state_keeper_db_path);
block_reverter.add_rocksdb_storage_path_to_rollback(db_config.state_keeper_db_path);
}

if rollback_vm_runners_cache {
let cache_exists = fs::try_exists(&protective_reads_writer_config.db_path)
.await
.with_context(|| {
format!(
"cannot check whether storage cache path `{}` exists",
protective_reads_writer_config.db_path
)
})?;
if cache_exists {
block_reverter.add_rocksdb_storage_path_to_rollback(
protective_reads_writer_config.db_path,
);
}

let cache_exists = fs::try_exists(&basic_witness_input_producer_config.db_path)
.await
.with_context(|| {
format!(
"cannot check whether storage cache path `{}` exists",
basic_witness_input_producer_config.db_path
)
})?;
if cache_exists {
block_reverter.add_rocksdb_storage_path_to_rollback(
basic_witness_input_producer_config.db_path,
);
}
}

block_reverter
Expand Down
2 changes: 1 addition & 1 deletion core/bin/external_node/src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1038,7 +1038,7 @@ async fn run_node(
.allow_rolling_back_executed_batches()
.enable_rolling_back_postgres()
.enable_rolling_back_merkle_tree(config.required.merkle_tree_path.clone())
.enable_rolling_back_state_keeper_cache(config.required.state_cache_path.clone());
.add_rocksdb_storage_path_to_rollback(config.required.state_cache_path.clone());

let mut reorg_detector = ReorgDetector::new(main_node_client.clone(), connection_pool.clone());
// We're checking for the reorg in the beginning because we expect that if reorg is detected during
Expand Down
2 changes: 1 addition & 1 deletion core/bin/zksync_server/src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,7 @@ struct Cli {
/// Comma-separated list of components to launch.
#[arg(
long,
default_value = "api,tree,eth,state_keeper,housekeeper,tee_verifier_input_producer,commitment_generator,da_dispatcher"
default_value = "api,tree,eth,state_keeper,housekeeper,tee_verifier_input_producer,commitment_generator,da_dispatcher,vm_runner_protective_reads"
)]
components: ComponentsToRun,
/// Path to the yaml config. If set, it will be used instead of env vars.
Expand Down

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

24 changes: 24 additions & 0 deletions core/lib/dal/src/vm_runner_dal.rs
Original file line number Diff line number Diff line change
Expand Up @@ -146,6 +146,30 @@ impl VmRunnerDal<'_, '_> {
Ok(())
}

/// Deletes all rows from `vm_runner_bwip` for L1 batches after `last_batch_to_keep`,
/// delegating to [`Self::delete_bwip_data_inner`]. Used during block reversion so the
/// basic witness input producer's recorded progress stays consistent with the
/// rolled-back batch state.
pub async fn delete_bwip_data(&mut self, last_batch_to_keep: L1BatchNumber) -> DalResult<()> {
self.delete_bwip_data_inner(Some(last_batch_to_keep)).await
}

/// Deletes `vm_runner_bwip` rows whose `l1_batch_number` is strictly greater than
/// `last_batch_to_keep`. Passing `None` removes the data for *all* batches.
async fn delete_bwip_data_inner(
&mut self,
last_batch_to_keep: Option<L1BatchNumber>,
) -> DalResult<()> {
// `None` maps to -1 so that the `>` comparison below matches every existing
// batch number (batch numbers are non-negative).
let l1_batch_number = last_batch_to_keep.map_or(-1, |number| i64::from(number.0));
sqlx::query!(
r#"
DELETE FROM vm_runner_bwip
WHERE
l1_batch_number > $1
"#,
l1_batch_number
)
.instrument("delete_bwip_data")
.with_arg("l1_batch_number", &l1_batch_number)
.execute(self.storage)
.await?;
Ok(())
}

pub async fn get_bwip_latest_processed_batch(&mut self) -> DalResult<Option<L1BatchNumber>> {
let row = sqlx::query!(
r#"
Expand Down
1 change: 1 addition & 0 deletions core/lib/env_config/src/chain.rs
Original file line number Diff line number Diff line change
Expand Up @@ -136,6 +136,7 @@ mod tests {
CHAIN_STATE_KEEPER_SAVE_CALL_TRACES="false"
CHAIN_STATE_KEEPER_BOOTLOADER_HASH=0x010007ede999d096c84553fb514d3d6ca76fbf39789dda76bfeda9f3ae06236e
CHAIN_STATE_KEEPER_DEFAULT_AA_HASH=0x0100055b041eb28aff6e3a6e0f37c31fd053fc9ef142683b05e5f0aee6934066
CHAIN_STATE_KEEPER_PROTECTIVE_READS_PERSISTENCE_ENABLED=true
CHAIN_STATE_KEEPER_L1_BATCH_COMMIT_DATA_GENERATOR_MODE="{l1_batch_commit_data_generator_mode}"
"#
)
Expand Down
55 changes: 31 additions & 24 deletions core/node/block_reverter/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -79,7 +79,7 @@ pub enum NodeRole {
///
/// - State of the Postgres database
/// - State of the Merkle tree
/// - State of the state keeper cache
/// - State of the RocksDB storage cache
/// - Object store for protocol snapshots
///
/// In addition, it can revert the state of the Ethereum contract (if the reverted L1 batches were committed).
Expand All @@ -91,7 +91,7 @@ pub struct BlockReverter {
allow_rolling_back_executed_batches: bool,
connection_pool: ConnectionPool<Core>,
should_roll_back_postgres: bool,
state_keeper_cache_path: Option<String>,
storage_cache_paths: Vec<String>,
merkle_tree_path: Option<String>,
snapshots_object_store: Option<Arc<dyn ObjectStore>>,
}
Expand All @@ -103,7 +103,7 @@ impl BlockReverter {
allow_rolling_back_executed_batches: false,
connection_pool,
should_roll_back_postgres: false,
state_keeper_cache_path: None,
storage_cache_paths: Vec::new(),
merkle_tree_path: None,
snapshots_object_store: None,
}
Expand All @@ -129,8 +129,8 @@ impl BlockReverter {
self
}

pub fn enable_rolling_back_state_keeper_cache(&mut self, path: String) -> &mut Self {
self.state_keeper_cache_path = Some(path);
pub fn add_rocksdb_storage_path_to_rollback(&mut self, path: String) -> &mut Self {
self.storage_cache_paths.push(path);
self
}

Expand Down Expand Up @@ -221,19 +221,15 @@ impl BlockReverter {
}
}

if let Some(state_keeper_cache_path) = &self.state_keeper_cache_path {
let sk_cache_exists = fs::try_exists(state_keeper_cache_path)
.await
.with_context(|| {
format!(
"cannot check whether state keeper cache path `{state_keeper_cache_path}` exists"
)
})?;
for storage_cache_path in &self.storage_cache_paths {
let sk_cache_exists = fs::try_exists(storage_cache_path).await.with_context(|| {
format!("cannot check whether storage cache path `{storage_cache_path}` exists")
})?;
anyhow::ensure!(
sk_cache_exists,
"Path with state keeper cache DB doesn't exist at `{state_keeper_cache_path}`"
"Path with storage cache DB doesn't exist at `{storage_cache_path}`"
);
self.roll_back_state_keeper_cache(last_l1_batch_to_keep, state_keeper_cache_path)
self.roll_back_storage_cache(last_l1_batch_to_keep, storage_cache_path)
.await?;
}
Ok(())
Expand Down Expand Up @@ -266,26 +262,26 @@ impl BlockReverter {
Ok(())
}

/// Rolls back changes in the state keeper cache.
async fn roll_back_state_keeper_cache(
/// Rolls back changes in the storage cache.
async fn roll_back_storage_cache(
&self,
last_l1_batch_to_keep: L1BatchNumber,
state_keeper_cache_path: &str,
storage_cache_path: &str,
) -> anyhow::Result<()> {
tracing::info!("Opening DB with state keeper cache at `{state_keeper_cache_path}`");
let sk_cache = RocksdbStorage::builder(state_keeper_cache_path.as_ref())
tracing::info!("Opening DB with storage cache at `{storage_cache_path}`");
let sk_cache = RocksdbStorage::builder(storage_cache_path.as_ref())
.await
.context("failed initializing state keeper cache")?;
.context("failed initializing storage cache")?;

if sk_cache.l1_batch_number().await > Some(last_l1_batch_to_keep + 1) {
let mut storage = self.connection_pool.connection().await?;
tracing::info!("Rolling back state keeper cache");
tracing::info!("Rolling back storage cache");
sk_cache
.roll_back(&mut storage, last_l1_batch_to_keep)
.await
.context("failed rolling back state keeper cache")?;
.context("failed rolling back storage cache")?;
} else {
tracing::info!("Nothing to roll back in state keeper cache");
tracing::info!("Nothing to roll back in storage cache");
}
Ok(())
}
Expand Down Expand Up @@ -356,10 +352,21 @@ impl BlockReverter {
.blocks_dal()
.delete_l1_batches(last_l1_batch_to_keep)
.await?;
tracing::info!("Rolling back initial writes");
transaction
.blocks_dal()
.delete_initial_writes(last_l1_batch_to_keep)
.await?;
tracing::info!("Rolling back vm_runner_protective_reads");
transaction
.vm_runner_dal()
.delete_protective_reads(last_l1_batch_to_keep)
.await?;
tracing::info!("Rolling back vm_runner_bwip");
transaction
.vm_runner_dal()
.delete_bwip_data(last_l1_batch_to_keep)
.await?;
tracing::info!("Rolling back L2 blocks");
transaction
.blocks_dal()
Expand Down
2 changes: 1 addition & 1 deletion core/node/block_reverter/src/tests.rs
Original file line number Diff line number Diff line change
Expand Up @@ -146,7 +146,7 @@ async fn block_reverter_basics(sync_merkle_tree: bool) {
BlockReverter::new(NodeRole::External, pool.clone())
.enable_rolling_back_postgres()
.enable_rolling_back_merkle_tree(merkle_tree_path.to_str().unwrap().to_owned())
.enable_rolling_back_state_keeper_cache(sk_cache_path.to_str().unwrap().to_owned())
.add_rocksdb_storage_path_to_rollback(sk_cache_path.to_str().unwrap().to_owned())
.roll_back(L1BatchNumber(5))
.await
.unwrap();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -85,7 +85,7 @@ impl WiringLayer for BlockReverterLayer {
block_reverter.enable_rolling_back_merkle_tree(path);
}
if let Some(path) = self.state_keeper_cache_path {
block_reverter.enable_rolling_back_state_keeper_cache(path);
block_reverter.add_rocksdb_storage_path_to_rollback(path);
}

Ok(Output {
Expand Down
5 changes: 3 additions & 2 deletions core/tests/revert-test/tests/revert-and-restart-en.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -180,7 +180,7 @@ class MainNode {
replaceAggregatedBlockExecuteDeadline(pathToHome, fileConfig, enableExecute ? 1 : 10000);
}

let components = 'api,tree,eth,state_keeper,commitment_generator,da_dispatcher';
let components = 'api,tree,eth,state_keeper,commitment_generator,da_dispatcher,vm_runner_protective_reads';
if (enableConsensus) {
components += ',consensus';
}
Expand Down Expand Up @@ -445,7 +445,8 @@ describe('Block reverting test', function () {
values.lastExecutedL1BatchNumber.toString(),
'--rollback-postgres',
'--rollback-tree',
'--rollback-sk-cache'
'--rollback-sk-cache',
'--rollback-vm-runners-cache'
]);

console.log('Start main node.');
Expand Down
4 changes: 2 additions & 2 deletions core/tests/revert-test/tests/revert-and-restart.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -80,7 +80,7 @@ describe('Block reverting test', function () {
const pathToHome = path.join(__dirname, '../../../..');

const enableConsensus = process.env.ENABLE_CONSENSUS == 'true';
let components = 'api,tree,eth,state_keeper,commitment_generator,da_dispatcher';
let components = 'api,tree,eth,state_keeper,commitment_generator,da_dispatcher,vm_runner_protective_reads';
if (enableConsensus) {
components += ',consensus';
}
Expand Down Expand Up @@ -237,7 +237,7 @@ describe('Block reverting test', function () {

console.log('Rolling back DB..');
await utils.spawn(
`cd ${pathToHome} && cargo run --bin block_reverter --release -- rollback-db --l1-batch-number ${lastL1BatchNumber} --rollback-postgres --rollback-tree --rollback-sk-cache ${fileConfigFlags}`
`cd ${pathToHome} && cargo run --bin block_reverter --release -- rollback-db --l1-batch-number ${lastL1BatchNumber} --rollback-postgres --rollback-tree --rollback-sk-cache --rollback-vm-runners-cache ${fileConfigFlags}`
);

let blocksCommitted = await mainContract.getTotalBatchesCommitted();
Expand Down
2 changes: 1 addition & 1 deletion core/tests/ts-integration/tests/fees.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -305,7 +305,7 @@ async function setInternalL1GasPrice(
} catch (_) {}

// Run server in background.
let command = 'zk server --components api,tree,eth,state_keeper,da_dispatcher';
let command = 'zk server --components api,tree,eth,state_keeper,da_dispatcher,vm_runner_protective_reads';
command = `DATABASE_MERKLE_TREE_MODE=full ${command}`;

if (newPubdataPrice) {
Expand Down
10 changes: 9 additions & 1 deletion core/tests/upgrade-test/tests/upgrade.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,15 @@ const fileConfig = shouldLoadConfigFromFile();

const contracts: Contracts = initContracts(pathToHome, fileConfig.loadFromFile);

let serverComponents = ['api', 'tree', 'eth', 'state_keeper', 'commitment_generator', 'da_dispatcher'];
let serverComponents = [
'api',
'tree',
'eth',
'state_keeper',
'commitment_generator',
'da_dispatcher',
'vm_runner_protective_reads'
];

const depositAmount = ethers.parseEther('0.001');

Expand Down
2 changes: 2 additions & 0 deletions etc/env/base/chain.toml
Original file line number Diff line number Diff line change
Expand Up @@ -93,6 +93,8 @@ save_call_traces = true
bootloader_hash = "0x010008e742608b21bf7eb23c1a9d0602047e3618b464c9b59c0fba3b3d7ab66e"
default_aa_hash = "0x01000563374c277a2c1e34659a2a1e87371bb6d852ce142022d497bfb50b9e32"

protective_reads_persistence_enabled = false

[chain.operations_manager]
# Sleep time when there is no new input data
delay_interval = 100
Expand Down
2 changes: 1 addition & 1 deletion etc/env/file_based/general.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -66,7 +66,7 @@ state_keeper:
validation_computational_gas_limit: 300000
save_call_traces: true
max_circuits_per_batch: 31100
protective_reads_persistence_enabled: true
protective_reads_persistence_enabled: false
mempool:
delay_interval: 100
sync_interval_ms: 10
Expand Down

0 comments on commit 0891244

Please sign in to comment.