Skip to content

Commit

Permalink
Detect insufficient memory limit
Browse files Browse the repository at this point in the history
  • Loading branch information
JacekGlen committed Sep 22, 2024
1 parent d60b417 commit a81434c
Show file tree
Hide file tree
Showing 2 changed files with 151 additions and 2 deletions.
16 changes: 16 additions & 0 deletions silkworm/capi/silkworm.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -491,6 +491,7 @@ int silkworm_execute_blocks_ephemeral(SilkwormHandle handle, MDBX_txn* mdbx_txn,
auto signal_check_time{std::chrono::steady_clock::now()};

BlockNum block_number{start_block};
BlockNum batch_start_block_number{start_block};
BlockNum last_block_number = 0;
db::DataModel da_layer{txn};

Expand Down Expand Up @@ -522,7 +523,14 @@ int silkworm_execute_blocks_ephemeral(SilkwormHandle handle, MDBX_txn* mdbx_txn,
last_exec_result = block_executor.execute_single(block, state_buffer, analysis_cache, state_pool);
update_execution_progress(execution_progress, block, state_buffer, max_batch_size);
} catch (const db::Buffer::MemoryLimitError&) {
// infinite loop detection, buffer memory limit reached but no progress
if (batch_start_block_number == block_number) {
SILK_ERROR << "Buffer memory limit too small to execute a single block (block_number=" << block_number << ")";
return SILKWORM_INTERNAL_ERROR;
}

// batch done
batch_start_block_number = block_number;
break;
}
if (last_exec_result != ValidationResult::kOk) {
Expand Down Expand Up @@ -609,6 +617,7 @@ int silkworm_execute_blocks_perpetual(SilkwormHandle handle, MDBX_env* mdbx_env,

std::optional<Block> block;
BlockNum block_number{start_block};
BlockNum batch_start_block_number{start_block};
BlockNum last_block_number = 0;
AnalysisCache analysis_cache{execution::block::BlockExecutor::kDefaultAnalysisCacheSize};
ObjectPool<evmone::ExecutionState> state_pool;
Expand All @@ -629,7 +638,14 @@ int silkworm_execute_blocks_perpetual(SilkwormHandle handle, MDBX_env* mdbx_env,
last_exec_result = block_executor.execute_single(*block, state_buffer, analysis_cache, state_pool);
update_execution_progress(execution_progress, *block, state_buffer, max_batch_size);
} catch (const db::Buffer::MemoryLimitError&) {
// infinite loop detection, buffer memory limit reached but no progress
if (batch_start_block_number == block_number) {
SILK_ERROR << "Buffer memory limit too small to execute a single block (block_number=" << block_number << ")";
return SILKWORM_INTERNAL_ERROR;
}

// batch done
batch_start_block_number = block_number;
break;
}

Expand Down
137 changes: 135 additions & 2 deletions silkworm/capi/silkworm_test.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -486,7 +486,7 @@ TEST_CASE_METHOD(CApiTest, "CAPI silkworm_execute_blocks_ephemeral multiple bloc
SilkwormLibrary silkworm_lib{env_path()};

const int chain_id{1};
const uint64_t batch_size{256 * kMebi};
const uint64_t batch_size{3000}; // Small batch size to force multiple iterations
const bool write_change_sets{false}; // We CANNOT write changesets here, TestDatabaseContext db already has them
const bool write_receipts{false}; // We CANNOT write receipts here, TestDatabaseContext db already has them
const bool write_call_traces{false}; // For coherence but don't care
Expand Down Expand Up @@ -583,7 +583,7 @@ TEST_CASE_METHOD(CApiTest, "CAPI silkworm_execute_blocks_perpetual multiple bloc
SilkwormLibrary silkworm_lib{env_path()};

const int chain_id{1};
const uint64_t batch_size{170}; // Small batch size to force multiple iterations, two blocks at a time
const uint64_t batch_size{3000}; // Small batch size to force multiple iterations
const bool write_change_sets{false}; // We CANNOT write changesets here, TestDatabaseContext db already has them
const bool write_receipts{false}; // We CANNOT write receipts here, TestDatabaseContext db already has them
const bool write_call_traces{false}; // For coherence but don't care
Expand Down Expand Up @@ -673,6 +673,139 @@ TEST_CASE_METHOD(CApiTest, "CAPI silkworm_execute_blocks_perpetual multiple bloc
CHECK(db::read_account(ro_txn, to)->balance == 2 * value);
}

TEST_CASE_METHOD(CApiTest, "CAPI silkworm_execute_blocks_ephemeral multiple blocks: insufficient buffer memory", "[silkworm][capi]") {
    // Verify that silkworm_execute_blocks fails with SILKWORM_INTERNAL_ERROR (instead of
    // looping forever) when the buffer memory limit is too small to execute even one block.
    // Use Silkworm as a library with silkworm_init/silkworm_fini automated by RAII
    SilkwormLibrary silkworm_lib{env_path()};

    const int chain_id{1};
    const uint64_t batch_size{170};      // Batch size NOT enough to process even a single block
    const bool write_change_sets{false}; // We CANNOT write changesets here, TestDatabaseContext db already has them
    const bool write_receipts{false};    // We CANNOT write receipts here, TestDatabaseContext db already has them
    const bool write_call_traces{false}; // For coherence but don't care

    // Thin wrapper binding the constant execution options above
    auto execute_blocks = [&](auto tx, auto start_block, auto end_block) {
        return silkworm_lib.execute_blocks(tx,
                                           chain_id,
                                           start_block,
                                           end_block,
                                           batch_size,
                                           write_change_sets,
                                           write_receipts,
                                           write_call_traces);
    };

    /* TestDatabaseContext db contains a test chain made up of 9 blocks */

    // Prepare block template (just 1 tx w/ value transfer)
    evmc::address from{0x658bdf435d810c91414ec09147daa6db62406379_address};  // funded in genesis
    evmc::address to{0x8b299e2b7d7f43c0ce3068263545309ff4ffb521_address};    // untouched address
    intx::uint256 value{1};

    Block block{};
    block.header.gas_limit = 5'000'000;
    block.header.gas_used = 21'000;

    static constexpr auto kEncoder = [](Bytes& dest, const Receipt& r) { rlp::encode(dest, r); };
    std::vector<Receipt> receipts{
        {TransactionType::kLegacy, true, block.header.gas_used, {}, {}},
    };
    block.header.receipts_root = trie::root_hash(receipts, kEncoder);
    block.transactions.resize(1);
    block.transactions[0].to = to;
    block.transactions[0].gas_limit = block.header.gas_limit;
    block.transactions[0].type = TransactionType::kLegacy;
    block.transactions[0].max_priority_fee_per_gas = 0;
    block.transactions[0].max_fee_per_gas = 20 * kGiga;
    block.transactions[0].value = value;
    block.transactions[0].r = 1;  // dummy
    block.transactions[0].s = 1;  // dummy
    block.transactions[0].set_sender(from);

    constexpr size_t kBlocks{130};

    // Insert kBlocks blocks on top of the 9-block test chain, reusing the single-transaction
    // template and bumping the sender nonce each time.
    // NOTE(review): the previous erase()+pop_back() pair emptied the one-element transactions
    // vector, making pop_back() and the subsequent transactions[0] access undefined behavior.
    for (size_t i{10}; i < 10 + kBlocks; ++i) {
        block.header.number = i;
        insert_block(env, block);
        block.transactions[0].nonce++;
    }

    // Execute N blocks using an *external* txn, then commit: execution must bail out
    // with an internal error because no single block fits in the buffer memory limit
    db::RWTxnManaged external_txn0{env};
    BlockNum start_block{10}, end_block{10 + kBlocks - 1};
    const auto result0{execute_blocks(*external_txn0, start_block, end_block)};
    CHECK_NOTHROW(external_txn0.commit_and_stop());
    CHECK(result0.execute_block_result == SILKWORM_INTERNAL_ERROR);
}

TEST_CASE_METHOD(CApiTest, "CAPI silkworm_execute_blocks_perpetual multiple blocks: insufficient buffer memory", "[silkworm][capi]") {
    // Verify that silkworm_execute_blocks_perpetual fails with SILKWORM_INTERNAL_ERROR
    // (instead of looping forever) when the buffer memory limit cannot hold one block.
    // Use Silkworm as a library with silkworm_init/silkworm_fini automated by RAII
    SilkwormLibrary silkworm_lib{env_path()};

    const int chain_id{1};
    const uint64_t batch_size{170};      // Batch size not enough to process a single block
    const bool write_change_sets{false}; // We CANNOT write changesets here, TestDatabaseContext db already has them
    const bool write_receipts{false};    // We CANNOT write receipts here, TestDatabaseContext db already has them
    const bool write_call_traces{false}; // For coherence but don't care

    // Thin wrapper binding the constant execution options above (perpetual => internal txn)
    auto execute_blocks = [&](auto start_block, auto end_block) {
        return silkworm_lib.execute_blocks_perpetual(env,
                                                     chain_id,
                                                     start_block,
                                                     end_block,
                                                     batch_size,
                                                     write_change_sets,
                                                     write_receipts,
                                                     write_call_traces);
    };

    /* TestDatabaseContext db contains a test chain made up of 9 blocks */

    // Prepare block template (just 1 tx w/ value transfer)
    evmc::address from{0x658bdf435d810c91414ec09147daa6db62406379_address};  // funded in genesis
    evmc::address to{0x8b299e2b7d7f43c0ce3068263545309ff4ffb500_address};    // untouched address(es)
    intx::uint256 value{1};

    Block block{};
    block.header.gas_limit = 5'000'000;
    block.header.gas_used = 21'000;

    static constexpr auto kEncoder = [](Bytes& dest, const Receipt& r) { rlp::encode(dest, r); };
    std::vector<Receipt> receipts{
        {TransactionType::kLegacy, true, block.header.gas_used, {}, {}},
    };
    block.header.receipts_root = trie::root_hash(receipts, kEncoder);
    block.transactions.resize(1);
    block.transactions[0].to = to;
    block.transactions[0].gas_limit = block.header.gas_limit;
    block.transactions[0].type = TransactionType::kLegacy;
    block.transactions[0].max_priority_fee_per_gas = 0;
    block.transactions[0].max_fee_per_gas = 20 * kGiga;
    block.transactions[0].value = value;
    block.transactions[0].r = 1;  // dummy
    block.transactions[0].s = 1;  // dummy
    block.transactions[0].set_sender(from);

    constexpr size_t kBlocks{130};

    // Insert kBlocks blocks on top of the 9-block test chain, reusing the single-transaction
    // template: bump the nonce and touch a fresh recipient each time to force buffer growth.
    // NOTE(review): the previous erase()+pop_back() pair emptied the one-element transactions
    // vector, making pop_back() and the subsequent transactions[0] accesses undefined behavior.
    for (size_t i{10}; i < 10 + kBlocks; ++i) {
        block.header.number = i;
        insert_block(env, block);
        block.transactions[0].nonce++;
        block.transactions[0].to->bytes[19]++;  // change recipient address to force batch size growth
    }

    // Execute N blocks using an *internal* txn: execution must bail out with an
    // internal error because no single block fits in the buffer memory limit
    BlockNum start_block{10}, end_block{10 + kBlocks - 1};
    const auto result0{execute_blocks(start_block, end_block)};
    CHECK(result0.execute_block_result == SILKWORM_INTERNAL_ERROR);
}

TEST_CASE_METHOD(CApiTest, "CAPI silkworm_add_snapshot", "[silkworm][capi]") {
snapshot_test::SampleHeaderSnapshotFile valid_header_snapshot{tmp_dir.path()};
snapshot_test::SampleHeaderSnapshotPath header_snapshot_path{valid_header_snapshot.path()};
Expand Down

0 comments on commit a81434c

Please sign in to comment.