Merge branch 'master' into ci/tidier26
yperbasis committed Sep 17, 2024
2 parents 5d85e50 + 1ca6972 commit c5d810d
Showing 9 changed files with 81 additions and 42 deletions.
silkworm/node/execution/active_direct_service_test.cpp (3 changes: 2 additions & 1 deletion)
@@ -33,6 +33,7 @@
namespace silkworm::execution::api {

using testing::InvokeWithoutArgs;
using testing::NiceMock;

using silkworm::db::test_util::TempChainData;
using silkworm::node::test_util::make_node_settings_from_temp_chain_data;
@@ -51,7 +52,7 @@ struct ActiveDirectServiceTest : public TaskRunner {
dba{tmp_chaindata.env()} {
tmp_chaindata.add_genesis_data();
tmp_chaindata.commit_txn();
mock_execution_engine = std::make_unique<MockExecutionEngine>(context(), settings, dba);
mock_execution_engine = std::make_unique<NiceMock<MockExecutionEngine>>(context(), settings, dba);
direct_service = std::make_unique<ActiveDirectServiceForTest>(*mock_execution_engine, execution_context);
execution_context_thread = std::thread{[this]() {
direct_service->execution_loop();
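The only functional change in this test is wrapping the mock in testing::NiceMock, which makes GoogleMock silently return default values for calls the test sets no expectations on, instead of logging "uninteresting mock function call" warnings. A minimal, self-contained sketch of the difference (MockEngine below is a hypothetical stand-in, not the real MockExecutionEngine):

    #include <gmock/gmock.h>
    #include <gtest/gtest.h>

    struct Engine {
        virtual ~Engine() = default;
        virtual int block_progress() = 0;
    };

    struct MockEngine : Engine {
        MOCK_METHOD(int, block_progress, (), (override));
    };

    TEST(NiceMockSketch, SilencesUninterestingCalls) {
        MockEngine plain_mock;                   // plain mock: warns on calls without expectations
        testing::NiceMock<MockEngine> nice;      // NiceMock: no warning, returns a default-constructed int (0)
        plain_mock.block_progress();             // logs "Uninteresting mock function call"
        EXPECT_EQ(nice.block_progress(), 0);     // silent
    }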
silkworm/node/stagedsync/stages/stage_senders.cpp (16 changes: 9 additions & 7 deletions)
@@ -302,13 +302,16 @@ Stage::Result Senders::parallel_recover(db::RWTxn& txn) {

// Start from first block and read all in sequence
for (auto current_block_num = start_block_num; current_block_num <= target_block_num; ++current_block_num) {
auto current_hash = db::read_canonical_header_hash(txn, current_block_num);
const auto current_hash = db::read_canonical_header_hash(txn, current_block_num);
if (!current_hash) throw StageError(Stage::Result::kBadChainSequence,
"Canonical hash at height " + std::to_string(current_block_num) + " not found");
const auto block_header = data_model.read_header(current_block_num, *current_hash);
if (!block_header) throw StageError(Stage::Result::kBadChainSequence,
"Canonical header at height " + std::to_string(current_block_num) + " not found");
BlockBody block_body;
auto found = data_model.read_body(*current_hash, current_block_num, block_body);
const auto found = data_model.read_body(*current_hash, current_block_num, block_body);
if (!found) throw StageError(Stage::Result::kBadChainSequence,
"Canonical block at height " + std::to_string(current_block_num) + " not found");
"Canonical body at height " + std::to_string(current_block_num) + " not found");

// Every 1024 blocks check if the SignalHandler has been triggered
if ((current_block_num % 1024 == 0) && is_stopping()) {
@@ -322,7 +325,7 @@ Stage::Result Senders::parallel_recover(db::RWTxn& txn) {
}

total_collected_senders += block_body.transactions.size();
success_or_throw(add_to_batch(current_block_num, *current_hash, block_body.transactions));
success_or_throw(add_to_batch(current_block_num, block_header->timestamp, *current_hash, block_body.transactions));

// Process batch in parallel if max size has been reached
if (batch_->size() >= max_batch_size_) {
@@ -374,13 +377,12 @@ Stage::Result Senders::parallel_recover(db::RWTxn& txn) {
return ret;
}

Stage::Result Senders::add_to_batch(BlockNum block_num, const Hash& block_hash, const std::vector<Transaction>& transactions) {
Stage::Result Senders::add_to_batch(BlockNum block_num, BlockTime block_timestamp, const Hash& block_hash, const std::vector<Transaction>& transactions) {
if (is_stopping()) {
return Stage::Result::kAborted;
}

// We're only interested in revisions up to London, so it's OK to not detect time-based forks.
const evmc_revision rev{chain_config_.revision(block_num, /*block_time=*/0)};
const evmc_revision rev{chain_config_.revision(block_num, block_timestamp)};
const bool has_homestead{rev >= EVMC_HOMESTEAD};
const bool has_spurious_dragon{rev >= EVMC_SPURIOUS_DRAGON};

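The substantive change here is that add_to_batch now receives the block timestamp, so chain_config_.revision() can resolve time-based forks instead of relying on the removed assumption that only pre-London, block-number-based rules matter. Since Shanghai, mainnet forks activate by timestamp, so a hard-coded block_time of 0 under-reports the revision for post-Shanghai blocks. A rough, self-contained sketch of why the second argument matters (revision_sketch and its mainnet-like numbers are illustrative, not silkworm's ChainConfig::revision):

    #include <cstdint>
    #include <evmc/evmc.h>   // evmc_revision, EVMC_* revision constants

    // Hypothetical mainnet-like schedule: London activates by block number,
    // Shanghai activates by timestamp, so both inputs are needed to pick the revision.
    evmc_revision revision_sketch(uint64_t block_num, uint64_t block_time) {
        if (block_time >= 1'681'338'455) return EVMC_SHANGHAI;  // time-based fork
        if (block_num >= 12'965'000) return EVMC_LONDON;        // number-based fork
        return EVMC_ISTANBUL;
    }

    // revision_sketch(19'000'000, 0)             -> EVMC_LONDON, what the old hard-coded 0 produced
    // revision_sketch(19'000'000, 1'700'000'000) -> EVMC_SHANGHAI, what passing header.timestamp produces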
silkworm/node/stagedsync/stages/stage_senders.hpp (2 changes: 1 addition & 1 deletion)
@@ -68,7 +68,7 @@ class Senders final : public Stage {
private:
Stage::Result parallel_recover(db::RWTxn& txn);

Stage::Result add_to_batch(BlockNum block_num, const Hash& block_hash, const std::vector<Transaction>& transactions);
Stage::Result add_to_batch(BlockNum block_num, BlockTime block_timestamp, const Hash& block_hash, const std::vector<Transaction>& transactions);
void recover_batch(ThreadPool& worker_pool, const secp256k1_context* context);
void collect_senders();
void collect_senders(std::shared_ptr<AddressRecoveryBatch>& batch);
silkworm/node/stagedsync/stages_test.cpp (70 changes: 43 additions & 27 deletions)
@@ -173,34 +173,51 @@ TEST_CASE("Sync Stages") {
}

SECTION("Senders") {
std::vector<evmc::bytes32> block_hashes{
0x3ac225168df54212a25c1c01fd35bebfea408fdac2e31ddd6f80a4bbf9a5f1cb_bytes32,
0xb5553de315e0edf504d9150af82dafa5c4667fa618ed0a6f19c69b41166c5510_bytes32,
0x0b42b6393c1f53060fe3ddbfcd7aadcca894465a5a438f69c87d790b2299b9b2_bytes32};

auto sample_transactions{test::sample_transactions()};
REQUIRE(!sample_transactions.empty());
auto sample_receipts{test::sample_receipts()};
REQUIRE(!sample_receipts.empty());

BlockBody block_body;
Block block{};
block.header.gas_limit = 5'000'000;
block.header.gas_used = 21'000;

static constexpr auto kEncoder = [](Bytes& dest, const Receipt& r) { rlp::encode(dest, r); };
block.header.receipts_root = trie::root_hash(std::vector<Receipt>{sample_receipts[0]}, kEncoder);
block.transactions.resize(1);
block.transactions[0] = sample_transactions[0];

// First block - 1 transaction
block_body.transactions.push_back(sample_transactions[0]);
REQUIRE_NOTHROW(db::write_body(txn, block_body, block_hashes[0].bytes, 1));
Block b1 = block;
b1.header.number = 1;
REQUIRE_NOTHROW(db::write_header(txn, b1.header));
const auto b1_hash = b1.header.hash();
REQUIRE_NOTHROW(db::write_body(txn, b1, b1_hash, b1.header.number));
const auto tx_sequence_after_block1{db::read_map_sequence(txn, db::table::kBlockTransactions.name)};
REQUIRE(tx_sequence_after_block1 == 5); // 1 tx + 2 system txs for block 1

// Second block - 1 transactions
REQUIRE_NOTHROW(db::write_body(txn, block_body, block_hashes[1].bytes, 2));
// Second block - 1 transaction
Block b2 = block;
b2.header.number = 2;
REQUIRE_NOTHROW(db::write_header(txn, b2.header));
const auto b2_hash = b2.header.hash();
REQUIRE_NOTHROW(db::write_body(txn, b2, b2_hash, b2.header.number));
const auto tx_sequence_after_block2{db::read_map_sequence(txn, db::table::kBlockTransactions.name)};
REQUIRE(tx_sequence_after_block2 == 8); // 1 tx + 2 system txs for block 2

// Third block - 0 transactions
block_body.transactions.clear();
REQUIRE_NOTHROW(db::write_body(txn, block_body, block_hashes[2].bytes, 3));
Block b3 = block;
b3.header.number = 3;
b3.header.transactions_root = kEmptyRoot;
b3.transactions.clear();
REQUIRE_NOTHROW(db::write_header(txn, b3.header));
const auto b3_hash = b3.header.hash();
REQUIRE_NOTHROW(db::write_body(txn, b3, b3_hash, b3.header.number));

// Write canonical hashes
REQUIRE_NOTHROW(db::write_canonical_header_hash(txn, block_hashes[0].bytes, 1));
REQUIRE_NOTHROW(db::write_canonical_header_hash(txn, block_hashes[1].bytes, 2));
REQUIRE_NOTHROW(db::write_canonical_header_hash(txn, block_hashes[2].bytes, 3));
REQUIRE_NOTHROW(db::write_canonical_header_hash(txn, b1_hash.bytes, 1));
REQUIRE_NOTHROW(db::write_canonical_header_hash(txn, b2_hash.bytes, 2));
REQUIRE_NOTHROW(db::write_canonical_header_hash(txn, b3_hash.bytes, 3));

// Update progresses
REQUIRE_NOTHROW(db::stages::write_stage_progress(txn, db::stages::kBlockBodiesKey, 3));
@@ -241,20 +258,20 @@

REQUIRE_NOTHROW(txn.commit_and_renew());

constexpr auto kExpectedSender{0xc15eb501c014515ad0ecb4ecbf75cc597110b060_address};
{
auto senders_map{txn->open_map(db::table::kSenders.name)};
REQUIRE(txn->get_map_stat(senders_map).ms_entries == 2);

auto expected_sender{0xc15eb501c014515ad0ecb4ecbf75cc597110b060_address};
auto written_senders{db::read_senders(txn, 1, block_hashes[0].bytes)};
auto written_senders{db::read_senders(txn, 1, b1_hash.bytes)};
REQUIRE(written_senders.size() == 1);
REQUIRE(written_senders[0] == expected_sender);
REQUIRE(written_senders[0] == kExpectedSender);

written_senders = db::read_senders(txn, 2, block_hashes[1].bytes);
written_senders = db::read_senders(txn, 2, b2_hash.bytes);
REQUIRE(written_senders.size() == 1);
REQUIRE(written_senders[0] == expected_sender);
REQUIRE(written_senders[0] == kExpectedSender);

written_senders = db::read_senders(txn, 3, block_hashes[2].bytes);
written_senders = db::read_senders(txn, 3, b3_hash.bytes);
REQUIRE(written_senders.empty());
}

@@ -267,15 +284,14 @@
auto senders_map{txn->open_map(db::table::kSenders.name)};
REQUIRE(txn->get_map_stat(senders_map).ms_entries == 1);

auto expected_sender{0xc15eb501c014515ad0ecb4ecbf75cc597110b060_address};
auto written_senders{db::read_senders(txn, 1, block_hashes[0].bytes)};
auto written_senders{db::read_senders(txn, 1, b1_hash.bytes)};
REQUIRE(written_senders.size() == 1);
REQUIRE(written_senders[0] == expected_sender);
REQUIRE(written_senders[0] == kExpectedSender);

written_senders = db::read_senders(txn, 2, block_hashes[1].bytes);
written_senders = db::read_senders(txn, 2, b2_hash.bytes);
REQUIRE(written_senders.empty());

written_senders = db::read_senders(txn, 3, block_hashes[2].bytes);
written_senders = db::read_senders(txn, 3, b3_hash.bytes);
REQUIRE(written_senders.empty());
}

@@ -284,7 +300,7 @@
stage.set_prune_mode_senders(db::BlockAmount(db::BlockAmount::Type::kBefore, 2));
stage_result = stage.prune(txn);
REQUIRE(stage_result == stagedsync::Stage::Result::kSuccess);
auto written_senders{db::read_senders(txn, 1, block_hashes[0].bytes)};
auto written_senders{db::read_senders(txn, 1, b1_hash.bytes)};
REQUIRE(written_senders.empty());
}

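The test changes follow from the stage change above: because Senders now reads each canonical header for its timestamp, the fixture has to write real headers and use their actual hashes as the canonical hashes, rather than a bare BlockBody keyed by arbitrary hash constants. The new sequence assertions are plain accounting; assuming two system transaction slots are reserved per block in kBlockTransactions and that the genesis block written by the fixture already consumed its own two slots, the expected counters work out as below (a sketch of the arithmetic, not project code):

    #include <cstdint>

    // Assumed accounting for the kBlockTransactions sequence counter:
    constexpr std::uint64_t kSystemTxsPerBlock = 2;
    constexpr std::uint64_t after_genesis = 0 + kSystemTxsPerBlock;                  // 2
    constexpr std::uint64_t after_block1 = after_genesis + 1 + kSystemTxsPerBlock;   // 5, matches the REQUIRE above
    constexpr std::uint64_t after_block2 = after_block1 + 1 + kSystemTxsPerBlock;    // 8, matches the REQUIRE above
    static_assert(after_block1 == 5 && after_block2 == 8);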
silkworm/rpc/commands/debug_api.cpp (3 changes: 3 additions & 0 deletions)
@@ -259,6 +259,9 @@ Task<void> DebugRpcApi::handle_debug_storage_range_at(const nlohmann::json& requ
result["nextKey"] = "0x" + silkworm::to_hex(next_key);
}

reply = make_json_content(request, result);
} catch (const std::invalid_argument& e) {
nlohmann::json result = {{"storage", nullptr}, {"nextKey", nullptr}};
reply = make_json_content(request, result);
} catch (const std::exception& e) {
SILK_ERROR << "exception: " << e.what() << " processing request: " << request.dump();
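This and the erigon_*, eth_* and trace changes below share one pattern: the block and header lookup helpers evidently signal a missing block by throwing std::invalid_argument, and each handler now catches that ahead of the generic std::exception branch and replies with a well-formed "not found" payload (null fields here, an explicit error for erigon_getHeaderByHash, a zero quantity for eth_estimateGas) instead of an internal error. A condensed, non-coroutine sketch of the ordering that matters (read_block and the JSON shapes are stand-ins, not the real silkworm helpers):

    #include <iostream>
    #include <nlohmann/json.hpp>
    #include <stdexcept>
    #include <string>

    // Stand-in for a block lookup that signals "unknown block" by throwing std::invalid_argument.
    nlohmann::json read_block(const std::string& hash) {
        throw std::invalid_argument{"block not found: " + hash};
    }

    nlohmann::json handle(const std::string& hash) {
        try {
            return {{"result", read_block(hash)}};
        } catch (const std::invalid_argument&) {
            // Expected condition: the block simply is not in the database.
            // Must come before the std::exception handler, which invalid_argument derives from.
            return {{"result", nullptr}};
        } catch (const std::exception& e) {
            // Unexpected failure: still reported as an internal error.
            return {{"error", e.what()}};
        }
    }

    int main() {
        std::cout << handle("0xdeadbeef").dump() << "\n";  // prints {"result":null}
    }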
silkworm/rpc/commands/erigon_api.cpp (4 changes: 4 additions & 0 deletions)
@@ -256,6 +256,8 @@ Task<void> ErigonRpcApi::handle_erigon_get_header_by_hash(const nlohmann::json&
} else {
reply = make_json_content(request, *header);
}
} catch (const std::invalid_argument& iv) {
reply = make_json_error(request, kServerError, "block header not found: 0x" + silkworm::to_hex(block_hash));
} catch (const std::exception& e) {
SILK_ERROR << "exception: " << e.what() << " processing request: " << request.dump();
reply = make_json_error(request, kInternalError, e.what());
@@ -423,6 +425,8 @@ Task<void> ErigonRpcApi::handle_erigon_get_logs_by_hash(const nlohmann::json& re
SILK_DEBUG << "logs.size(): " << logs.size();

reply = make_json_content(request, logs);
} catch (const std::invalid_argument& iv) {
reply = make_json_content(request, nullptr);
} catch (const std::exception& e) {
SILK_ERROR << "exception: " << e.what() << " processing request: " << request.dump();
reply = make_json_error(request, kInternalError, e.what());
silkworm/rpc/commands/eth_api.cpp (8 changes: 6 additions & 2 deletions)
@@ -281,7 +281,7 @@ Task<void> EthereumRpcApi::handle_eth_get_block_transaction_count_by_hash(const
reply = make_json_content(request, nullptr);
}
} catch (const std::invalid_argument& iv) {
reply = make_json_content(request, 0x0);
reply = make_json_content(request, nullptr);
} catch (const std::exception& e) {
SILK_ERROR << "exception: " << e.what() << " processing request: " << request.dump();
reply = make_json_error(request, kInternalError, e.what());
@@ -318,7 +318,7 @@ Task<void> EthereumRpcApi::handle_eth_get_block_transaction_count_by_number(cons
reply = make_json_content(request, nullptr);
}
} catch (const std::invalid_argument& iv) {
reply = make_json_content(request, 0x0);
reply = make_json_content(request, nullptr);
} catch (const std::exception& e) {
SILK_ERROR << "exception: " << e.what() << " processing request: " << request.dump();
reply = make_json_error(request, kInternalError, e.what());
@@ -464,6 +464,8 @@ Task<void> EthereumRpcApi::handle_eth_get_uncle_count_by_block_hash(const nlohma
const auto ommers = block_with_hash->block.ommers.size();
reply = make_json_content(request, to_quantity(ommers));
}
} catch (const std::invalid_argument& iv) {
reply = make_json_content(request, nullptr);
} catch (const std::exception& e) {
SILK_ERROR << "exception: " << e.what() << " processing request: " << request.dump();
reply = make_json_error(request, kInternalError, e.what());
@@ -919,6 +921,8 @@ Task<void> EthereumRpcApi::handle_eth_estimate_gas(const nlohmann::json& request
const auto estimated_gas = co_await estimate_gas_oracle.estimate_gas(call, latest_block, block_number_for_gas_limit);

reply = make_json_content(request, to_quantity(estimated_gas));
} catch (const std::invalid_argument& iv) {
reply = make_json_content(request, to_quantity(0));
} catch (const rpc::EstimateGasException& e) {
SILK_ERROR << "EstimateGasException: code: " << e.error_code() << " message: " << e.message() << " processing request: " << request.dump();
if (e.data().empty()) {
silkworm/rpc/core/evm_trace.cpp (13 changes: 11 additions & 2 deletions)
@@ -1775,7 +1775,12 @@ Task<void> TraceCallExecutor::trace_filter(const TraceFilter& trace_filter, cons
filter.count = trace_filter.count;

auto block_number = trace_filter.from_block.number();
auto block_with_hash = co_await core::read_block_by_number_or_hash(block_cache_, storage, tx_, trace_filter.from_block);
std::shared_ptr<BlockWithHash> block_with_hash;
try {
block_with_hash = co_await core::read_block_by_number_or_hash(block_cache_, storage, tx_, trace_filter.from_block);
} catch (const std::invalid_argument& iv) {
block_with_hash = nullptr;
}

while (block_number <= trace_filter.to_block.number()) {
if (!block_with_hash) {
@@ -1798,7 +1803,11 @@
}

block_number++;
block_with_hash = co_await core::read_block_by_number(block_cache_, storage, block_number);
try {
block_with_hash = co_await core::read_block_by_number(block_cache_, storage, block_number);
} catch (const std::invalid_argument& iv) {
block_with_hash = nullptr;
}
}

stream.close_array();
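trace_filter applies the same convention inside its block loop: a missing from_block or next block is folded into the existing null-pointer handling by catching std::invalid_argument and leaving block_with_hash empty, instead of letting the exception escape the filter. Since the try/catch now appears twice, it could plausibly be factored into a small helper; a hedged, non-coroutine sketch of that idea (try_read_block and the bare BlockWithHash are simplified stand-ins):

    #include <functional>
    #include <memory>
    #include <stdexcept>

    struct BlockWithHash {};  // stand-in for silkworm's BlockWithHash

    // Hypothetical helper: run a lookup that throws std::invalid_argument on "not found"
    // and translate that case into the null pointer the caller already knows how to handle.
    std::shared_ptr<BlockWithHash> try_read_block(
            const std::function<std::shared_ptr<BlockWithHash>()>& lookup) {
        try {
            return lookup();
        } catch (const std::invalid_argument&) {
            return nullptr;
        }
    }

    // Usage sketch: block_with_hash = try_read_block([&] { return read_block_by_number(n); });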
silkworm/rpc/ethdb/kv/backend_providers.hpp (4 changes: 2 additions & 2 deletions)
@@ -23,13 +23,13 @@ namespace silkworm::rpc::ethdb::kv {

inline db::chain::BlockProvider block_provider(ethbackend::BackEnd* backend) {
return [backend](auto block_num, HashAsSpan hash, bool read_senders, auto& block) -> Task<bool> {
return backend->get_block(block_num, hash, read_senders, block);
co_return co_await backend->get_block(block_num, hash, read_senders, block);
};
}

inline db::chain::BlockNumberFromTxnHashProvider block_number_from_txn_hash_provider(ethbackend::BackEnd* backend) {
return [backend](HashAsSpan hash) -> Task<BlockNum> {
return backend->get_block_number_from_txn_hash(hash);
co_return co_await backend->get_block_number_from_txn_hash(hash);
};
}

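Here the provider lambdas become coroutines themselves: they co_await the backend call and co_return its result rather than handing the backend's Task straight through as a plain return value, so the whole call now runs inside the lambda's own coroutine frame. The diff does not spell out the motivation, so the following is only a minimal, self-contained sketch of the two forms, using boost::asio::awaitable as a stand-in for silkworm's Task (BackEnd and get_block below are simplified stand-ins, not the real ethbackend API):

    #include <boost/asio/awaitable.hpp>
    #include <boost/asio/co_spawn.hpp>
    #include <boost/asio/detached.hpp>
    #include <boost/asio/io_context.hpp>
    #include <cstdint>
    #include <iostream>

    namespace asio = boost::asio;

    struct BackEnd {
        asio::awaitable<bool> get_block(std::uint64_t block_num) {
            std::cout << "fetching block " << block_num << "\n";
            co_return block_num % 2 == 0;
        }
    };

    // Before: a plain lambda that just forwards the backend's awaitable.
    auto pass_through_provider(BackEnd* backend) {
        return [backend](std::uint64_t block_num) -> asio::awaitable<bool> {
            return backend->get_block(block_num);
        };
    }

    // After: the lambda is itself a coroutine (the form the diff switches to).
    auto coroutine_provider(BackEnd* backend) {
        return [backend](std::uint64_t block_num) -> asio::awaitable<bool> {
            co_return co_await backend->get_block(block_num);
        };
    }

    int main() {
        asio::io_context ioc;
        BackEnd backend;
        auto provider = coroutine_provider(&backend);  // keep the closure alive while the task runs
        asio::co_spawn(ioc, provider(8), asio::detached);
        ioc.run();
    }

One thing the sketch makes visible: with the coroutine form, the closure and its captured backend pointer must outlive the in-flight task, which holds in the real code because callers keep the returned provider object alive while awaiting it.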
