diff --git a/silkworm/node/db/access_layer.cpp b/silkworm/node/db/access_layer.cpp
index 8a88b826ef..2ea20281c5 100644
--- a/silkworm/node/db/access_layer.cpp
+++ b/silkworm/node/db/access_layer.cpp
@@ -1082,6 +1082,47 @@ bool DataModel::read_block(const evmc::bytes32& hash, BlockNum height, Block& bl
     return read_block_from_snapshot(height, /*read_senders=*/true, block);
 }
 
+void DataModel::for_last_n_headers(size_t n, std::function<void(BlockHeader&&)> callback) const {
+    constexpr bool throw_notfound{false};
+
+    // Try to read N headers from the database
+    size_t read_count{0};
+    std::optional<BlockNum> last_read_number_from_db;
+
+    const auto headers_cursor{txn_.ro_cursor(db::table::kHeaders)};
+    auto data = headers_cursor->to_last(throw_notfound);
+    while (data && read_count < n) {
+        // Read header
+        BlockHeader header;
+        ByteView data_view = db::from_slice(data.value);
+        success_or_throw(rlp::decode(data_view, header));
+        ++read_count;
+        last_read_number_from_db = header.number;
+        // Consume header
+        callback(std::move(header));
+        // Move backward
+        data = headers_cursor->to_previous(throw_notfound);
+    }
+    if (read_count == n) {
+        return;
+    }
+
+    // We've reached the first header in db but still need to read more from snapshots
+    if (last_read_number_from_db) {
+        ensure(*last_read_number_from_db == repository_->max_block_available() + 1,
+               "db and snapshot block numbers are not contiguous");
+    }
+    auto block_number_in_snapshots = repository_->max_block_available();
+    while (read_count < n) {
+        auto header{read_header_from_snapshot(block_number_in_snapshots)};
+        if (!header) return;
+        ++block_number_in_snapshots;
+        ++read_count;
+        // Consume header
+        callback(std::move(*header));
+    }
+}
+
 bool DataModel::read_block_from_snapshot(BlockNum height, bool read_senders, Block& block) {
     if (!repository_) {
         return false;
diff --git a/silkworm/node/db/access_layer.hpp b/silkworm/node/db/access_layer.hpp
index 26ed1fc2e4..ea3bc6b183 100644
--- a/silkworm/node/db/access_layer.hpp
+++ b/silkworm/node/db/access_layer.hpp
@@ -19,6 +19,7 @@
 // Database Access Layer
 // See Erigon core/rawdb/accessors_chain.go
 
+#include <functional>
 #include <optional>
 #include <string>
 #include <vector>
@@ -308,6 +309,9 @@ class DataModel {
     [[nodiscard]] std::optional<intx::uint256> read_total_difficulty(BlockNum, HashAsArray hash) const;
     [[nodiscard]] std::optional<intx::uint256> read_total_difficulty(ByteView key) const;
 
+    //! Read all block headers up to limit in reverse order from last, processing each one via a user defined callback
+    void for_last_n_headers(size_t n, std::function<void(BlockHeader&&)> callback) const;
+
   private:
     static bool read_block_from_snapshot(BlockNum height, bool read_senders, Block& block);
     static std::optional<BlockHeader> read_header_from_snapshot(BlockNum height);
diff --git a/silkworm/node/db/db_utils.cpp b/silkworm/node/db/db_utils.cpp
index 97a28fe38d..29d9168e09 100644
--- a/silkworm/node/db/db_utils.cpp
+++ b/silkworm/node/db/db_utils.cpp
@@ -22,19 +22,6 @@
 
 namespace silkworm {
 
-// Read all headers up to limit in reverse order from last, processing each one via a user defined callback
-// This implementation uses DataModel and is snapshot aware
-void for_last_n_headers(const db::DataModel& data_model, size_t n, std::function<void(BlockHeader&&)> callback) {
-    auto highest_block_num = data_model.highest_block_number();
-
-    auto first_block_num = highest_block_num > n ? highest_block_num - n + 1 : 0;
-    for (auto i = first_block_num; i <= highest_block_num; i++) {
-        auto header = data_model.read_header(i);
-        if (!header) throw std::logic_error("the headers table must not have any holes");
-        callback(std::move(*header));
-    }
-}
-
 // Return (block-num, hash) of the header with the biggest total difficulty skipping bad headers
 // see Erigon's HeadersUnwind method for the implementation
 std::tuple<BlockNum, Hash> header_with_biggest_td(db::ROTxn& txn, const std::set<Hash>* bad_headers) {
diff --git a/silkworm/node/stagedsync/execution_engine.cpp b/silkworm/node/stagedsync/execution_engine.cpp
index 4f9564b546..24f56905d0 100644
--- a/silkworm/node/stagedsync/execution_engine.cpp
+++ b/silkworm/node/stagedsync/execution_engine.cpp
@@ -243,7 +243,7 @@ auto ExecutionEngine::get_header(BlockNum height, Hash hash) const -> std::optio
     return main_chain_.get_header(height, hash);
 }
 
-auto ExecutionEngine::get_last_headers(BlockNum limit) const -> std::vector<BlockHeader> {
+auto ExecutionEngine::get_last_headers(uint64_t limit) const -> std::vector<BlockHeader> {
     ensure_invariant(!fork_tracking_active_, "actual get_last_headers() impl assume it is called only at beginning");
     // if fork_tracking_active_ is true, we should read blocks from cache where they are not ordered on block number
 
diff --git a/silkworm/node/stagedsync/execution_engine.hpp b/silkworm/node/stagedsync/execution_engine.hpp
index fa54a44ce4..13912461af 100644
--- a/silkworm/node/stagedsync/execution_engine.hpp
+++ b/silkworm/node/stagedsync/execution_engine.hpp
@@ -83,7 +83,7 @@ class ExecutionEngine : public Stoppable {
     auto get_canonical_body(BlockNum) const -> std::optional<BlockBody>;
     bool is_canonical(Hash) const;
     auto get_block_number(Hash) const -> std::optional<BlockNum>;
-    auto get_last_headers(BlockNum limit) const -> std::vector<BlockHeader>;
+    auto get_last_headers(uint64_t limit) const -> std::vector<BlockHeader>;
     auto get_header_td(Hash, std::optional<BlockNum> = std::nullopt) const -> std::optional<TotalDifficulty>;
 
   protected:
diff --git a/silkworm/node/stagedsync/forks/main_chain.cpp b/silkworm/node/stagedsync/forks/main_chain.cpp
index 958a600be7..3979e5dad3 100644
--- a/silkworm/node/stagedsync/forks/main_chain.cpp
+++ b/silkworm/node/stagedsync/forks/main_chain.cpp
@@ -21,7 +21,6 @@
 #include <silkworm/infra/common/ensure.hpp>
 #include <silkworm/infra/common/log.hpp>
 #include <silkworm/node/db/access_layer.hpp>
-#include <silkworm/node/db/db_utils.hpp>
 
 #include "extending_fork.hpp"
 
@@ -126,7 +125,7 @@ void MainChain::insert_block(const Block& block) {
 }
 
 auto MainChain::verify_chain(Hash head_block_hash) -> VerificationResult {
-    SILK_TRACE << "MainChain: verifying chain " << head_block_hash.to_hex();
+    SILK_TRACE << "MainChain: verifying chain head=" << head_block_hash.to_hex();
 
     // retrieve the head header
     auto head_header = get_header(head_block_hash);
@@ -328,10 +327,10 @@ auto MainChain::get_block_progress() const -> BlockNum {
     return data_model_.highest_block_number();
 }
 
-auto MainChain::get_last_headers(BlockNum limit) const -> std::vector<BlockHeader> {
+auto MainChain::get_last_headers(uint64_t limit) const -> std::vector<BlockHeader> {
     std::vector<BlockHeader> headers;
 
-    for_last_n_headers(data_model_, limit, [&headers](BlockHeader&& header) {
+    data_model_.for_last_n_headers(limit, [&headers](BlockHeader&& header) {
         headers.emplace_back(std::move(header));
     });
diff --git a/silkworm/node/stagedsync/forks/main_chain.hpp b/silkworm/node/stagedsync/forks/main_chain.hpp
index e0ef2764d1..d0e1951281 100644
--- a/silkworm/node/stagedsync/forks/main_chain.hpp
+++ b/silkworm/node/stagedsync/forks/main_chain.hpp
@@ -46,7 +46,6 @@ class ExtendingFork;
 class MainChain {
   public:
     explicit MainChain(asio::io_context&, NodeSettings&, db::RWAccess);
-    MainChain(MainChain&&);
 
     void open();  // needed to circumvent mdbx threading model limitations
     void close();
@@ -75,7 +74,7 @@ class MainChain {
     auto get_header(BlockNum, Hash) const -> std::optional<BlockHeader>;
     auto get_canonical_hash(BlockNum) const -> std::optional<Hash>;
     auto get_header_td(BlockNum, Hash) const -> std::optional<TotalDifficulty>;
-    auto get_last_headers(BlockNum limit) const -> std::vector<BlockHeader>;
+    auto get_last_headers(uint64_t limit) const -> std::vector<BlockHeader>;
     auto extends_last_fork_choice(BlockNum, Hash) const -> bool;
     auto extends(BlockId block, BlockId supposed_parent) const -> bool;
     auto is_ancestor(BlockId supposed_parent, BlockId block) const -> bool;