diff --git a/silkworm/node/db/access_layer.cpp b/silkworm/node/db/access_layer.cpp
index 4ec42ea1e2..0eebe65619 100644
--- a/silkworm/node/db/access_layer.cpp
+++ b/silkworm/node/db/access_layer.cpp
@@ -914,7 +914,7 @@ BlockNum DataModel::highest_block_number() const {
     // Assume last block is likely on db: first lookup there
     const auto header_cursor{txn_.ro_cursor(db::table::kHeaders)};
     const auto data{header_cursor->to_last(/*.throw_not_found*/ false)};
-    if (data.done) {
+    if (data.done && data.key.size() >= sizeof(uint64_t)) {
         return endian::load_big_u64(static_cast<const uint8_t*>(data.key.data()));
     }

@@ -923,21 +923,29 @@ BlockNum DataModel::highest_block_number() const {
 }

 std::optional<BlockHeader> DataModel::read_header(BlockNum block_number, HashAsArray block_hash) const {
-    // Assume recent blocks are more probable: first lookup the block header in the db
-    auto block_header{db::read_header(txn_, block_number, block_hash)};
-    if (block_header) return block_header;
-
-    // Then search for it into the snapshots (if any)
-    return read_header_from_snapshot(block_number);
+    return read_header(block_number, Hash(block_hash));
 }

 std::optional<BlockHeader> DataModel::read_header(BlockNum block_number, const Hash& block_hash) const {
-    // Assume recent blocks are more probable: first lookup the block header in the db
-    auto block_header{db::read_header(txn_, block_number, block_hash)};
-    if (block_header) return block_header;
+    if (repository_ && block_number <= repository_->max_block_available()) {
+        auto header = read_header_from_snapshot(block_number);  // TODO: check if reading by hash is more efficient
+        if (header && header->hash() == block_hash) {  // reading by hash would avoid this heavy hash calculation
+            return header;
+        }
+        return {};
+    } else {
+        return db::read_header(txn_, block_number, block_hash);
+    }
+}

-    // Then search for it in the snapshots (if any)
-    return read_header_from_snapshot(block_number);
+std::optional<BlockHeader> DataModel::read_header(BlockNum block_number) const {
+    if (repository_ && block_number <= repository_->max_block_available()) {
+        return read_header_from_snapshot(block_number);
+    } else {
+        auto hash = db::read_canonical_hash(txn_, block_number);
+        if (!hash) return std::nullopt;  // no canonical hash at this height: header not found
+        return db::read_header(txn_, block_number, *hash);
+    }
 }

 std::optional<BlockHeader> DataModel::read_header(const Hash& block_hash) const {
diff --git a/silkworm/node/db/access_layer.hpp b/silkworm/node/db/access_layer.hpp
index 6fb999dfdb..431ff08da6 100644
--- a/silkworm/node/db/access_layer.hpp
+++ b/silkworm/node/db/access_layer.hpp
@@ -260,6 +260,9 @@ class DataModel {
     //! Read block header with the specified hash
     [[nodiscard]] std::optional<BlockHeader> read_header(const Hash& block_hash) const;

+    //! Read block header with the specified block number
+    [[nodiscard]] std::optional<BlockHeader> read_header(BlockNum block_number) const;
+
     //! Read block number from hash
     [[nodiscard]] std::optional<BlockNum> read_block_number(const Hash& block_hash) const;
diff --git a/silkworm/node/db/db_utils.cpp b/silkworm/node/db/db_utils.cpp
index 798eef9ca6..97a28fe38d 100644
--- a/silkworm/node/db/db_utils.cpp
+++ b/silkworm/node/db/db_utils.cpp
@@ -22,26 +22,18 @@

 namespace silkworm {

-// Read all headers up to limit, in reverse order from last, processing each via a user defined callback
-// alternative implementation: use cursor_for_count(cursor, WalkFuncRef, size_t max_count, CursorMoveDirection)
-void read_headers_in_reverse_order(db::ROTxn& txn, size_t limit, std::function<void(BlockHeader&&)> callback) {
-    db::PooledCursor header_table(txn, db::table::kHeaders);
-
-    bool throw_notfound = false;
-    size_t read = 0;
-    auto data = header_table.to_last(throw_notfound);
-    while (data && read < limit) {
-        // read header
-        BlockHeader header;
-        ByteView data_view = db::from_slice(data.value);
-        success_or_throw(rlp::decode(data_view, header));
-        read++;
-        // consume header
-        callback(std::move(header));
-        // move backward
-        data = header_table.to_previous(throw_notfound);
+// Read the last n headers, in forward order, processing each one via a user defined callback
+// This implementation uses DataModel and is snapshot aware
+void for_last_n_headers(const db::DataModel& data_model, size_t n, std::function<void(BlockHeader&&)> callback) {
+    auto highest_block_num = data_model.highest_block_number();
+
+    auto first_block_num = highest_block_num >= n ? highest_block_num - n + 1 : 0;
+    for (auto i = first_block_num; i <= highest_block_num; i++) {
+        auto header = data_model.read_header(i);
+        if (!header) throw std::logic_error("the headers table must not have any holes");
+        callback(std::move(*header));
     }
-}  // note: maybe we can simplify/replace the implementation with db::cursor_for_count plus lambda
+}

 // Return (block-num, hash) of the header with the biggest total difficulty skipping bad headers
 // see Erigon's HeadersUnwind method for the implementation
diff --git a/silkworm/node/db/db_utils.hpp b/silkworm/node/db/db_utils.hpp
index 3bf3751566..9f226abad0 100644
--- a/silkworm/node/db/db_utils.hpp
+++ b/silkworm/node/db/db_utils.hpp
@@ -22,12 +22,13 @@
 #include
 #include
 #include
+#include
 #include

 namespace silkworm {

-//! \brief Read all headers up to limit, in reverse order from last, processing each via a user defined callback
-void read_headers_in_reverse_order(db::ROTxn& txn, size_t limit, std::function<void(BlockHeader&&)> callback);
+//! \brief Read the last n headers, in forward order, processing each via a user defined callback
+void for_last_n_headers(const db::DataModel&, size_t n, std::function<void(BlockHeader&&)> callback);

 //! \brief Return (block-num, hash) of the header with the biggest total difficulty skipping bad headers
 std::tuple<BlockNum, Hash> header_with_biggest_td(db::ROTxn& txn, const std::set<Hash>* bad_headers = nullptr);
diff --git a/silkworm/node/stagedsync/forks/main_chain.cpp b/silkworm/node/stagedsync/forks/main_chain.cpp
index 860cd09528..958a600be7 100644
--- a/silkworm/node/stagedsync/forks/main_chain.cpp
+++ b/silkworm/node/stagedsync/forks/main_chain.cpp
@@ -325,19 +325,13 @@ auto MainChain::get_body(Hash header_hash) const -> std::optional<BlockBody> {
 }

 auto MainChain::get_block_progress() const -> BlockNum {
-    BlockNum block_progress = 0;
-
-    read_headers_in_reverse_order(tx_, 1, [&block_progress](BlockHeader&& header) {
-        block_progress = header.number;
-    });
-
-    return block_progress;
+    return data_model_.highest_block_number();
 }

 auto MainChain::get_last_headers(BlockNum limit) const -> std::vector<BlockHeader> {
     std::vector<BlockHeader> headers;

-    read_headers_in_reverse_order(tx_, limit, [&headers](BlockHeader&& header) {
+    for_last_n_headers(data_model_, limit, [&headers](BlockHeader&& header) {
         headers.emplace_back(std::move(header));
     });

diff --git a/silkworm/node/stagedsync/forks/main_chain_test.cpp b/silkworm/node/stagedsync/forks/main_chain_test.cpp
index 0c079ece80..db99f19256 100644
--- a/silkworm/node/stagedsync/forks/main_chain_test.cpp
+++ b/silkworm/node/stagedsync/forks/main_chain_test.cpp
@@ -306,6 +306,10 @@ TEST_CASE("MainChain") {
     main_chain.insert_block(block1);
     main_chain.insert_block(block2);
     main_chain.insert_block(block3);
+
+    auto block_progress = main_chain.get_block_progress();
+    REQUIRE(block_progress == block3.header.number);
+
     auto verification = main_chain.verify_chain(block3_hash);
     REQUIRE(holds_alternative<ValidChain>(verification));
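
For reviewers, a minimal usage sketch of the snapshot-aware helper introduced by this patch. The wrapper function last_header_numbers and its name are hypothetical (not part of the change); DataModel, BlockHeader, BlockNum and for_last_n_headers are the names declared in the diff above.

// Sketch only: collect the numbers of the last n headers, whether they live in
// the mdbx Headers table or in frozen snapshots. The caller does not need to
// know which store each header came from.
#include <cstddef>
#include <vector>

#include <silkworm/node/db/db_utils.hpp>

std::vector<silkworm::BlockNum> last_header_numbers(const silkworm::db::DataModel& data_model, size_t n) {
    std::vector<silkworm::BlockNum> numbers;
    // Headers are delivered in forward order, so the highest block number comes last.
    silkworm::for_last_n_headers(data_model, n, [&numbers](silkworm::BlockHeader&& header) {
        numbers.push_back(header.number);
    });
    return numbers;
}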