Skip to content

Commit

Permalink
node: last N headers from snapshots (#1359)
Browse files Browse the repository at this point in the history
  • Loading branch information
canepat authored Jul 23, 2023
1 parent eb6b7de commit 38ea478
Show file tree
Hide file tree
Showing 7 changed files with 51 additions and 21 deletions.
41 changes: 41 additions & 0 deletions silkworm/node/db/access_layer.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1082,6 +1082,47 @@ bool DataModel::read_block(const evmc::bytes32& hash, BlockNum height, Block& bl
return read_block_from_snapshot(height, /*read_senders=*/true, block);
}

void DataModel::for_last_n_headers(size_t n, std::function<void(BlockHeader&&)> callback) const {
    constexpr bool throw_notfound{false};

    // Read the last N headers walking *backward* from the highest block: first from
    // the database (which holds the most recent blocks), then from the snapshots
    // (which hold the frozen, older blocks). Each decoded header is handed to
    // `callback`; headers are therefore delivered in descending block-number order.
    size_t read_count{0};
    std::optional<BlockNum> last_read_number_from_db;

    const auto headers_cursor{txn_.ro_cursor(db::table::kHeaders)};
    auto data = headers_cursor->to_last(throw_notfound);
    while (data && read_count < n) {
        // Decode the RLP-encoded header payload
        BlockHeader header;
        ByteView data_view = db::from_slice(data.value);
        success_or_throw(rlp::decode(data_view, header));
        ++read_count;
        last_read_number_from_db = header.number;
        // Consume header
        callback(std::move(header));
        // Move backward
        data = headers_cursor->to_previous(throw_notfound);
    }
    if (read_count == n) {
        return;
    }

    // We've reached the first header in db but still need to read more from snapshots
    if (!repository_) {
        return;  // no snapshot repository configured: nothing more to read
    }
    if (last_read_number_from_db) {
        // The first block in db must immediately follow the last block frozen in snapshots
        ensure(*last_read_number_from_db == repository_->max_block_available() + 1,
               "db and snapshot block numbers are not contiguous");
    }
    auto block_number_in_snapshots = repository_->max_block_available();
    while (read_count < n) {
        auto header{read_header_from_snapshot(block_number_in_snapshots)};
        if (!header) return;
        ++read_count;
        // Consume header
        callback(std::move(*header));
        // Keep walking backward through the snapshots; stop at genesis to avoid
        // unsigned wrap-around of the block number
        if (block_number_in_snapshots == 0) return;
        --block_number_in_snapshots;
    }
}

bool DataModel::read_block_from_snapshot(BlockNum height, bool read_senders, Block& block) {
if (!repository_) {
return false;
Expand Down
4 changes: 4 additions & 0 deletions silkworm/node/db/access_layer.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@
// Database Access Layer
// See Erigon core/rawdb/accessors_chain.go

#include <functional>
#include <optional>
#include <span>
#include <string>
Expand Down Expand Up @@ -308,6 +309,9 @@ class DataModel {
[[nodiscard]] std::optional<intx::uint256> read_total_difficulty(BlockNum, HashAsArray hash) const;
[[nodiscard]] std::optional<intx::uint256> read_total_difficulty(ByteView key) const;

//! Read all block headers up to limit in reverse order from last, processing each one via a user defined callback
void for_last_n_headers(size_t n, std::function<void(BlockHeader&&)> callback) const;

private:
static bool read_block_from_snapshot(BlockNum height, bool read_senders, Block& block);
static std::optional<BlockHeader> read_header_from_snapshot(BlockNum height);
Expand Down
13 changes: 0 additions & 13 deletions silkworm/node/db/db_utils.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -22,19 +22,6 @@

namespace silkworm {

// Read all headers up to limit in reverse order from last, processing each one via a user defined callback
// This implementation uses DataModel and is snapshot aware
// NOTE(review): despite "in reverse order" above, the loop below visits blocks in
// ASCENDING order, from first_block_num up to highest_block_num — confirm which
// order callers actually expect before relying on this helper.
void for_last_n_headers(const db::DataModel& data_model, size_t n, std::function<void(BlockHeader&&)> callback) {
    // Highest block number known to the data model
    auto highest_block_num = data_model.highest_block_number();

    // Start so that at most n headers are visited (clamped to 0 for short chains)
    auto first_block_num = highest_block_num > n ? highest_block_num - n + 1 : 0;
    for (auto i = first_block_num; i <= highest_block_num; i++) {
        auto header = data_model.read_header(i);
        // A missing header inside [first, highest] means the table has a gap — treat as fatal
        if (!header) throw std::logic_error("the headers table must not have any holes");
        callback(std::move(*header));
    }
}

// Return (block-num, hash) of the header with the biggest total difficulty skipping bad headers
// see Erigon's HeadersUnwind method for the implementation
std::tuple<BlockNum, Hash> header_with_biggest_td(db::ROTxn& txn, const std::set<Hash>* bad_headers) {
Expand Down
2 changes: 1 addition & 1 deletion silkworm/node/stagedsync/execution_engine.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -243,7 +243,7 @@ auto ExecutionEngine::get_header(BlockNum height, Hash hash) const -> std::optio
return main_chain_.get_header(height, hash);
}

auto ExecutionEngine::get_last_headers(BlockNum limit) const -> std::vector<BlockHeader> {
auto ExecutionEngine::get_last_headers(uint64_t limit) const -> std::vector<BlockHeader> {
ensure_invariant(!fork_tracking_active_, "actual get_last_headers() impl assume it is called only at beginning");
// if fork_tracking_active_ is true, we should read blocks from cache where they are not ordered on block number

Expand Down
2 changes: 1 addition & 1 deletion silkworm/node/stagedsync/execution_engine.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -83,7 +83,7 @@ class ExecutionEngine : public Stoppable {
auto get_canonical_body(BlockNum) const -> std::optional<BlockBody>;
bool is_canonical(Hash) const;
auto get_block_number(Hash) const -> std::optional<BlockNum>;
auto get_last_headers(BlockNum limit) const -> std::vector<BlockHeader>;
auto get_last_headers(uint64_t limit) const -> std::vector<BlockHeader>;
auto get_header_td(Hash, std::optional<BlockNum> = std::nullopt) const -> std::optional<TotalDifficulty>;

protected:
Expand Down
7 changes: 3 additions & 4 deletions silkworm/node/stagedsync/forks/main_chain.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,6 @@
#include <silkworm/core/common/as_range.hpp>
#include <silkworm/infra/common/ensure.hpp>
#include <silkworm/node/db/access_layer.hpp>
#include <silkworm/node/db/db_utils.hpp>

#include "extending_fork.hpp"

Expand Down Expand Up @@ -126,7 +125,7 @@ void MainChain::insert_block(const Block& block) {
}

auto MainChain::verify_chain(Hash head_block_hash) -> VerificationResult {
SILK_TRACE << "MainChain: verifying chain " << head_block_hash.to_hex();
SILK_TRACE << "MainChain: verifying chain head=" << head_block_hash.to_hex();

// retrieve the head header
auto head_header = get_header(head_block_hash);
Expand Down Expand Up @@ -328,10 +327,10 @@ auto MainChain::get_block_progress() const -> BlockNum {
return data_model_.highest_block_number();
}

auto MainChain::get_last_headers(BlockNum limit) const -> std::vector<BlockHeader> {
auto MainChain::get_last_headers(uint64_t limit) const -> std::vector<BlockHeader> {
std::vector<BlockHeader> headers;

for_last_n_headers(data_model_, limit, [&headers](BlockHeader&& header) {
data_model_.for_last_n_headers(limit, [&headers](BlockHeader&& header) {
headers.emplace_back(std::move(header));
});

Expand Down
3 changes: 1 addition & 2 deletions silkworm/node/stagedsync/forks/main_chain.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,6 @@ class ExtendingFork;
class MainChain {
public:
explicit MainChain(asio::io_context&, NodeSettings&, db::RWAccess);
MainChain(MainChain&&);

void open(); // needed to circumvent mdbx threading model limitations
void close();
Expand Down Expand Up @@ -75,7 +74,7 @@ class MainChain {
auto get_header(BlockNum, Hash) const -> std::optional<BlockHeader>;
auto get_canonical_hash(BlockNum) const -> std::optional<Hash>;
auto get_header_td(BlockNum, Hash) const -> std::optional<TotalDifficulty>;
auto get_last_headers(BlockNum limit) const -> std::vector<BlockHeader>;
auto get_last_headers(uint64_t limit) const -> std::vector<BlockHeader>;
auto extends_last_fork_choice(BlockNum, Hash) const -> bool;
auto extends(BlockId block, BlockId supposed_parent) const -> bool;
auto is_ancestor(BlockId supposed_parent, BlockId block) const -> bool;
Expand Down

0 comments on commit 38ea478

Please sign in to comment.