sync: treat clang-tidy warnings as errors (#1740)
yperbasis authored Jan 8, 2024
1 parent 2651b6a commit ef35532
Showing 12 changed files with 54 additions and 57 deletions.
4 changes: 2 additions & 2 deletions silkworm/rpc/commands/erigon_api_test.cpp
@@ -75,7 +75,7 @@ TEST_CASE_METHOD(ErigonRpcApiTest, "ErigonRpcApi::handle_erigon_get_block_by_tim
})"_json,
reply));

std::string expected_rsp{"{\"jsonrpc\":\"2.0\",\"id\":1,\"error\":{\"code\":100,\"message\":\"invalid erigon_getBlockByTimestamp params: []\"}}"};
std::string expected_rsp{R"({"jsonrpc":"2.0","id":1,"error":{"code":100,"message":"invalid erigon_getBlockByTimestamp params: []"}})"};
CHECK(reply == expected_rsp);
}
SECTION("request params are incomplete: return error") {
@@ -92,7 +92,7 @@ TEST_CASE_METHOD(ErigonRpcApiTest, "ErigonRpcApi::handle_erigon_get_block_by_tim
"id":1,
"error":{"code":100,"message":"invalid erigon_getBlockByTimestamp params: [\"1658865942\"]"}
})"_json;
std::string expected_rsp{"{\"jsonrpc\":\"2.0\",\"id\":1,\"error\":{\"code\":100,\"message\":\"invalid erigon_getBlockByTimestamp params: [\\\"1658865942\\\"]\"}}"};
std::string expected_rsp{R"({"jsonrpc":"2.0","id":1,"error":{"code":100,"message":"invalid erigon_getBlockByTimestamp params: [\"1658865942\"]"}})"};
CHECK(reply == expected_rsp);
}
SECTION("request 1st param is invalid: return error") {
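Note on the two expected_rsp changes above: they swap hand-escaped JSON strings for C++ raw string literals; the runtime value is identical, only the spelling changes (clang-tidy's modernize-raw-string-literal check typically suggests this rewrite). A minimal, self-contained sketch of the equivalence; identifiers here are illustrative, not taken from the test:

#include <cassert>
#include <string>

int main() {
    // Escaped form: every embedded quote must be escaped by hand.
    std::string escaped{"{\"jsonrpc\":\"2.0\",\"id\":1}"};
    // Raw string literal: no escape processing between R"( and )".
    std::string raw{R"({"jsonrpc":"2.0","id":1})"};
    assert(escaped == raw);  // same contents, more readable spelling
    return 0;
}
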
4 changes: 4 additions & 0 deletions silkworm/sync/CMakeLists.txt
@@ -17,6 +17,10 @@
find_package(absl REQUIRED)
find_package(magic_enum REQUIRED)

if(SILKWORM_CLANG_TIDY)
set(CMAKE_CXX_CLANG_TIDY "${CLANG_TIDY};-warnings-as-errors=*")
endif()

file(GLOB_RECURSE SILKWORM_SYNC_SRC CONFIGURE_DEPENDS "*.cpp" "*.hpp")
list(FILTER SILKWORM_SYNC_SRC EXCLUDE REGEX "_test\\.cpp$")

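With -warnings-as-errors=* appended to the clang-tidy command line for this target, every clang-tidy diagnostic raised while building silkworm/sync now fails the build instead of merely printing a warning; the remaining hunks in this commit clean up the findings that this surfaced. A rough illustration of the effect, assuming a check such as readability-container-size-empty is enabled in the project's .clang-tidy (the check list is not shown on this page):

#include <vector>

// readability-container-size-empty would flag the size() == 0 comparison;
// under -warnings-as-errors=* that single finding aborts the compilation.
bool is_done(const std::vector<int>& queue) {
    return queue.size() == 0;  // preferred form: return queue.empty();
}
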
14 changes: 6 additions & 8 deletions silkworm/sync/block_exchange.cpp
@@ -31,8 +31,6 @@

namespace silkworm {

using silkworm::sentry::api::MessageFromPeer;

BlockExchange::BlockExchange(SentryClient& sentry, const db::ROAccess& dba, const ChainConfig& chain_config)
: db_access_{dba},
sentry_{sentry},
@@ -149,8 +147,8 @@ void BlockExchange::execution_loop() {
stop();
}

size_t BlockExchange::request_headers(time_point_t tp, size_t max_nr_of_requests) {
if (max_nr_of_requests == 0) return 0;
size_t BlockExchange::request_headers(time_point_t tp, size_t max_requests) {
if (max_requests == 0) return 0;
if (!downloading_active_) return 0;
if (header_chain_.in_sync()) return 0;

@@ -169,13 +167,13 @@ size_t BlockExchange::request_headers(time_point_t tp, size_t max_nr_of_requests
if (request_message->nack_requests() > 0) break;

sent_requests++;
} while (sent_requests < max_nr_of_requests);
} while (sent_requests < max_requests);

return sent_requests;
}

size_t BlockExchange::request_bodies(time_point_t tp, size_t max_nr_of_requests) {
if (max_nr_of_requests == 0) return 0;
size_t BlockExchange::request_bodies(time_point_t tp, size_t max_requests) {
if (max_requests == 0) return 0;
if (!downloading_active_) return 0;
if (body_sequence_.has_completed()) return 0;

@@ -194,7 +192,7 @@ size_t BlockExchange::request_bodies(time_point_t tp, size_t max_nr_of_requests)
if (request_message->nack_requests() > 0) break;

sent_requests++;
} while (sent_requests < max_nr_of_requests);
} while (sent_requests < max_requests);

return sent_requests;
}
7 changes: 2 additions & 5 deletions silkworm/sync/internals/body_sequence.cpp
@@ -24,9 +24,6 @@

namespace silkworm {

BodySequence::BodySequence() {
}

void BodySequence::current_state(BlockNum highest_in_db) {
highest_body_in_output_ = highest_in_db;
target_height_ = highest_in_db;
@@ -158,7 +155,7 @@ std::shared_ptr<OutboundMessage> BodySequence::request_bodies(time_point_t tp) {

statistics_.requested_items += packet.request.size();

if (packet.request.size() == 0) {
if (packet.request.empty()) {
retrieval_condition_ = "no more bodies to request";
if (retrieval_condition_ != prev_condition) {
SILK_TRACE << "BodySequence, no more bodies to request";
@@ -251,7 +248,7 @@ void BodySequence::make_new_requests(GetBlockBodiesPacket66& packet, BlockNum& m

//! Save headers of which it has to download bodies
void BodySequence::download_bodies(const Headers& headers) {
for (auto header : headers) {
for (const auto& header : headers) {
BlockNum bn = header->number;

BodyRequest new_request;
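The body_sequence.cpp hunks above show two recurring clean-ups: request.empty() replaces request.size() == 0, and the range-for over headers binds by const reference so each std::shared_ptr is not copied (and its atomic refcount bumped) per iteration. The checks behind them are likely readability-container-size-empty and performance-for-range-copy, though the project's check list is not shown here. A small standalone sketch with invented types:

#include <memory>
#include <vector>

struct Header { unsigned number{0}; };
using Headers = std::vector<std::shared_ptr<Header>>;

unsigned last_number(const Headers& headers) {
    if (headers.empty()) return 0;        // rather than headers.size() == 0
    unsigned last = 0;
    for (const auto& header : headers) {  // const&: no shared_ptr copy per element
        last = header->number;
    }
    return last;
}
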
2 changes: 1 addition & 1 deletion silkworm/sync/internals/body_sequence.hpp
@@ -53,7 +53,7 @@ inline std::vector<std::shared_ptr<Block>> to_plain_blocks(const Blocks& blocks)
*/
class BodySequence {
public:
explicit BodySequence();
explicit BodySequence() = default;
~BodySequence() = default;

// sync current state - this must be done at header forward
2 changes: 1 addition & 1 deletion silkworm/sync/internals/chain_fork_view.hpp
@@ -36,7 +36,7 @@ class ChainForkView {
public:
ChainForkView(ChainHead headers_head);

void reset_head(ChainHead headers_head);
void reset_head(ChainHead new_head);

TotalDifficulty add(const BlockHeader&);
TotalDifficulty add(const BlockHeader&, TotalDifficulty parent_td);
43 changes: 20 additions & 23 deletions silkworm/sync/internals/header_chain.cpp
@@ -17,6 +17,7 @@
#include "header_chain.hpp"

#include <algorithm>
#include <ranges>

#include <gsl/util>

@@ -495,7 +496,7 @@ std::shared_ptr<OutboundMessage> HeaderChain::anchor_extension_request(time_poin
return send_penalties;
}

void HeaderChain::invalidate(std::shared_ptr<Anchor> anchor) {
void HeaderChain::invalidate(const std::shared_ptr<Anchor>& anchor) {
remove(anchor);
// remove upwards
auto& link_to_remove = anchor->links;
@@ -522,7 +523,7 @@ std::optional<GetBlockHeadersPacket66> HeaderChain::save_external_announce(Hash
request.request.skip = 0;
request.request.reverse = false;

return {std::move(request)};
return request;
}

void HeaderChain::request_nack(const GetBlockHeadersPacket66& packet) {
@@ -557,7 +558,7 @@ void HeaderChain::request_nack(const GetBlockHeadersPacket66& packet) {
bool HeaderChain::has_link(Hash hash) { return (links_.find(hash) != links_.end()); }

bool HeaderChain::find_bad_header(const std::vector<BlockHeader>& headers) {
for (auto& header : headers) {
return std::ranges::any_of(headers, [this](const BlockHeader& header) {
if (is_zero(header.parent_hash) && header.number != 0) {
log::Warning("HeaderStage") << "received malformed header: " << header.number;
return true;
@@ -573,8 +574,8 @@ bool HeaderChain::find_bad_header(const std::vector<BlockHeader>& headers) {
log::Warning("HeaderStage") << "received bad header: " << header.number;
return true;
}
}
return false;
return false;
});
}

std::tuple<Penalty, HeaderChain::RequestMoreHeaders> HeaderChain::accept_headers(const std::vector<BlockHeader>& headers, uint64_t requestId, const PeerId& peer_id) {
@@ -792,8 +793,8 @@ std::optional<std::shared_ptr<Link>> HeaderChain::get_link(const Hash& hash) con
}

// find_anchors find the anchor the link is anchored to
std::tuple<std::optional<std::shared_ptr<Anchor>>, HeaderChain::DeepLink> HeaderChain::find_anchor(std::shared_ptr<Link> link) const {
auto parent_link = link;
std::tuple<std::optional<std::shared_ptr<Anchor>>, HeaderChain::DeepLink> HeaderChain::find_anchor(const std::shared_ptr<Link>& link) const {
std::shared_ptr<Link> parent_link = link;
decltype(links_.begin()) it;
do {
it = links_.find(parent_link->header->parent_hash);
@@ -817,8 +818,8 @@ std::tuple<std::optional<std::shared_ptr<Anchor>>, HeaderChain::DeepLink> Header
return {a->second, parent_link};
}

void HeaderChain::connect(std::shared_ptr<Link> attachment_link, Segment::Slice segment_slice,
std::shared_ptr<Anchor> anchor) {
void HeaderChain::connect(const std::shared_ptr<Link>& attachment_link, Segment::Slice segment_slice,
const std::shared_ptr<Anchor>& anchor) {
using std::to_string;
// Extend up

@@ -835,8 +836,7 @@ void HeaderChain::connect(std::shared_ptr<Link> attachment_link, Segment::Slice

// Iterate over headers backwards (from parents towards children)
std::shared_ptr<Link> prev_link = attachment_link;
for (auto h = segment_slice.rbegin(); h != segment_slice.rend(); h++) {
auto header = *h;
for (auto header : std::ranges::reverse_view(segment_slice)) {
bool persisted = false;
auto link = add_header_as_link(*header, persisted);
if (prev_link->persisted) insert_list_.push(link);
@@ -872,7 +872,7 @@ void HeaderChain::connect(std::shared_ptr<Link> attachment_link, Segment::Slice
<< (anchor_preverified ? " (V)" : "");
}

HeaderChain::RequestMoreHeaders HeaderChain::extend_down(Segment::Slice segment_slice, std::shared_ptr<Anchor> anchor) {
HeaderChain::RequestMoreHeaders HeaderChain::extend_down(Segment::Slice segment_slice, const std::shared_ptr<Anchor>& anchor) {
// Add or find new anchor
auto new_anchor_header = *segment_slice.rbegin(); // lowest header
bool check_limits = false;
@@ -886,8 +886,7 @@ HeaderChain::RequestMoreHeaders HeaderChain::extend_down(Segment::Slice segment_
// Iterate over headers backwards (from parents towards children)
// Add all headers in the segments as links to this anchor
std::shared_ptr<Link> prev_link;
for (auto h = segment_slice.rbegin(); h != segment_slice.rend(); h++) {
auto header = *h;
for (auto header : std::ranges::reverse_view(segment_slice)) {
bool persisted = false;
auto link = add_header_as_link(*header, persisted);
if (!prev_link)
@@ -913,7 +912,7 @@ HeaderChain::RequestMoreHeaders HeaderChain::extend_down(Segment::Slice segment_
return !pre_existing;
}

void HeaderChain::extend_up(std::shared_ptr<Link> attachment_link, Segment::Slice segment_slice) {
void HeaderChain::extend_up(const std::shared_ptr<Link>& attachment_link, Segment::Slice segment_slice) {
using std::to_string;
// Search for bad headers
if (bad_headers_.contains(attachment_link->hash)) {
@@ -927,8 +926,7 @@ void HeaderChain::extend_up(std::shared_ptr<Link> attachment_link, Segment::Slic

// Iterate over headers backwards (from parents towards children)
std::shared_ptr<Link> prev_link = attachment_link;
for (auto h = segment_slice.rbegin(); h != segment_slice.rend(); h++) {
auto header = *h;
for (auto header : std::ranges::reverse_view(segment_slice)) {
bool persisted = false;
auto link = add_header_as_link(*header, persisted);
if (prev_link->persisted) insert_list_.push(link);
@@ -961,12 +959,11 @@ HeaderChain::RequestMoreHeaders HeaderChain::new_anchor(Segment::Slice segment_s
// Add or find anchor
auto anchor_header = *segment_slice.rbegin(); // lowest header
bool check_limits = true;
auto [anchor, pre_existing] = add_anchor_if_not_present(*anchor_header, peerId, check_limits);
auto [anchor, pre_existing] = add_anchor_if_not_present(*anchor_header, std::move(peerId), check_limits);

// Iterate over headers backwards (from parents towards children)
std::shared_ptr<Link> prev_link;
for (auto h = segment_slice.rbegin(); h != segment_slice.rend(); h++) {
auto header = *h;
for (auto header : std::ranges::reverse_view(segment_slice)) {
bool persisted = false;
auto link = add_header_as_link(*header, persisted);
if (!prev_link)
@@ -1009,7 +1006,7 @@ std::tuple<std::shared_ptr<Anchor>, HeaderChain::Pre_Existing> HeaderChain::add_
", limit: " + to_string(anchor_limit));
}

std::shared_ptr<Anchor> anchor = std::make_shared<Anchor>(anchor_header, peerId);
std::shared_ptr<Anchor> anchor = std::make_shared<Anchor>(anchor_header, std::move(peerId));
if (anchor->blockHeight > 0) {
anchors_[anchor_header.parent_hash] = anchor;
anchor_queue_.push(anchor);
@@ -1027,7 +1024,7 @@ std::shared_ptr<Link> HeaderChain::add_header_as_link(const BlockHeader& header,
return link;
}

void HeaderChain::remove(std::shared_ptr<Anchor> anchor) {
void HeaderChain::remove(const std::shared_ptr<Anchor>& anchor) {
size_t erased1 = anchors_.erase(anchor->parentHash);
bool erased2 = anchor_queue_.erase(anchor);

@@ -1056,7 +1053,7 @@ uint64_t HeaderChain::generate_request_id() {
return request_id_prefix * 10000 + request_count;
}

uint64_t HeaderChain::is_valid_request_id(uint64_t request_id) {
uint64_t HeaderChain::is_valid_request_id(uint64_t request_id) const {
uint64_t prefix = request_id / 10000;
return request_id_prefix == prefix;
}
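Two C++20 ranges idioms recur in the header_chain.cpp hunks: std::ranges::any_of replaces the early-returning loop in find_bad_header, and std::ranges::reverse_view replaces manual rbegin()/rend() iteration over a segment slice (hence the new <ranges> include). A condensed sketch of both patterns; the types are simplified stand-ins, not the actual Silkworm declarations:

#include <algorithm>
#include <cstdio>
#include <memory>
#include <ranges>
#include <vector>

struct BlockHeader { unsigned number{0}; };

// any_of: the lambda reports the first "bad" header, mirroring the loop it replaces.
bool find_bad_header(const std::vector<BlockHeader>& headers) {
    return std::ranges::any_of(headers, [](const BlockHeader& header) {
        return header.number == 0;  // stand-in for the real validity checks
    });
}

// reverse_view: iterate from parents towards children without rbegin()/rend();
// std::views::reverse is the adaptor-object spelling of std::ranges::reverse_view.
void walk_backwards(const std::vector<std::shared_ptr<BlockHeader>>& slice) {
    for (const auto& header : std::views::reverse(slice)) {
        std::printf("%u\n", header->number);
    }
}
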
14 changes: 7 additions & 7 deletions silkworm/sync/internals/header_chain.hpp
@@ -127,14 +127,14 @@ class HeaderChain {
std::tuple<std::optional<std::shared_ptr<Link>>, End> find_link(const Segment&, size_t start) const;
std::optional<std::shared_ptr<Link>> get_link(const Hash& hash) const;
using DeepLink = std::shared_ptr<Link>;
std::tuple<std::optional<std::shared_ptr<Anchor>>, DeepLink> find_anchor(std::shared_ptr<Link> link) const;
std::tuple<std::optional<std::shared_ptr<Anchor>>, DeepLink> find_anchor(const std::shared_ptr<Link>& link) const;

void reduce_links_to(size_t limit);
void reduce_persisted_links_to(size_t limit);

using Pre_Existing = bool;
void invalidate(std::shared_ptr<Anchor>);
void remove(std::shared_ptr<Anchor>);
void invalidate(const std::shared_ptr<Anchor>&);
void remove(const std::shared_ptr<Anchor>&);
bool find_bad_header(const std::vector<BlockHeader>&);
std::shared_ptr<Link> add_header_as_link(const BlockHeader& header, bool persisted);
std::tuple<std::shared_ptr<Anchor>, Pre_Existing> add_anchor_if_not_present(const BlockHeader& header, PeerId, bool check_limits);
@@ -153,9 +153,9 @@ class HeaderChain {
};
VerificationResult verify(const Link& link);

void connect(std::shared_ptr<Link>, Segment::Slice, std::shared_ptr<Anchor>);
RequestMoreHeaders extend_down(Segment::Slice, std::shared_ptr<Anchor>);
void extend_up(std::shared_ptr<Link>, Segment::Slice);
void connect(const std::shared_ptr<Link>&, Segment::Slice, const std::shared_ptr<Anchor>&);
RequestMoreHeaders extend_down(Segment::Slice, const std::shared_ptr<Anchor>&);
void extend_up(const std::shared_ptr<Link>&, Segment::Slice);
RequestMoreHeaders new_anchor(Segment::Slice, PeerId);

OldestFirstAnchorQueue anchor_queue_; // Priority queue of anchors used to sequence the header requests
@@ -178,7 +178,7 @@ class HeaderChain {
time_point_t last_nack_;

uint64_t generate_request_id();
uint64_t is_valid_request_id(uint64_t request_id);
uint64_t is_valid_request_id(uint64_t request_id) const;

uint64_t request_id_prefix;
uint64_t request_count = 0;
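Most of the signature changes in header_chain.hpp and header_chain.cpp follow one pattern: std::shared_ptr parameters that the callee only reads are now taken by const reference instead of by value, which skips an atomic reference-count increment and decrement on every call. The usual driver is clang-tidy's performance-unnecessary-value-param check, though that is inferred rather than stated in the diff. A minimal sketch, not the real classes:

#include <memory>

struct Anchor { int block_height{0}; };

// By value: copies the shared_ptr, touching the atomic refcount on entry and exit.
int height_by_value(std::shared_ptr<Anchor> anchor) { return anchor->block_height; }

// By const reference: same behaviour for a read-only callee, no copy, no refcount traffic.
int height_by_cref(const std::shared_ptr<Anchor>& anchor) { return anchor->block_height; }
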
10 changes: 5 additions & 5 deletions silkworm/sync/internals/statistics.cpp
@@ -76,11 +76,11 @@ void Network_Statistics::inaccurate_copy(const Network_Statistics& other) {
malformed_msgs = other.malformed_msgs.load();
}

#define SHOW(LABEL, VARIABLE, FACTOR) \
(os << std::setfill('_') << std::right \
<< ", " LABEL ":" << std::setw(5) << curr.VARIABLE.load() / FACTOR \
<< "(+" << std::setw(2) << (curr.VARIABLE.load() - prev.VARIABLE.load()) / FACTOR \
<< ", +" << std::setw(2) << (curr.VARIABLE.load() - prev.VARIABLE.load()) / FACTOR / elapsed_s << "/s)")
#define SHOW(LABEL, VARIABLE, FACTOR) \
(os << std::setfill('_') << std::right \
<< ", " LABEL ":" << std::setw(5) << curr.VARIABLE.load() / (FACTOR) \
<< "(+" << std::setw(2) << (curr.VARIABLE.load() - prev.VARIABLE.load()) / (FACTOR) \
<< ", +" << std::setw(2) << (curr.VARIABLE.load() - prev.VARIABLE.load()) / (FACTOR) / elapsed_s << "/s)")

std::ostream& operator<<(std::ostream& os, std::tuple<Network_Statistics&, Network_Statistics&, seconds_t> stats) {
Network_Statistics& prev = get<0>(stats);
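The SHOW macro change only adds parentheses around the FACTOR parameter, which guards against operator-precedence surprises if the macro is ever invoked with an expression rather than a single token (bugprone-macro-parentheses is the clang-tidy check that usually asks for this). A tiny illustration of the failure mode the parentheses prevent; the macros here are invented for the example:

#include <cassert>

#define DIV_UNSAFE(VALUE, FACTOR) ((VALUE) / FACTOR)
#define DIV_SAFE(VALUE, FACTOR) ((VALUE) / (FACTOR))

int main() {
    // With FACTOR written as the expression 2 + 2, the unparenthesised form
    // expands to 12 / 2 + 2 == 8, while the safe form yields 12 / (2 + 2) == 3.
    assert(DIV_UNSAFE(12, 2 + 2) == 8);
    assert(DIV_SAFE(12, 2 + 2) == 3);
    return 0;
}
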
5 changes: 3 additions & 2 deletions silkworm/sync/messages/outbound_new_block.cpp
@@ -23,14 +23,15 @@

namespace silkworm {

OutboundNewBlock::OutboundNewBlock(Blocks b, bool f) : blocks_to_announce_{std::move(b)}, is_first_sync_{f} {}
OutboundNewBlock::OutboundNewBlock(Blocks b, bool is_first_sync)
: blocks_to_announce_{std::move(b)}, is_first_sync_{is_first_sync} {}

void OutboundNewBlock::execute(db::ROAccess, HeaderChain&, BodySequence&, SentryClient& sentry) {
if (is_first_sync_) return; // Don't announce blocks during first sync

for (auto& block_ptr : blocks_to_announce_) {
const BlockEx& block = *block_ptr;
NewBlockPacket packet{{block, block.header}, block.td};
NewBlockPacket packet{block, block.td}; // NOLINT(cppcoreguidelines-slicing)
auto peers = send_packet(sentry, std::move(packet));

// no peers available
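The NewBlockPacket construction above now passes the BlockEx directly and marks the resulting derived-to-base conversion with // NOLINT(cppcoreguidelines-slicing), telling clang-tidy that the slice is intentional so warnings-as-errors does not fail the build on it. A generic sketch of the suppression mechanism, with types invented for illustration:

struct Block { unsigned number{0}; };
struct BlockEx : Block { unsigned total_difficulty{0}; };

struct Packet {
    Block block;  // stores a plain Block; the extra BlockEx state is deliberately dropped
};

Packet make_packet(const BlockEx& block) {
    return Packet{block};  // NOLINT(cppcoreguidelines-slicing) -- slicing is intended here
}
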
2 changes: 1 addition & 1 deletion silkworm/sync/messages/outbound_new_block_hashes.cpp
@@ -24,7 +24,7 @@

namespace silkworm {

OutboundNewBlockHashes::OutboundNewBlockHashes(bool f) : is_first_sync_{f} {}
OutboundNewBlockHashes::OutboundNewBlockHashes(bool is_first_sync) : is_first_sync_{is_first_sync} {}

void OutboundNewBlockHashes::execute(db::ROAccess, HeaderChain& hc, BodySequence&, SentryClient& sentry) {
auto& announces_to_do = hc.announces_to_do();
4 changes: 2 additions & 2 deletions silkworm/sync/sentry_client.cpp
@@ -151,9 +151,9 @@ Task<void> resolve_promise_with_awaitable_result(std::promise<void>& promise, Ta
}

template <typename T>
static T sync_spawn(concurrency::TaskGroup& tasks, any_io_executor executor, Task<T> task) {
static T sync_spawn(concurrency::TaskGroup& tasks, const any_io_executor& executor, Task<T> task) {
std::promise<T> promise;
tasks.spawn(std::move(executor), resolve_promise_with_awaitable_result(promise, std::move(task)));
tasks.spawn(executor, resolve_promise_with_awaitable_result(promise, std::move(task)));
return promise.get_future().get();
}

