diff --git a/cmd/dev/check_changes.cpp b/cmd/dev/check_changes.cpp
index 156c10ef46..33c0467362 100644
--- a/cmd/dev/check_changes.cpp
+++ b/cmd/dev/check_changes.cpp
@@ -194,8 +194,8 @@ int main(int argc, char* argv[]) {
             auto calculated_it{calculated_storage_changes.cbegin()};
             auto db_it{db_storage_changes.cbegin()};
             for (; calculated_it != calculated_storage_changes.cend() && db_it != db_storage_changes.cend(); ++calculated_it, ++db_it) {
-                auto calculated_change{*calculated_it};
-                auto stored_change{*db_it};
+                const auto& calculated_change{*calculated_it};
+                const auto& stored_change{*db_it};
                 if (calculated_change != stored_change) {
                     std::cout << "Mismatch number " << mismatch_count + 1 << ") is:\n- calculated change:\n";
                     print_storage_changes(calculated_change.first, calculated_change.second);
diff --git a/cmd/dev/check_pow.cpp b/cmd/dev/check_pow.cpp
index e1268bcfe5..f12aaf94f0 100644
--- a/cmd/dev/check_pow.cpp
+++ b/cmd/dev/check_pow.cpp
@@ -61,7 +61,7 @@ int main(int argc, char* argv[]) {
     app.add_flag("--debug", options.debug, "May print some debug/trace info.");
-    CLI11_PARSE(app, argc, argv);
+    CLI11_PARSE(app, argc, argv)
     if (options.debug) {
         log::set_verbosity(log::Level::kDebug);
@@ -142,7 +142,7 @@ int main(int argc, char* argv[]) {
            std::cout << "\n Pow Verification error on block " << block_num << " : \n"
                      << "Error: " << ec << "\n"
                      << "Final hash " << to_hex(f) << " expected below " << to_hex(b) << "\n"
-                     << "Mix hash " << to_hex(m) << " expected mix " << to_hex(m) << std::endl;
+                     << "Mix hash " << to_hex(m) << " expected mix " << to_hex(m) << "\n";
            break;
        }
diff --git a/cmd/dev/check_tx_lookup.cpp b/cmd/dev/check_tx_lookup.cpp
index 6073f3c481..fe2a17fde2 100644
--- a/cmd/dev/check_tx_lookup.cpp
+++ b/cmd/dev/check_tx_lookup.cpp
@@ -21,7 +21,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
@@ -41,7 +40,7 @@ int main(int argc, char* argv[]) {
         ->capture_default_str()
         ->check(CLI::Range(1u, UINT32_MAX));
-    CLI11_PARSE(app, argc, argv);
+    CLI11_PARSE(app, argc, argv)
     auto data_dir{DataDirectory::from_chaindata(chaindata)};
     data_dir.deploy();
diff --git a/cmd/dev/db_toolbox.cpp b/cmd/dev/db_toolbox.cpp
index 1e6e8d57b8..a6c6d33fc0 100644
--- a/cmd/dev/db_toolbox.cpp
+++ b/cmd/dev/db_toolbox.cpp
@@ -249,7 +249,7 @@ bool user_confirmation(const std::string& message = {"Confirm ?"}) {
         if (std::regex_search(user_input, matches, pattern, std::regex_constants::match_default)) {
             break;
         }
-        std::cout << "Hmmm... maybe you didn't read carefully. I repeat:" << std::endl;
+        std::cout << "Unexpected user input: " << user_input << "\n";
     } while (true);
     if (matches[2].length()) {
@@ -272,7 +272,7 @@ void do_clear(db::EnvConfig& config, bool dry, bool always_yes, const std::vecto
     for (const auto& tablename : table_names) {
         if (!db::has_map(txn, tablename.c_str())) {
-            std::cout << "Table " << tablename << " not found" << std::endl;
+            std::cout << "Table " << tablename << " not found\n";
             continue;
         }
@@ -280,7 +280,7 @@ void do_clear(db::EnvConfig& config, bool dry, bool always_yes, const std::vecto
         size_t rcount{txn.get_map_stat(table_map).ms_entries};
         if (!rcount && !drop) {
-            std::cout << " Table " << tablename << " is already empty. Skipping" << std::endl;
+            std::cout << " Table " << tablename << " is already empty. Skipping\n";
             continue;
         }
@@ -290,12 +290,12 @@
         if (!always_yes) {
             if (!user_confirmation()) {
-                std::cout << " Skipped." << std::endl;
+                std::cout << " Skipped.\n";
                 continue;
             }
         }
-        std::cout << (dry ? "Simulating commit ..." : "Committing ...") << std::endl;
+        std::cout << (dry ? "Simulating commit ..." : "Committing ...") << "\n";
         if (drop) {
             txn.drop_map(table_map);
@@ -387,12 +387,11 @@ void do_scan(db::EnvConfig& config) {
     auto tablesInfo{get_tables_info(txn)};
-    std::cout << "\n Database tables : " << tablesInfo.tables.size() << "\n"
-              << std::endl;
+    std::cout << "\n Database tables : " << tablesInfo.tables.size() << "\n\n";
     if (!tablesInfo.tables.empty()) {
         std::cout << (boost::format(fmt_hdr) % "Dbi" % "Table name" % "Progress" % "Keys" % "Data" % "Total")
-                  << std::endl;
+                  << "\n";
         std::cout << (boost::format(fmt_hdr) % std::string(3, '-') % std::string(24, '-') % std::string(50, '-') %
                       std::string(13, '-') % std::string(13, '-') % std::string(13, '-'))
                   << std::flush;
@@ -445,7 +444,7 @@ void do_scan(db::EnvConfig& config) {
     }
     std::cout << "\n"
-              << (SignalHandler::signalled() ? "Aborted" : "Done") << " !\n " << std::endl;
+              << (SignalHandler::signalled() ? "Aborted" : "Done") << " !\n " << "\n";
     txn.commit();
     env.close(config.shared);
 }
@@ -465,8 +464,8 @@ void do_stages(db::EnvConfig& config) {
     if (txn.get_map_stat(crs.map()).ms_entries) {
         std::cout << "\n"
-                  << (boost::format(fmt_hdr) % "Stage Name" % "Block") << std::endl;
-        std::cout << (boost::format(fmt_hdr) % std::string(24, '-') % std::string(10, '-')) << std::endl;
+                  << (boost::format(fmt_hdr) % "Stage Name" % "Block") << "\n";
+        std::cout << (boost::format(fmt_hdr) % std::string(24, '-') % std::string(10, '-')) << "\n";
         auto result{crs.to_first(/*throw_notfound =*/false)};
         while (result) {
@@ -482,14 +481,12 @@
             bool Known{db::stages::is_known_stage(result.key.char_ptr() + offset)};
             std::cout << (boost::format(fmt_row) % result.key.as_string() % height % (Known ? std::string(8, ' ') : "Unknown"))
-                      << std::endl;
+                      << "\n";
             result = crs.to_next(/*throw_notfound =*/false);
         }
-        std::cout << "\n"
-                  << std::endl;
+        std::cout << "\n\n";
     } else {
-        std::cout << "\n There are no stages to list\n"
-                  << std::endl;
+        std::cout << "\n There are no stages to list\n\n";
     }
     txn.commit();
@@ -512,26 +509,24 @@ void do_migrations(db::EnvConfig& config) {
     if (txn.get_map_stat(crs.map()).ms_entries) {
         std::cout << "\n"
-                  << (boost::format(fmt_hdr) % "Migration Name") << std::endl;
-        std::cout << (boost::format(fmt_hdr) % std::string(24, '-')) << std::endl;
+                  << (boost::format(fmt_hdr) % "Migration Name") << "\n";
+        std::cout << (boost::format(fmt_hdr) % std::string(24, '-')) << "\n";
         auto result{crs.to_first(/*throw_notfound =*/false)};
         while (result) {
-            std::cout << (boost::format(fmt_row) % result.key.as_string()) << std::endl;
+            std::cout << (boost::format(fmt_row) % result.key.as_string()) << "\n";
             result = crs.to_next(/*throw_notfound =*/false);
         }
-        std::cout << "\n"
-                  << std::endl;
+        std::cout << "\n\n";
     } else {
-        std::cout << "\n There are no migrations to list\n"
-                  << std::endl;
+        std::cout << "\n There are no migrations to list\n\n";
     }
     txn.commit();
     env.close(config.shared);
 }
-void do_stage_set(db::EnvConfig& config, std::string&& stage_name, uint32_t new_height, bool dry) {
+void do_stage_set(db::EnvConfig& config, const std::string& stage_name, uint32_t new_height, bool dry) {
     config.readonly = false;
     if (!config.exclusive) {
@@ -553,8 +548,7 @@ void do_stage_set(db::EnvConfig& config, std::string&& stage_name, uint32_t new_
         txn.commit_and_renew();
     }
-    std::cout << "\n Stage " << stage_name << " touched from " << old_height << " to " << new_height << "\n"
-              << std::endl;
+    std::cout << "\n Stage " << stage_name << " touched from " << old_height << " to " << new_height << "\n\n";
 }
 void unwind(db::EnvConfig& config, BlockNum unwind_point, bool remove_blocks) {
@@ -633,18 +627,18 @@ void do_tables(db::EnvConfig& config) {
     auto dbTablesInfo{get_tables_info(txn)};
     auto dbFreeInfo{get_free_info(txn)};
-    std::cout << "\n Database tables : " << dbTablesInfo.tables.size() << std::endl;
+    std::cout << "\n Database tables : " << dbTablesInfo.tables.size() << "\n";
     std::cout << " Effective pruning : " << db::read_prune_mode(txn).to_string() << "\n"
-              << std::endl;
+              << "\n";
     if (!dbTablesInfo.tables.empty()) {
         std::cout << (boost::format(fmt_hdr) % "Dbi" % "Table name" % "Records" % "D" % "Branch" % "Leaf" %
                       "Overflow" % "Size" % "Key" % "Value")
-                  << std::endl;
+                  << "\n";
         std::cout << (boost::format(fmt_hdr) % std::string(3, '-') % std::string(26, '-') % std::string(10, '-') %
                       std::string(2, '-') % std::string(10, '-') % std::string(10, '-') % std::string(10, '-') %
                       std::string(12, '-') % std::string(10, '-') % std::string(10, '-'))
-                  << std::endl;
+                  << "\n";
         for (auto& item : dbTablesInfo.tables) {
             auto keyMode = magic_enum::enum_name(item.info.key_mode());
@@ -652,7 +646,7 @@
             std::cout << (boost::format(fmt_row) % item.id % item.name % item.stat.ms_entries % item.stat.ms_depth %
                           item.stat.ms_branch_pages % item.stat.ms_leaf_pages % item.stat.ms_overflow_pages %
                           human_size(item.size()) % keyMode % valueMode)
-                      << std::endl;
+                      << "\n";
         }
     }
@@ -664,8 +658,7 @@
               << " Free pages size (C) : " << (boost::format("%13s") % human_size(dbFreeInfo.size)) << "\n"
               << " Reclaimable space : "
               << (boost::format("%13s") % human_size(dbTablesInfo.file_size - dbTablesInfo.size + dbFreeInfo.size))
-              << " == A - B + C \n"
-              << std::endl;
+              << " == A - B + C \n\n";
     txn.commit();
     env.close(config.shared);
@@ -683,15 +676,14 @@ void do_freelist(db::EnvConfig& config, bool detail) {
         std::cout << "\n"
                   << (boost::format(fmt_hdr) % "TxId" % "Pages" % "Size") << "\n"
                   << (boost::format(fmt_hdr) % std::string(9, '-') % std::string(9, '-') % std::string(12, '-'))
-                  << std::endl;
+                  << "\n";
         for (auto& item : db_free_info.entries) {
-            std::cout << (boost::format(fmt_row) % item.id % item.pages % human_size(item.size)) << std::endl;
+            std::cout << (boost::format(fmt_row) % item.id % item.pages % human_size(item.size)) << "\n";
         }
     }
     std::cout << "\n Record count : " << boost::format("%13u") % db_free_info.entries.size() << "\n"
               << " Free pages count : " << boost::format("%13u") % db_free_info.pages << "\n"
-              << " Free pages size : " << boost::format("%13s") % human_size(db_free_info.size) << "\n"
-              << std::endl;
+              << " Free pages size : " << boost::format("%13s") % human_size(db_free_info.size) << "\n\n";
     txn.commit();
     env.close(config.shared);
@@ -706,8 +698,7 @@ void do_schema(db::EnvConfig& config) {
         throw std::runtime_error("Not a Silkworm db or no schema version found");
     }
     std::cout << "\n"
-              << "Database schema version : " << schema_version->to_string() << "\n"
-              << std::endl;
+              << "Database schema version : " << schema_version->to_string() << "\n\n";
     env.close(config.shared);
 }
@@ -746,9 +737,9 @@ void do_compact(db::EnvConfig& config, const std::string& work_dir, bool replace
     }
     std::cout << "\n Compacting database from " << config.path << "\n into " << target_file_path
-              << "\n Please be patient as there is no progress report ..." << std::endl;
+              << "\n Please be patient as there is no progress report ...\n";
     env.copy(/*destination*/ target_file_path.string(), /*compactify*/ true, /*forcedynamic*/ true);
-    std::cout << "\n Database compaction " << (SignalHandler::signalled() ? "aborted !" : "completed ...") << std::endl;
+    std::cout << "\n Database compaction " << (SignalHandler::signalled() ? "aborted !" : "completed ...") << "\n";
     env.close();
     if (!SignalHandler::signalled()) {
@@ -763,7 +754,7 @@ void do_compact(db::EnvConfig& config, const std::string& work_dir, bool replace
         auto source_file_path{db::get_datafile_path(fs::path(config.path))};
         // Create a backup copy before replacing ?
         if (!nobak) {
-            std::cout << " Creating backup copy of origin database ..." << std::endl;
+            std::cout << " Creating backup copy of origin database ...\n";
             std::string src_file_back{db::kDbDataFileName};
             src_file_back.append(".bak");
             fs::path src_path_bak{source_file_path.parent_path() / fs::path{src_file_back}};
@@ -773,7 +764,7 @@ void do_compact(db::EnvConfig& config, const std::string& work_dir, bool replace
             fs::rename(source_file_path, src_path_bak);
         }
-        std::cout << " Replacing origin database with compacted ..." << std::endl;
+        std::cout << " Replacing origin database with compacted ...\n";
         if (fs::exists(source_file_path)) {
             fs::remove(source_file_path);
         }
@@ -829,7 +820,7 @@ void do_copy(db::EnvConfig& src_config, const std::string& target_dir, bool crea
     }
     size_t bytesWritten{0};
-    std::cout << boost::format(" %-24s %=50s") % "Table" % "Progress" << std::endl;
+    std::cout << boost::format(" %-24s %=50s") % "Table" % "Progress\n";
     std::cout << boost::format(" %-24s %=50s") % std::string(24, '-') % std::string(50, '-') << std::flush;
     // Loop source tables
@@ -951,7 +942,7 @@ void do_copy(db::EnvConfig& src_config, const std::string& target_dir, bool crea
             std::cout << progress.print_interval(batch_committed ? 'W' : '.') << std::flush;
     }
-    std::cout << "\n All done!" << std::endl;
+    std::cout << "\n All done!\n";
 }
 static size_t print_multi_table_diff(db::ROCursorDupSort* cursor1, db::ROCursorDupSort* cursor2, bool force_print = false) {
@@ -1454,7 +1445,7 @@ void do_first_byte_analysis(db::EnvConfig& config) {
     code_cursor.to_first();
     cursor_for_each(code_cursor,
                     [&histogram, &batch_size, &progress](ByteView, ByteView value) {
-                        if (value.length() > 0) {
+                        if (!value.empty()) {
                             uint8_t first_byte{value.at(0)};
                             ++histogram[first_byte];
                         }
@@ -1467,10 +1458,9 @@
     BlockNum last_block{db::stages::read_stage_progress(txn, db::stages::kExecutionKey)};
     progress.set_current(total_entries);
-    std::cout << progress.print_interval('.') << std::endl;
+    std::cout << progress.print_interval('.') << "\n";
-    std::cout << "\n Last block : " << last_block << "\n Contracts : " << total_entries << "\n"
-              << std::endl;
+    std::cout << "\n Last block : " << last_block << "\n Contracts : " << total_entries << "\n\n";
     // Sort histogram by usage (from most used to less used)
     std::vector> histogram_sorted;
@@ -1483,14 +1473,13 @@
     if (!histogram_sorted.empty()) {
         std::cout << (boost::format(" %-4s %8s") % "Byte" % "Count") << "\n"
-                  << (boost::format(" %-4s %8s") % std::string(4, '-') % std::string(8, '-')) << std::endl;
+                  << (boost::format(" %-4s %8s") % std::string(4, '-') % std::string(8, '-')) << "\n";
         for (const auto& [byte_code, usage_count] : histogram_sorted) {
-            std::cout << (boost::format(" 0x%02x %8u") % static_cast(byte_code) % usage_count) << std::endl;
+            std::cout << (boost::format(" 0x%02x %8u") % static_cast(byte_code) % usage_count) << "\n";
         }
     }
-    std::cout << "\n"
-              << std::endl;
+    std::cout << "\n\n";
 }
 void do_extract_headers(db::EnvConfig& config, const std::string& file_name, uint32_t step) {
@@ -1510,7 +1499,7 @@
     out_stream << "/* Generated by Silkworm toolbox's extract headers */\n"
                << "#include \n"
                << "#include \n"
-               << "static const uint64_t preverified_hashes_mainnet_internal[] = {" << std::endl;
+               << "static const uint64_t preverified_hashes_mainnet_internal[] = {\n";
     BlockNum block_max{silkworm::db::stages::read_stage_progress(txn, db::stages::kHeadersKey)};
     BlockNum max_height{0};
@@ -1529,7 +1518,7 @@
             std::string hex{to_hex(chuncks[i], true)};
             out_stream << hex << ",";
         }
-        out_stream << std::endl;
+        out_stream << "\n";
         max_height = block_num;
     }
@@ -1537,8 +1526,7 @@
                << "};\n"
                << "const uint64_t* preverified_hashes_mainnet_data(){return &preverified_hashes_mainnet_internal[0];}\n"
                << "size_t sizeof_preverified_hashes_mainnet_data(){return sizeof(preverified_hashes_mainnet_internal);}\n"
-               << "uint64_t preverified_hashes_mainnet_height(){return " << max_height << "ull;}\n"
-               << std::endl;
+               << "uint64_t preverified_hashes_mainnet_height(){return " << max_height << "ull;}\n\n";
     out_stream.close();
 }
@@ -1577,17 +1565,16 @@ void do_trie_account_analysis(db::EnvConfig& config) {
     });
     progress.set_current(total_entries);
-    std::cout << progress.print_interval('.') << std::endl;
+    std::cout << progress.print_interval('.') << "\n";
     if (!histogram.empty()) {
         std::cout << (boost::format(" %-4s %8s") % "Size" % "Count") << "\n"
-                  << (boost::format(" %-4s %8s") % std::string(4, '-') % std::string(8, '-')) << std::endl;
+                  << (boost::format(" %-4s %8s") % std::string(4, '-') % std::string(8, '-')) << "\n";
         for (const auto& [size, usage_count] : histogram) {
-            std::cout << (boost::format(" %4u %8u") % size % usage_count) << std::endl;
+            std::cout << (boost::format(" %4u %8u") % size % usage_count) << "\n";
         }
     }
-    std::cout << "\n"
-              << std::endl;
+    std::cout << "\n\n";
 }
 void do_trie_scan(db::EnvConfig& config, bool del) {
@@ -1601,11 +1588,11 @@
             break;
         }
         db::PooledCursor cursor(txn, map_config);
-        std::cout << " Scanning " << map_config.name << std::endl;
+        std::cout << " Scanning " << map_config.name << "\n";
         auto data{cursor.to_first(false)};
         while (data) {
             if (data.value.empty()) {
-                std::cout << "Empty value at key " << to_hex(db::from_slice(data.key), true) << std::endl;
+                std::cout << "Empty value at key " << to_hex(db::from_slice(data.key), true) << "\n";
                 if (del) {
                     cursor.erase();
                 }
@@ -1622,8 +1609,7 @@
     if (!SignalHandler::signalled()) {
         txn.commit();
     }
-    std::cout << "\n"
-              << std::endl;
+    std::cout << "\n\n";
 }
 void do_trie_integrity(db::EnvConfig& config, bool with_state_coverage, bool continue_scan, bool sanitize) {
@@ -1690,7 +1676,7 @@ void do_trie_integrity(db::EnvConfig& config, bool with_state_coverage, bool con
                     throw std::runtime_error(what);
                 }
                 is_healthy = false;
-                std::cout << " " << what << std::endl;
+                std::cout << " " << what << "\n";
             }
             if (!trie::is_subset(node_tree_mask, node_state_mask)) {
@@ -1718,7 +1704,7 @@ void do_trie_integrity(db::EnvConfig& config, bool with_state_coverage, bool con
                     throw std::runtime_error(what);
                 }
                 is_healthy = false;
-                std::cout << " " << what << std::endl;
+                std::cout << " " << what << "\n";
             } else {
                 node_has_root = (effective_hashes_count == expected_hashes_count + 1u);
             }
@@ -1732,7 +1718,7 @@ void do_trie_integrity(db::EnvConfig& config, bool with_state_coverage, bool con
                     throw std::runtime_error(what);
                 }
                 is_healthy = false;
-                std::cout << " " << what << std::endl;
+                std::cout << " " << what << "\n";
             } else if (!node_k.empty() && node_has_root) {
                 log::Warning("Unexpected root hash", {"key", to_hex(data1_k, true)});
             }
@@ -1814,7 +1800,7 @@ void do_trie_integrity(db::EnvConfig& config, bool with_state_coverage, bool con
                         throw std::runtime_error(what);
                     }
                     is_healthy = false;
-                    std::cout << " " << what << std::endl;
+                    std::cout << " " << what << "\n";
                 }
             }
@@ -1829,7 +1815,7 @@ void do_trie_integrity(db::EnvConfig& config, bool with_state_coverage, bool con
                         throw std::runtime_error(what);
                     }
                     is_healthy = false;
-                    std::cout << " " << what << std::endl;
+                    std::cout << " " << what << "\n";
                 }
             }
@@ -1853,7 +1839,7 @@ void do_trie_integrity(db::EnvConfig& config, bool with_state_coverage, bool con
                 auto bits_to_match{buffer.length() * 4};
-                // >>> See Erigon's /ethdb/kv_util.go::BytesMask
+                // >>> See Erigon /ethdb/kv_util.go::BytesMask
                 uint8_t mask{0xff};
                 auto fixed_bytes{(bits_to_match + 7) / 8};
                 auto shift_bits{bits_to_match & 7};
@@ -1872,7 +1858,7 @@ void do_trie_integrity(db::EnvConfig& config, bool with_state_coverage, bool con
                 Bytes seek{trie::pack_nibbles(buffer)};
-                // On first loop we search HashedAccounts (which is not dupsorted)
+                // On first loop we search HashedAccounts (which is not dup-sorted)
                 if (!loop_id) {
                     auto data3{state_cursor.lower_bound(db::to_slice(seek), false)};
                     if (data3) {
@@ -1894,7 +1880,7 @@ void do_trie_integrity(db::EnvConfig& config, bool with_state_coverage, bool con
                         throw std::runtime_error(what);
                     }
                 } else {
-                    // On second loop we search HashedStorage (which is dupsorted)
+                    // On second loop we search HashedStorage (which is dup-sorted)
                     auto data3{state_cursor.lower_bound_multivalue(db::to_slice(data1_k.substr(0, prefix_len)),
                                                                    db::to_slice(seek), false)};
                     if (data3) {
@@ -2360,14 +2346,13 @@ int main(int argc, char* argv[]) {
         if (!*cmd_initgenesis) {
             if (!data_dir.chaindata().exists() || data_dir.is_pristine()) {
-                std::cerr << "\n Directory " << data_dir.chaindata().path().string() << " does not exist or is empty"
-                          << std::endl;
+                std::cerr << "\n Directory " << data_dir.chaindata().path().string() << " does not exist or is empty\n";
                 return -1;
             }
             auto mdbx_path{db::get_datafile_path(data_dir.chaindata().path())};
             if (!fs::exists(mdbx_path) || !fs::is_regular_file(mdbx_path)) {
                 std::cerr << "\n Directory " << data_dir.chaindata().path().string() << " does not contain "
-                          << db::kDbDataFileName << std::endl;
+                          << db::kDbDataFileName << "\n";
                 return -1;
             }
         }
@@ -2414,8 +2399,7 @@
                        *cmd_initgenesis_chain_opt ? cmd_initgenesis_chain_opt->as() : 0u,
                        static_cast(*app_dry_opt));
             if (*app_dry_opt) {
-                std::cout << "\nGenesis initialization succeeded. Due to --dry flag no data is persisted\n"
-                          << std::endl;
+                std::cout << "\nGenesis initialization succeeded. Due to --dry flag no data is persisted\n\n";
                 fs::remove_all(data_dir.path());
             }
         } else if (*cmd_chainconfig) {
diff --git a/cmd/dev/genesistool.cpp b/cmd/dev/genesistool.cpp
index b5895b1f14..6fb9639543 100644
--- a/cmd/dev/genesistool.cpp
+++ b/cmd/dev/genesistool.cpp
@@ -60,9 +60,9 @@ void to_byte_array(fs::path& in, fs::path& out) {
     // Write bytes to output file
     std::string var_name{in.filename().replace_extension("").string()};
     std::ofstream out_stream{out.string()};
-    out_stream << "/* Generated from " << in.filename().string() << " using silkworm's genesistool*/" << std::endl;
-    out_stream << "#include \"" + var_name + ".hpp\"" << std::endl;
-    out_stream << "constexpr char " << var_name << "_data_internal[] = {" << std::endl;
+    out_stream << "/* Generated from " << in.filename().string() << " using silkworm's genesistool*/\n";
+    out_stream << "#include \"" + var_name + ".hpp\"\n";
+    out_stream << "constexpr char " << var_name << "_data_internal[] = {\n";
     auto max{bytes.size()};
     auto count{1u};
@@ -71,11 +71,11 @@
                    << ((count % 16 == 0) ? "\n" : " ");
         ++count;
     }
-    out_stream << "};" << std::endl;
-    out_stream << "namespace silkworm {" << std::endl;
+    out_stream << "};\n";
+    out_stream << "namespace silkworm {\n";
     out_stream << "constinit const std::string_view " << var_name << "_json{&" << var_name
-               << "_data_internal[0], sizeof(" << var_name << "_data_internal)};" << std::endl;
-    out_stream << "}" << std::endl;
+               << "_data_internal[0], sizeof(" << var_name << "_data_internal)};\n";
+    out_stream << "}\n";
     out_stream.close();
 }
@@ -95,7 +95,7 @@ int main(int argc, char* argv[]) {
     app_main.add_flag("-w,--overwrite", overwrite, "Whether to overwrite existing files");
-    CLI11_PARSE(app_main, argc, argv);
+    CLI11_PARSE(app_main, argc, argv)
     // Get genesis files in input directory
     static const std::regex genesis_pattern{R"(^genesis_(.*)?\.json$)", std::regex_constants::icase};
@@ -111,7 +111,7 @@
         }
     }
     if (input_entries.empty()) {
-        std::cerr << "\nNo files matching genesis pattern in input directory" << std::endl;
+        std::cerr << "\nNo files matching genesis pattern in input directory" << "\n";
         return -1;
     }
@@ -121,8 +121,7 @@
         output_file_path.replace_extension(".cpp");
         bool exists{fs::exists(output_file_path)};
         bool skip{exists && !overwrite};
-        std::cout << input_file_path.string() << (skip ? " Skipped (exists)" : " -> " + output_file_path.string())
-                  << std::endl;
+        std::cout << input_file_path.string() << (skip ? " Skipped (exists)" : " -> " + output_file_path.string()) << "\n";
         if (exists && !skip) {
             fs::remove(output_file_path);
         }
diff --git a/cmd/dev/kzg_g2_uncompress.cpp b/cmd/dev/kzg_g2_uncompress.cpp
index 6ee4efaa6a..9a09423a4c 100644
--- a/cmd/dev/kzg_g2_uncompress.cpp
+++ b/cmd/dev/kzg_g2_uncompress.cpp
@@ -62,5 +62,5 @@ int main() {
     print_blst_fp(out.z.fp[0]);
     std::cout << ",\n ";
     print_blst_fp(out.z.fp[1]);
-    std::cout << "}}" << std::endl;
+    std::cout << "}}\n";
 }
diff --git a/cmd/dev/scan_txs.cpp b/cmd/dev/scan_txs.cpp
index fff0689a2b..43c854f1c4 100644
--- a/cmd/dev/scan_txs.cpp
+++ b/cmd/dev/scan_txs.cpp
@@ -40,14 +40,14 @@ int main(int argc, char* argv[]) {
     uint64_t to{UINT64_MAX};
     app.add_option("--to", to, "check up to block number (exclusive)");
-    CLI11_PARSE(app, argc, argv);
+    CLI11_PARSE(app, argc, argv)
     if (from > to) {
         std::cerr << "--from (" << from << ") must be less than or equal to --to (" << to << ").\n";
         return -1;
     }
-    int retvar{0};
+    int rv{0};
     // Note: If Erigon is actively syncing its database (syncing), it is important not to create
     // long-running database reads transactions even though that may make your processing faster.
@@ -111,7 +111,7 @@
             // Report and reset counters
             if ((block_num % 50000) == 0) {
-                std::cout << block_num << "," << nTxs << "," << nErrors << std::endl;
+                std::cout << block_num << "," << nTxs << "," << nErrors << "\n";
                 nTxs = nErrors = 0;
             } else if ((block_num % 100) == 0) {
@@ -125,9 +125,9 @@
        }
    } catch (std::exception& ex) {
-        std::cout << ex.what() << std::endl;
-        retvar = -1;
+        std::cout << ex.what() << "\n";
+        rv = -1;
    }
-    return retvar;
+    return rv;
 }
diff --git a/cmd/dev/snapshots.cpp b/cmd/dev/snapshots.cpp
index 55b420a25a..94d4c479bd 100644
--- a/cmd/dev/snapshots.cpp
+++ b/cmd/dev/snapshots.cpp
@@ -64,7 +64,7 @@ struct DownloadSettings : public bittorrent::BitTorrentSettings {
 };
 //! The Snapshots tools
-enum class SnapshotTool {
+enum class SnapshotTool : uint8_t {
     count_bodies,
     count_headers,
     create_index,
diff --git a/silkworm/rpc/core/call_many.cpp b/silkworm/rpc/core/call_many.cpp
index 5f3daf9ae8..dd8677298d 100644
--- a/silkworm/rpc/core/call_many.cpp
+++ b/silkworm/rpc/core/call_many.cpp
@@ -23,22 +23,14 @@
 #include
 #include
 #include
-#include
 #include
-#include
-#include
-#include
 #include
 #include
 #include
-#include
-#include
 #include
 #include
 #include
-#include
-#include
 #include
 #include
@@ -78,7 +70,7 @@ CallManyResult CallExecutor::executes_all_bundles(const silkworm::ChainConfig& c
     for (const auto& bundle : bundles) {
         const auto& block_override = bundle.block_override;
-        // creates a block copy where ovverides few values
+        // creates a block copy where overrides few values
         auto block_with_hash_shared_copy = std::make_shared();
         *block_with_hash_shared_copy = *block_with_hash;
diff --git a/silkworm/rpc/ethdb/cursor.cpp b/silkworm/rpc/ethdb/cursor.cpp
index 1149a72926..b8973e5cae 100644
--- a/silkworm/rpc/ethdb/cursor.cpp
+++ b/silkworm/rpc/ethdb/cursor.cpp
@@ -47,7 +47,7 @@ Task SplitCursor::next() {
 }
 bool SplitCursor::match_key(const silkworm::ByteView& key) {
-    if (key.length() == 0) {
+    if (key.empty()) {
         return false;
     }
     if (match_bytes_ == 0) {
@@ -65,7 +65,7 @@
 SplittedKeyValue SplitCursor::split_key_value(const KeyValue& kv) {
     const silkworm::Bytes& key = kv.key;
-    if (key.length() == 0) {
+    if (key.empty()) {
         return SplittedKeyValue{};
     }
     if (!match_key(key)) {
@@ -115,7 +115,7 @@ Task SplitCursorDupSort::next_dup() {
 }
 bool SplitCursorDupSort::match_key(const silkworm::ByteView& key) {
-    if (key.length() == 0) {
+    if (key.empty()) {
         return false;
     }
     if (match_bytes_ == 0) {
@@ -133,7 +133,7 @@
 SplittedKeyValue SplitCursorDupSort::split_key_value(const KeyValue& kv) {
     const silkworm::Bytes& key = kv.key;
-    if (key.length() == 0) {
+    if (key.empty()) {
         return SplittedKeyValue{};
     }
     if (!match_key(key)) {
diff --git a/silkworm/rpc/json/call_bundle_test.cpp b/silkworm/rpc/json/call_bundle_test.cpp
index 88dc276021..1f2aba92d1 100644
--- a/silkworm/rpc/json/call_bundle_test.cpp
+++ b/silkworm/rpc/json/call_bundle_test.cpp
@@ -20,9 +20,6 @@
 #include
 #include
-#include
-#include
-
 namespace silkworm::rpc {
 TEST_CASE("serialize empty call_bundle", "[rpc][to_json]") {