diff --git a/cmd/dev/backend_kv_server.cpp b/cmd/dev/backend_kv_server.cpp
index de9fdff131..b8c735c331 100644
--- a/cmd/dev/backend_kv_server.cpp
+++ b/cmd/dev/backend_kv_server.cpp
@@ -39,7 +39,7 @@
 #include
 #include
 #include
-#include
+#include
 #include
 #include
 #include
@@ -68,7 +68,7 @@ struct StandaloneBackEndKVSettings : public SilkwormSettings {
 };

 //! Parse the command-line arguments into the BackEnd and KV server settings
-int parse_command_line(int argc, char* argv[], CLI::App& app, StandaloneBackEndKVSettings& settings) {
+void parse_command_line(int argc, char* argv[], CLI::App& app, StandaloneBackEndKVSettings& settings) {
     auto& log_settings = settings.log_settings;
     auto& node_settings = settings.node_settings;
     auto& server_settings = settings.node_settings.server_settings;
@@ -106,8 +106,6 @@ int parse_command_line(int argc, char* argv[], CLI::App& app, StandaloneBackEndK
         /*create=*/false,
         /*readonly=*/true};
     node_settings.chaindata_env_config.max_readers = max_readers;
-
-    return 0;
 }

 std::shared_ptr make_sentry_client(
@@ -160,10 +158,7 @@ int main(int argc, char* argv[]) {
     try {
         StandaloneBackEndKVSettings settings;
-        int result_code = parse_command_line(argc, argv, cli, settings);
-        if (result_code != 0) {
-            return result_code;
-        }
+        parse_command_line(argc, argv, cli, settings);

         const auto pid = boost::this_process::get_id();
         const auto tid = std::this_thread::get_id();
@@ -215,7 +210,7 @@ int main(int argc, char* argv[]) {
         };
         backend.set_node_name(node_name);

-        rpc::BackEndKvServer server{server_settings, backend};
+        node::BackEndKvServer server{server_settings, backend};

         // Standalone BackEndKV server has no staged loop, so this simulates periodic state changes
         Task tasks;
diff --git a/silkworm/db/remote/kv/api/client.hpp b/silkworm/db/remote/kv/api/client.hpp
index 223a9ec455..db9147ac1e 100644
--- a/silkworm/db/remote/kv/api/client.hpp
+++ b/silkworm/db/remote/kv/api/client.hpp
@@ -20,7 +20,7 @@
 #include "service.hpp"

-namespace silkworm::remote::kv::api {
+namespace silkworm::kv::api {

 struct Client {
     virtual ~Client() = default;
@@ -28,4 +28,4 @@ struct Client {
     virtual std::shared_ptr service() = 0;
 };

-} // namespace silkworm::remote::kv::api
+} // namespace silkworm::kv::api
diff --git a/silkworm/db/remote/kv/api/direct_client.cpp b/silkworm/db/remote/kv/api/direct_client.cpp
index f4ead8ce0f..869050f389 100644
--- a/silkworm/db/remote/kv/api/direct_client.cpp
+++ b/silkworm/db/remote/kv/api/direct_client.cpp
@@ -16,7 +16,7 @@
 #include "direct_client.hpp"

-namespace silkworm::remote::kv::api {
+namespace silkworm::kv::api {

 DirectClient::DirectClient(std::shared_ptr direct_service)
     : direct_service_(std::move(direct_service)) {}
@@ -25,4 +25,4 @@ std::shared_ptr DirectClient::service() {
     return direct_service_;
 }

-} // namespace silkworm::remote::kv::api
+} // namespace silkworm::kv::api
diff --git a/silkworm/db/remote/kv/api/direct_client.hpp b/silkworm/db/remote/kv/api/direct_client.hpp
index cdcd06709e..0b1cecfd6a 100644
--- a/silkworm/db/remote/kv/api/direct_client.hpp
+++ b/silkworm/db/remote/kv/api/direct_client.hpp
@@ -21,7 +21,7 @@
 #include "../api/client.hpp"
 #include "../api/direct_service.hpp"

-namespace silkworm::remote::kv::api {
+namespace silkworm::kv::api {

 struct DirectClient : public api::Client {
     explicit DirectClient(std::shared_ptr direct_service);
@@ -33,4 +33,4 @@ struct DirectClient : public api::Client {
     std::shared_ptr direct_service_;
 };

-} // namespace silkworm::remote::kv::api
+} // namespace silkworm::kv::api
diff --git a/silkworm/db/remote/kv/api/direct_service.cpp b/silkworm/db/remote/kv/api/direct_service.cpp
index f1cc2c21d3..5d48b4de41 100644
--- a/silkworm/db/remote/kv/api/direct_service.cpp
+++ b/silkworm/db/remote/kv/api/direct_service.cpp
@@ -16,10 +16,15 @@
 #include "direct_service.hpp"

-namespace silkworm::remote::kv::api {
+namespace silkworm::kv::api {

 DirectService::DirectService() = default;

+// rpc Version(google.protobuf.Empty) returns (types.VersionReply);
+Task DirectService::version() {
+    co_return kCurrentVersion;
+}
+
 /** Temporal Point Queries **/

 // rpc HistoryGet(HistoryGetReq) returns (HistoryGetReply);
@@ -54,4 +59,4 @@ Task DirectService::get_domain_range(const DomainRangeQuery&)
     co_return DomainRangeResult{};
 }

-} // namespace silkworm::remote::kv::api
+} // namespace silkworm::kv::api
diff --git a/silkworm/db/remote/kv/api/direct_service.hpp b/silkworm/db/remote/kv/api/direct_service.hpp
index 30fe60d0ef..f7092722f3 100644
--- a/silkworm/db/remote/kv/api/direct_service.hpp
+++ b/silkworm/db/remote/kv/api/direct_service.hpp
@@ -18,7 +18,7 @@
 #include "service.hpp"

-namespace silkworm::remote::kv::api {
+namespace silkworm::kv::api {

 //! Straightforward asynchronous implementation of KV API service relying on \code Domains.
 //! This is used both client-side by 'direct' (i.e. no-gRPC) implementation and server-side by gRPC server.
@@ -33,6 +33,9 @@ class DirectService : public Service {
     DirectService(DirectService&&) = delete;
     DirectService& operator=(DirectService&&) = delete;

+    // rpc Version(google.protobuf.Empty) returns (types.VersionReply);
+    Task version() override;
+
     /** Temporal Point Queries **/

     // rpc HistoryGet(HistoryGetReq) returns (HistoryGetReply);
@@ -53,4 +56,4 @@ class DirectService : public Service {
     Task get_domain_range(const DomainRangeQuery&) override;
 };

-} // namespace silkworm::remote::kv::api
+} // namespace silkworm::kv::api
diff --git a/silkworm/db/remote/kv/api/endpoint/common.hpp b/silkworm/db/remote/kv/api/endpoint/common.hpp
index 150247a69e..ff16fbb77a 100644
--- a/silkworm/db/remote/kv/api/endpoint/common.hpp
+++ b/silkworm/db/remote/kv/api/endpoint/common.hpp
@@ -21,7 +21,7 @@
 #include

-namespace silkworm::remote::kv::api {
+namespace silkworm::kv::api {

 using TxId = uint64_t;
 using Timestamp = int64_t;
@@ -29,4 +29,4 @@ using Timestamp = int64_t;
 using ListOfBytes = std::vector;
 using ListOfTimestamp = std::vector;

-} // namespace silkworm::remote::kv::api
+} // namespace silkworm::kv::api
diff --git a/silkworm/db/remote/kv/api/endpoint/temporal_point.hpp b/silkworm/db/remote/kv/api/endpoint/temporal_point.hpp
index 980e603c24..1ef27706ac 100644
--- a/silkworm/db/remote/kv/api/endpoint/temporal_point.hpp
+++ b/silkworm/db/remote/kv/api/endpoint/temporal_point.hpp
@@ -21,7 +21,7 @@
 #include "common.hpp"

-namespace silkworm::remote::kv::api {
+namespace silkworm::kv::api {

 struct PointResult {
     bool success{false};
@@ -47,4 +47,4 @@ struct DomainPointQuery {

 using DomainPointResult = PointResult;

-} // namespace silkworm::remote::kv::api
+} // namespace silkworm::kv::api
diff --git a/silkworm/db/remote/kv/api/endpoint/temporal_range.hpp b/silkworm/db/remote/kv/api/endpoint/temporal_range.hpp
index 9998bca07d..c6836fd281 100644
--- a/silkworm/db/remote/kv/api/endpoint/temporal_range.hpp
+++ b/silkworm/db/remote/kv/api/endpoint/temporal_range.hpp
@@ -23,7 +23,7 @@
 #include "common.hpp"

-namespace silkworm::remote::kv::api {
+namespace silkworm::kv::api {

 struct IndexRangeQuery {
     TxId tx_id{0};
@@ -74,4 +74,4 @@ struct DomainRangeQuery {

 using DomainRangeResult = RangeResult;

-} // namespace silkworm::remote::kv::api
+} // namespace silkworm::kv::api
diff --git a/silkworm/db/remote/kv/api/endpoint/version.hpp b/silkworm/db/remote/kv/api/endpoint/version.hpp
new file mode 100644
index 0000000000..84e867937a
--- /dev/null
+++ b/silkworm/db/remote/kv/api/endpoint/version.hpp
@@ -0,0 +1,28 @@
+/*
+   Copyright 2024 The Silkworm Authors
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+#pragma once
+
+#include
+
+namespace silkworm::kv::api {
+
+using Version = std::tuple;
+
+//! Current KV API protocol version.
+constexpr auto kCurrentVersion = Version{5, 1, 0};
+
+} // namespace silkworm::kv::api
diff --git a/silkworm/db/remote/kv/api/service.hpp b/silkworm/db/remote/kv/api/service.hpp
index 4db97b2203..dd1e0cd9e7 100644
--- a/silkworm/db/remote/kv/api/service.hpp
+++ b/silkworm/db/remote/kv/api/service.hpp
@@ -20,12 +20,16 @@
 #include "endpoint/temporal_point.hpp"
 #include "endpoint/temporal_range.hpp"
+#include "endpoint/version.hpp"

-namespace silkworm::remote::kv::api {
+namespace silkworm::kv::api {

 struct Service {
     virtual ~Service() = default;

+    // rpc Version(google.protobuf.Empty) returns (types.VersionReply);
+    virtual Task version() = 0;
+
     /** Temporal Point Queries **/

     // rpc HistoryGet(HistoryGetReq) returns (HistoryGetReply);
@@ -46,4 +50,4 @@ struct Service {
     virtual Task get_domain_range(const DomainRangeQuery&) = 0;
 };

-} // namespace silkworm::remote::kv::api
+} // namespace silkworm::kv::api
diff --git a/silkworm/db/remote/kv/grpc/client/endpoint/temporal_point.cpp b/silkworm/db/remote/kv/grpc/client/endpoint/temporal_point.cpp
index e964f8f7a9..84d4bf33c2 100644
--- a/silkworm/db/remote/kv/grpc/client/endpoint/temporal_point.cpp
+++ b/silkworm/db/remote/kv/grpc/client/endpoint/temporal_point.cpp
@@ -18,7 +18,7 @@
 #include

-namespace silkworm::remote::kv::grpc::client {
+namespace silkworm::kv::grpc::client {

 namespace proto = ::remote;

@@ -65,4 +65,4 @@ api::DomainPointResult domain_get_result_from_response(const proto::DomainGetRep
     return result;
 }

-} // namespace silkworm::remote::kv::grpc::client
+} // namespace silkworm::kv::grpc::client
diff --git a/silkworm/db/remote/kv/grpc/client/endpoint/temporal_point.hpp b/silkworm/db/remote/kv/grpc/client/endpoint/temporal_point.hpp
index 0b42340417..615a8cc892 100644
--- a/silkworm/db/remote/kv/grpc/client/endpoint/temporal_point.hpp
+++ b/silkworm/db/remote/kv/grpc/client/endpoint/temporal_point.hpp
@@ -20,7 +20,7 @@
 #include "../../../api/endpoint/temporal_point.hpp"

-namespace silkworm::remote::kv::grpc::client {
+namespace silkworm::kv::grpc::client {

 ::remote::HistoryGetReq history_get_request_from_query(const api::HistoryPointQuery&);
 api::HistoryPointResult history_get_result_from_response(const ::remote::HistoryGetReply&);
@@ -28,4 +28,4 @@ api::HistoryPointResult history_get_result_from_response(const ::remote::History
 ::remote::DomainGetReq domain_get_request_from_query(const api::DomainPointQuery&);
 api::DomainPointResult domain_get_result_from_response(const ::remote::DomainGetReply&);

-} // namespace silkworm::remote::kv::grpc::client
+} // namespace silkworm::kv::grpc::client
diff --git a/silkworm/db/remote/kv/grpc/client/endpoint/temporal_point_test.cpp b/silkworm/db/remote/kv/grpc/client/endpoint/temporal_point_test.cpp
index 6af41775c7..007913b40f 100644
--- a/silkworm/db/remote/kv/grpc/client/endpoint/temporal_point_test.cpp
+++ b/silkworm/db/remote/kv/grpc/client/endpoint/temporal_point_test.cpp
@@ -22,10 +22,10 @@
 #include "../../test_util/sample_protos.hpp"

-namespace silkworm::remote::kv::grpc::client {
+namespace silkworm::kv::grpc::client {

 using namespace evmc::literals;
-using namespace silkworm::remote::kv::test_util;
+using namespace silkworm::kv::test_util;
 using namespace silkworm::test_util;

 namespace proto = ::remote;
@@ -95,4 +95,4 @@ TEST_CASE("domain_get_result_from_response", "[node][remote][kv][grpc]") {
     }
 }

-} // namespace silkworm::remote::kv::grpc::client
+} // namespace silkworm::kv::grpc::client
diff --git a/silkworm/db/remote/kv/grpc/client/endpoint/temporal_range.cpp b/silkworm/db/remote/kv/grpc/client/endpoint/temporal_range.cpp
index 9b5ea78590..89b19459cf 100644
--- a/silkworm/db/remote/kv/grpc/client/endpoint/temporal_range.cpp
+++ b/silkworm/db/remote/kv/grpc/client/endpoint/temporal_range.cpp
@@ -19,7 +19,7 @@
 #include
 #include

-namespace silkworm::remote::kv::grpc::client {
+namespace silkworm::kv::grpc::client {

 namespace proto = ::remote;

@@ -96,4 +96,4 @@ api::DomainRangeResult domain_range_result_from_response(const ::remote::Pairs&
     return result;
 }

-} // namespace silkworm::remote::kv::grpc::client
+} // namespace silkworm::kv::grpc::client
diff --git a/silkworm/db/remote/kv/grpc/client/endpoint/temporal_range.hpp b/silkworm/db/remote/kv/grpc/client/endpoint/temporal_range.hpp
index 5f91d47e4c..d61de9b71e 100644
--- a/silkworm/db/remote/kv/grpc/client/endpoint/temporal_range.hpp
+++ b/silkworm/db/remote/kv/grpc/client/endpoint/temporal_range.hpp
@@ -20,7 +20,7 @@
 #include "../../../api/endpoint/temporal_range.hpp"

-namespace silkworm::remote::kv::grpc::client {
+namespace silkworm::kv::grpc::client {

 ::remote::IndexRangeReq index_range_request_from_query(const api::IndexRangeQuery&);
 api::IndexRangeResult index_range_result_from_response(const ::remote::IndexRangeReply&);
@@ -31,4 +31,4 @@ api::HistoryRangeResult history_range_result_from_response(const ::remote::Pairs
 ::remote::DomainRangeReq domain_range_request_from_query(const api::DomainRangeQuery&);
 api::DomainRangeResult domain_range_result_from_response(const ::remote::Pairs&);

-} // namespace silkworm::remote::kv::grpc::client
+} // namespace silkworm::kv::grpc::client
diff --git a/silkworm/db/remote/kv/grpc/client/endpoint/temporal_range_test.cpp b/silkworm/db/remote/kv/grpc/client/endpoint/temporal_range_test.cpp
index e27a1b6bab..50bbd07627 100644
--- a/silkworm/db/remote/kv/grpc/client/endpoint/temporal_range_test.cpp
+++ b/silkworm/db/remote/kv/grpc/client/endpoint/temporal_range_test.cpp
@@ -22,10 +22,10 @@
 #include "../../test_util/sample_protos.hpp"

-namespace silkworm::remote::kv::grpc::client {
+namespace silkworm::kv::grpc::client {

 using namespace evmc::literals;
-using namespace silkworm::remote::kv::test_util;
+using namespace silkworm::kv::test_util;
 using namespace silkworm::test_util;

 namespace proto = ::remote;
@@ -140,4 +140,4 @@ TEST_CASE("domain_range_result_from_response", "[node][remote][kv][grpc]") {
     }
 }

-} // namespace silkworm::remote::kv::grpc::client
+} // namespace silkworm::kv::grpc::client
diff --git a/silkworm/db/remote/kv/grpc/client/remote_client.cpp b/silkworm/db/remote/kv/grpc/client/remote_client.cpp
index 45742dadd2..c552902513 100644
--- a/silkworm/db/remote/kv/grpc/client/remote_client.cpp
+++ b/silkworm/db/remote/kv/grpc/client/remote_client.cpp
@@ -24,7 +24,7 @@
 #include "endpoint/temporal_point.hpp"
 #include "endpoint/temporal_range.hpp"

-namespace silkworm::remote::kv::grpc::client {
+namespace silkworm::kv::grpc::client {

 namespace proto = ::remote;
 using Stub = proto::KV::StubInterface;
@@ -48,6 +48,11 @@ class RemoteClientImpl final : public api::Service {
     RemoteClientImpl(const RemoteClientImpl&) = delete;
     RemoteClientImpl& operator=(const RemoteClientImpl&) = delete;

+    // rpc Version(google.protobuf.Empty) returns (types.VersionReply);
+    Task version() override {
+        co_return api::kCurrentVersion;
+    }
+
     /** Temporal Point Queries **/

     // rpc HistoryGet(HistoryGetReq) returns (HistoryGetReply);
@@ -106,4 +111,4 @@ std::shared_ptr RemoteClient::service() {
     return p_impl_;
 }

-} // namespace silkworm::remote::kv::grpc::client
+} // namespace silkworm::kv::grpc::client
diff --git a/silkworm/db/remote/kv/grpc/client/remote_client.hpp b/silkworm/db/remote/kv/grpc/client/remote_client.hpp
index 4ed24c8767..a840ba388a 100644
--- a/silkworm/db/remote/kv/grpc/client/remote_client.hpp
+++ b/silkworm/db/remote/kv/grpc/client/remote_client.hpp
@@ -25,7 +25,7 @@
 #include "../../api/client.hpp"
 #include "../../api/service.hpp"

-namespace silkworm::remote::kv::grpc::client {
+namespace silkworm::kv::grpc::client {

 class RemoteClientImpl;

@@ -40,4 +40,4 @@ struct RemoteClient : public api::Client {
     std::shared_ptr p_impl_;
 };

-} // namespace silkworm::remote::kv::grpc::client
+} // namespace silkworm::kv::grpc::client
diff --git a/silkworm/db/remote/kv/grpc/client/remote_client_test.cpp b/silkworm/db/remote/kv/grpc/client/remote_client_test.cpp
index 6dbf22b979..ac37ab3a55 100644
--- a/silkworm/db/remote/kv/grpc/client/remote_client_test.cpp
+++ b/silkworm/db/remote/kv/grpc/client/remote_client_test.cpp
@@ -28,10 +28,10 @@
 #include "../test_util/sample_protos.hpp"

-namespace silkworm::remote::kv::grpc::client {
+namespace silkworm::kv::grpc::client {

 using namespace silkworm::grpc::test_util;
-using namespace silkworm::remote::kv::test_util;
+using namespace silkworm::kv::test_util;

 namespace proto = ::remote;
 using StrictMockKVStub = testing::StrictMock;
@@ -181,4 +181,4 @@ TEST_CASE_METHOD(RemoteClientTestRunner, "KV::DomainRange", "[node][remote][kv][
     }
 }

-} // namespace silkworm::remote::kv::grpc::client
+} // namespace silkworm::kv::grpc::client
diff --git a/silkworm/db/remote/kv/grpc/server/kv_calls.cpp b/silkworm/db/remote/kv/grpc/server/kv_calls.cpp
index a314e493e5..1c03f53ca7 100644
--- a/silkworm/db/remote/kv/grpc/server/kv_calls.cpp
+++ b/silkworm/db/remote/kv/grpc/server/kv_calls.cpp
@@ -27,14 +27,14 @@
 #include
 #include

-namespace silkworm::rpc {
+namespace silkworm::kv::grpc::server {

 using boost::asio::as_tuple;
 using namespace boost::asio::experimental::awaitable_operators;
 using boost::asio::steady_timer;
 using boost::asio::use_awaitable;

-KvVersion higher_version_ignoring_patch(KvVersion lhs, KvVersion rhs) {
+api::Version higher_version_ignoring_patch(api::Version lhs, api::Version rhs) {
     uint32_t lhs_major = std::get<0>(lhs);
     uint32_t lhs_minor = std::get<1>(lhs);
     uint32_t rhs_major = std::get<0>(rhs);
@@ -57,7 +57,7 @@
 types::VersionReply KvVersionCall::response_;

 void KvVersionCall::fill_predefined_reply() {
-    const auto max_version = higher_version_ignoring_patch(kDbSchemaVersion, kKvApiVersion);
+    const auto max_version = higher_version_ignoring_patch(kDbSchemaVersion, api::kCurrentVersion);
     KvVersionCall::response_.set_major(std::get<0>(max_version));
     KvVersionCall::response_.set_minor(std::get<1>(max_version));
     KvVersionCall::response_.set_patch(std::get<2>(max_version));
 }
@@ -65,7 +65,7 @@ void KvVersionCall::fill_predefined_reply() {
 Task KvVersionCall::operator()() {
     SILK_TRACE << "KvVersionCall START";
-    co_await agrpc::finish(responder_, response_, grpc::Status::OK);
+    co_await agrpc::finish(responder_, response_, ::grpc::Status::OK);
     SILK_TRACE << "KvVersionCall END version: " << response_.major() << "." << response_.minor() << "." << response_.patch();
 }
@@ -78,7 +78,7 @@ void TxCall::set_max_ttl_duration(const std::chrono::milliseconds& max_ttl_durat
 Task TxCall::operator()(mdbx::env* chaindata_env) {
     SILK_TRACE << "TxCall peer: " << peer() << " MDBX readers: " << chaindata_env->get_info().mi_numreaders;
-    grpc::Status status{grpc::Status::OK};
+    ::grpc::Status status{::grpc::Status::OK};
     try {
         // Assign a monotonically increasing unique ID to remote transaction
         const auto tx_id = ++next_tx_id_;
@@ -93,7 +93,7 @@ Task TxCall::operator()(mdbx::env* chaindata_env) {
         tx_id_pair.set_view_id(read_only_txn_->id());
         if (!co_await agrpc::write(responder_, tx_id_pair)) {
             SILK_WARN << "Tx closed by peer: " << server_context_.peer() << " error: write failed";
-            co_await agrpc::finish(responder_, grpc::Status::OK);
+            co_await agrpc::finish(responder_, ::grpc::Status::OK);
             co_return;
         }
         SILK_DEBUG << "TxCall announcement with txid=" << read_only_txn_->id() << " sent";
@@ -128,15 +128,15 @@
         } catch (const mdbx::exception& e) {
             const auto error_message = "start tx failed: " + std::string{e.what()};
             SILK_ERROR << "Tx peer: " << peer() << " " << error_message;
-            status = grpc::Status{grpc::StatusCode::RESOURCE_EXHAUSTED, error_message};
-        } catch (const server::CallException& ce) {
+            status = ::grpc::Status{::grpc::StatusCode::RESOURCE_EXHAUSTED, error_message};
+        } catch (const rpc::server::CallException& ce) {
             status = ce.status();
         } catch (const boost::system::system_error& se) {
             if (se.code() != boost::asio::error::operation_aborted) {
-                status = grpc::Status{grpc::StatusCode::INTERNAL, se.what()};
+                status = ::grpc::Status{::grpc::StatusCode::INTERNAL, se.what()};
             }
         } catch (const std::exception& exc) {
-            status = grpc::Status{grpc::StatusCode::INTERNAL, exc.what()};
+            status = ::grpc::Status{::grpc::StatusCode::INTERNAL, exc.what()};
         }
     };
     // NOLINTNEXTLINE(cppcoreguidelines-avoid-capturing-lambda-coroutines)
@@ -151,7 +151,7 @@ Task TxCall::operator()(mdbx::env* chaindata_env) {
             if (!ec) {
                 const auto error_msg{"no incoming request in " + std::to_string(max_idle_duration_.count()) + " ms"};
                 SILK_WARN << "Tx idle peer: " << server_context_.peer() << " error: " << error_msg;
-                status = grpc::Status{grpc::StatusCode::DEADLINE_EXCEEDED, error_msg};
+                status = ::grpc::Status{::grpc::StatusCode::DEADLINE_EXCEEDED, error_msg};
                 break;
             }
         }
@@ -173,11 +173,11 @@ Task TxCall::operator()(mdbx::env* chaindata_env) {
     } catch (const mdbx::exception& e) {
         const auto error_message = "start tx failed: " + std::string{e.what()};
         SILK_ERROR << "Tx peer: " << peer() << " " << error_message;
-        status = grpc::Status{grpc::StatusCode::RESOURCE_EXHAUSTED, error_message};
-    } catch (const server::CallException& ce) {
+        status = ::grpc::Status{::grpc::StatusCode::RESOURCE_EXHAUSTED, error_message};
+    } catch (const rpc::server::CallException& ce) {
        status = ce.status();
     } catch (const std::exception& exc) {
-        status = grpc::Status{grpc::StatusCode::INTERNAL, exc.what()};
+        status = ::grpc::Status{::grpc::StatusCode::INTERNAL, exc.what()};
     }

     co_await agrpc::finish(responder_, status);
@@ -208,14 +208,14 @@ void TxCall::handle_cursor_open(const remote::Cursor* request, remote::Pair& res
     if (!db::has_map(read_only_txn_, bucket_name.c_str())) {
         const auto err = "unknown bucket: " + request->bucket_name();
         SILK_ERROR << "Tx peer: " << peer() << " op=" << remote::Op_Name(request->op()) << " " << err;
-        throw_with_error(grpc::Status{grpc::StatusCode::INVALID_ARGUMENT, err});
+        throw_with_error(::grpc::Status{::grpc::StatusCode::INVALID_ARGUMENT, err});
     }

     // The number of opened cursors shall not exceed the maximum threshold.
     if (cursors_.size() == kMaxTxCursors) {
         const auto err = "maximum cursors per txn reached: " + std::to_string(cursors_.size());
         SILK_ERROR << "Tx peer: " << peer() << " op=" << remote::Op_Name(request->op()) << " " << err;
-        throw_with_error(grpc::Status{grpc::StatusCode::RESOURCE_EXHAUSTED, err});
+        throw_with_error(::grpc::Status{::grpc::StatusCode::RESOURCE_EXHAUSTED, err});
     }

     // Create a new database cursor tracking also bucket name (needed for reopening). We create a read-only dup-sort
@@ -233,7 +233,7 @@
     if (!inserted) {
         const auto error_message = "assigned cursor ID already in use: " + std::to_string(last_cursor_id_);
         SILK_ERROR << "Tx peer: " << peer() << " op=" << remote::Op_Name(request->op()) << " " << error_message;
-        throw_with_error(grpc::Status{grpc::StatusCode::ALREADY_EXISTS, error_message});
+        throw_with_error(::grpc::Status{::grpc::StatusCode::ALREADY_EXISTS, error_message});
     }

     // Send the assigned cursor ID back to the client.
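// Aside: the comparison rule implemented by higher_version_ignoring_patch above
// (and exercised by the kv_calls_test.cpp cases further below) can be sketched
// standalone as follows; the Version alias and C++17 usage are local to this
// sketch, not part of the patch.
#include <cstdint>
#include <tuple>

using Version = std::tuple<uint32_t, uint32_t, uint32_t>;

constexpr Version higher_version_ignoring_patch(Version lhs, Version rhs) {
    if (std::get<0>(lhs) != std::get<0>(rhs)) {
        return std::get<0>(lhs) > std::get<0>(rhs) ? lhs : rhs;  // major wins first
    }
    if (std::get<1>(lhs) != std::get<1>(rhs)) {
        return std::get<1>(lhs) > std::get<1>(rhs) ? lhs : rhs;  // then minor
    }
    return lhs;  // equal up to minor: keep lhs, i.e. the patch component is ignored
}

static_assert(higher_version_ignoring_patch({2, 5, 0}, {3, 0, 0}) == Version{3, 0, 0});
static_assert(higher_version_ignoring_patch({2, 5, 1}, {2, 5, 0}) == Version{2, 5, 1});
static_assert(higher_version_ignoring_patch({2, 5, 0}, {2, 5, 1}) == Version{2, 5, 0});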
@@ -246,7 +246,7 @@ void TxCall::handle_cursor_operation(const remote::Cursor* request, remote::Pair
     if (cursor_it == cursors_.end()) {
         const auto error_message = "unknown cursor: " + std::to_string(request->cursor());
         SILK_ERROR << "Tx peer: " << peer() << " op=" << remote::Op_Name(request->op()) << " " << error_message;
-        throw_with_error(grpc::Status{grpc::StatusCode::INVALID_ARGUMENT, error_message});
+        throw_with_error(::grpc::Status{::grpc::StatusCode::INVALID_ARGUMENT, error_message});
     }
     auto& cursor = cursor_it->second.cursor;
     try {
@@ -262,7 +262,7 @@ void TxCall::handle_cursor_close(const remote::Cursor* request) {
     if (cursor_it == cursors_.end()) {
         const auto error_message = "unknown cursor: " + std::to_string(request->cursor());
         SILK_ERROR << "Tx peer: " << peer() << " op: " << remote::Op_Name(request->op()) << " " << error_message;
-        throw_with_error(grpc::Status{grpc::StatusCode::INVALID_ARGUMENT, error_message});
+        throw_with_error(::grpc::Status{::grpc::StatusCode::INVALID_ARGUMENT, error_message});
     }
     cursors_.erase(cursor_it);
     SILK_DEBUG << "Tx peer: " << peer() << " closed cursor: " << request->cursor();
@@ -646,16 +646,16 @@ void TxCall::throw_with_internal_error(const remote::Cursor* request, const std:
     error_message.append(remote::Op_Name(request->op()));
     error_message.append(" on cursor: ");
     error_message.append(std::to_string(request->cursor()));
-    throw_with_error(grpc::Status{grpc::StatusCode::INTERNAL, error_message});
+    throw_with_error(::grpc::Status{::grpc::StatusCode::INTERNAL, error_message});
 }

 void TxCall::throw_with_internal_error(const std::string& message) {
-    throw_with_error(grpc::Status{grpc::StatusCode::INTERNAL, message});
+    throw_with_error(::grpc::Status{::grpc::StatusCode::INTERNAL, message});
 }

-void TxCall::throw_with_error(grpc::Status&& status) {
+void TxCall::throw_with_error(::grpc::Status&& status) {
     SILK_ERROR << "Tx peer: " << peer() << " " << status.error_message();
-    throw server::CallException{std::move(status)};
+    throw rpc::server::CallException{std::move(status)};
 }

 Task StateChangesCall::operator()(StateChangeCollection* source) {
@@ -682,7 +682,7 @@ Task StateChangesCall::operator()(StateChangeCollection* source) {
     if (!token) {
         const auto error_message = "assigned consumer token already in use: " + std::to_string(source->last_token());
         SILK_ERROR << "StateChanges peer: " << peer() << " subscription failed " << error_message;
-        co_await agrpc::finish(responder_, grpc::Status{grpc::StatusCode::ALREADY_EXISTS, error_message});
+        co_await agrpc::finish(responder_, ::grpc::Status{::grpc::StatusCode::ALREADY_EXISTS, error_message});
         co_return;
     }
@@ -713,7 +713,7 @@ Task StateChangesCall::operator()(StateChangeCollection* source) {
     }

     SILK_DEBUG << "Closing state change stream server-side";
-    co_await agrpc::finish(responder_, grpc::Status::OK);
+    co_await agrpc::finish(responder_, ::grpc::Status::OK);
     SILK_DEBUG << "State change stream closed server-side";

     SILK_TRACE << "StateChangesCall END";
@@ -724,7 +724,7 @@
 Task SnapshotsCall::operator()() {
     SILK_TRACE << "SnapshotsCall START";
     remote::SnapshotsReply response;  // TODO(canepat) implement properly
-    co_await agrpc::finish(responder_, response, grpc::Status::OK);
+    co_await agrpc::finish(responder_, response, ::grpc::Status::OK);
     SILK_TRACE << "SnapshotsCall END #blocks_files: " << response.blocks_files_size() << " #history_files: " << response.history_files_size();
 }
@@ -732,7 +732,7 @@
 Task HistoryGetCall::operator()() {
     SILK_TRACE << "HistoryGetCall START";
     remote::HistoryGetReply response;  // TODO(canepat) implement properly
-    co_await agrpc::finish(responder_, response, grpc::Status::OK);
+    co_await agrpc::finish(responder_, response, ::grpc::Status::OK);
     SILK_TRACE << "HistoryGetCall END ok: " << response.ok() << " value: " << response.v();
 }
@@ -740,7 +740,7 @@
 Task DomainGetCall::operator()() {
     SILK_TRACE << "DomainGetCall START";
     remote::DomainGetReply response;  // TODO(canepat) implement properly
-    co_await agrpc::finish(responder_, response, grpc::Status::OK);
+    co_await agrpc::finish(responder_, response, ::grpc::Status::OK);
     SILK_TRACE << "DomainGetCall END ok: " << response.ok() << " value: " << response.v();
 }
@@ -748,7 +748,7 @@
 Task IndexRangeCall::operator()() {
     SILK_TRACE << "IndexRangeCall START";
     remote::IndexRangeReply response;  // TODO(canepat) implement properly
-    co_await agrpc::finish(responder_, response, grpc::Status::OK);
+    co_await agrpc::finish(responder_, response, ::grpc::Status::OK);
     SILK_TRACE << "IndexRangeCall END #timestamps: " << response.timestamps_size() << " next_page_token: " << response.next_page_token();
 }
@@ -756,7 +756,7 @@
 Task HistoryRangeCall::operator()() {
     SILK_TRACE << "HistoryRangeCall START";
     remote::Pairs response;  // TODO(canepat) implement properly
-    co_await agrpc::finish(responder_, response, grpc::Status::OK);
+    co_await agrpc::finish(responder_, response, ::grpc::Status::OK);
     SILK_TRACE << "HistoryRangeCall END #keys: " << response.keys_size() << " #values: " << response.values_size() << " next_page_token: " << response.next_page_token();
 }
@@ -765,9 +765,9 @@
 Task DomainRangeCall::operator()() {
     SILK_TRACE << "DomainRangeCall START";
     remote::Pairs response;  // TODO(canepat) implement properly
-    co_await agrpc::finish(responder_, response, grpc::Status::OK);
+    co_await agrpc::finish(responder_, response, ::grpc::Status::OK);
     SILK_TRACE << "DomainRangeCall END #keys: " << response.keys_size() << " #values: " << response.values_size() << " next_page_token: " << response.next_page_token();
 }

-} // namespace silkworm::rpc
+} // namespace silkworm::kv::grpc::server
diff --git a/silkworm/db/remote/kv/grpc/server/kv_calls.hpp b/silkworm/db/remote/kv/grpc/server/kv_calls.hpp
index 7d36b578bc..bac8f1c06b 100644
--- a/silkworm/db/remote/kv/grpc/server/kv_calls.hpp
+++ b/silkworm/db/remote/kv/grpc/server/kv_calls.hpp
@@ -34,22 +34,18 @@
 #include
 #include

+#include "../../api/direct_service.hpp"
 #include "state_change_collection.hpp"

 // KV API protocol versions
 // 5.1.0 - first issue

-namespace silkworm::rpc {
+namespace silkworm::kv::grpc::server {

-using KvVersion = std::tuple;
-
-KvVersion higher_version_ignoring_patch(KvVersion lhs, KvVersion rhs);
+api::Version higher_version_ignoring_patch(api::Version lhs, api::Version rhs);

 //! Current DB schema version.
-constexpr auto kDbSchemaVersion = KvVersion{3, 0, 0};
-
-//! Current KV API protocol version.
-constexpr auto kKvApiVersion = KvVersion{5, 1, 0};
+constexpr auto kDbSchemaVersion = api::Version{3, 0, 0};

 //! The max life duration for MDBX transactions (long-lived transactions are discouraged).
 constexpr std::chrono::milliseconds kMaxTxDuration{60'000};
@@ -59,7 +55,7 @@ constexpr std::size_t kMaxTxCursors{100};

 //! Unary RPC for Version method of 'ethbackend' gRPC protocol.
 //! rpc Version(google.protobuf.Empty) returns (types.VersionReply);
-class KvVersionCall : public server::UnaryCall {
+class KvVersionCall : public rpc::server::UnaryCall {
 public:
     using Base::UnaryCall;
@@ -73,7 +69,7 @@ class KvVersionCall : public server::UnaryCall {
+class TxCall : public rpc::server::BidiStreamingCall {
 public:
     using Base::BidiStreamingCall;
@@ -142,7 +138,7 @@ class TxCall : public server::BidiStreamingCall {
     void throw_with_internal_error(const std::string& message);

-    void throw_with_error(grpc::Status&& status);
+    void throw_with_error(::grpc::Status&& status);

     static std::chrono::milliseconds max_ttl_duration_;
     static inline uint64_t next_tx_id_{0};
@@ -154,7 +150,7 @@ class TxCall : public server::BidiStreamingCall {
 //! Server-streaming RPC for StateChanges method of 'kv' gRPC protocol.
 //! rpc StateChanges(StateChangeRequest) returns (stream StateChangeBatch);
-class StateChangesCall : public server::ServerStreamingCall {
+class StateChangesCall : public rpc::server::ServerStreamingCall {
 public:
     using Base::ServerStreamingCall;
@@ -163,7 +159,7 @@ class StateChangesCall : public server::ServerStreamingCall {
+class SnapshotsCall : public rpc::server::UnaryCall {
 public:
     using Base::UnaryCall;
@@ -172,7 +168,7 @@ class SnapshotsCall : public server::UnaryCall {
+class HistoryGetCall : public rpc::server::UnaryCall {
 public:
     using Base::UnaryCall;
@@ -181,7 +177,7 @@ class HistoryGetCall : public server::UnaryCall {
+class DomainGetCall : public rpc::server::UnaryCall {
 public:
     using Base::UnaryCall;
@@ -190,7 +186,7 @@ class DomainGetCall : public server::UnaryCall {
+class IndexRangeCall : public rpc::server::UnaryCall {
 public:
     using Base::UnaryCall;
@@ -199,7 +195,7 @@ class IndexRangeCall : public server::UnaryCall {
+class HistoryRangeCall : public rpc::server::UnaryCall {
 public:
     using Base::UnaryCall;
@@ -208,11 +204,11 @@ class HistoryRangeCall : public server::UnaryCall {
+class DomainRangeCall : public rpc::server::UnaryCall {
 public:
     using Base::UnaryCall;

     Task operator()();
 };

-} // namespace silkworm::rpc
+} // namespace silkworm::kv::grpc::server
diff --git a/silkworm/db/remote/kv/grpc/server/kv_calls_test.cpp b/silkworm/db/remote/kv/grpc/server/kv_calls_test.cpp
index a1ae28943d..0553e57140 100644
--- a/silkworm/db/remote/kv/grpc/server/kv_calls_test.cpp
+++ b/silkworm/db/remote/kv/grpc/server/kv_calls_test.cpp
@@ -22,42 +22,42 @@
 #include

-namespace silkworm::rpc {
+namespace silkworm::kv::grpc::server {

 TEST_CASE("higher_version_ignoring_patch", "[silkworm][rpc][kv_calls]") {
     SECTION("lhs.major > rhs.major") {
-        KvVersion lhs{2, 0, 0};
-        KvVersion rhs{1, 0, 0};
+        api::Version lhs{2, 0, 0};
+        api::Version rhs{1, 0, 0};
         CHECK(higher_version_ignoring_patch(lhs, rhs) == lhs);
     }

     SECTION("rhs.major > lhs.major") {
-        KvVersion lhs{2, 0, 0};
-        KvVersion rhs{3, 0, 0};
+        api::Version lhs{2, 0, 0};
+        api::Version rhs{3, 0, 0};
         CHECK(higher_version_ignoring_patch(lhs, rhs) == rhs);
     }

     SECTION("lhs.minor > rhs.minor") {
-        KvVersion lhs{2, 5, 0};
-        KvVersion rhs{2, 2, 0};
+        api::Version lhs{2, 5, 0};
+        api::Version rhs{2, 2, 0};
         CHECK(higher_version_ignoring_patch(lhs, rhs) == lhs);
     }

     SECTION("rhs.minor > lhs.minor") {
-        KvVersion lhs{2, 5, 0};
-        KvVersion rhs{2, 6, 0};
+        api::Version lhs{2, 5, 0};
+        api::Version rhs{2, 6, 0};
         CHECK(higher_version_ignoring_patch(lhs, rhs) == rhs);
     }

     SECTION("patch not relevant") {
-        KvVersion lhs1{2, 5, 0};
-        KvVersion rhs1{2, 5, 0};
+        api::Version lhs1{2, 5, 0};
+        api::Version rhs1{2, 5, 0};
         CHECK(higher_version_ignoring_patch(lhs1, rhs1) == lhs1);
-        KvVersion lhs2{2, 5, 1};
-        KvVersion rhs2{2, 5, 0};
+        api::Version lhs2{2, 5, 1};
+        api::Version rhs2{2, 5, 0};
         CHECK(higher_version_ignoring_patch(lhs2, rhs2) == lhs2);
-        KvVersion lhs3{2, 5, 0};
-        KvVersion rhs3{2, 5, 1};
+        api::Version lhs3{2, 5, 0};
+        api::Version rhs3{2, 5, 1};
         CHECK(higher_version_ignoring_patch(lhs3, rhs3) == lhs3);
     }
 }
@@ -90,4 +90,4 @@ TEST_CASE("dump_mdbx_result", "[silkworm][rpc][kv_calls]") {
     ro_txn.abort();
 }

-} // namespace silkworm::rpc
+} // namespace silkworm::kv::grpc::server
diff --git a/silkworm/db/remote/kv/grpc/server/kv_server.cpp b/silkworm/db/remote/kv/grpc/server/kv_server.cpp
index 890e01cf75..f45e01b006 100644
--- a/silkworm/db/remote/kv/grpc/server/kv_server.cpp
+++ b/silkworm/db/remote/kv/grpc/server/kv_server.cpp
@@ -22,16 +22,18 @@
 #include "kv_calls.hpp"

-namespace silkworm::rpc {
+namespace silkworm::kv::grpc::server {

-KvServer::KvServer(const ServerSettings& settings, mdbx::env* chaindata_env, StateChangeCollection* state_change_source)
+using rpc::request_repeatedly;
+
+KvServer::KvServer(const rpc::ServerSettings& settings, mdbx::env* chaindata_env, StateChangeCollection* state_change_source)
     : Server(settings), chaindata_env_{chaindata_env}, state_change_source_{state_change_source} {
     setup_kv_calls();
     SILK_INFO << "KvServer created listening on: " << settings.address_uri;
 }

 // Register the gRPC services: they must exist for the lifetime of the server built by builder.
-void KvServer::register_async_services(grpc::ServerBuilder& builder) {
+void KvServer::register_async_services(::grpc::ServerBuilder& builder) {
     builder.RegisterService(&kv_async_service_);
 }
@@ -95,4 +97,4 @@ void KvServer::register_request_calls() {
     }
 }

-} // namespace silkworm::rpc
+} // namespace silkworm::kv::grpc::server
diff --git a/silkworm/db/remote/kv/grpc/server/kv_server.hpp b/silkworm/db/remote/kv/grpc/server/kv_server.hpp
index e0da595992..8e059e8658 100644
--- a/silkworm/db/remote/kv/grpc/server/kv_server.hpp
+++ b/silkworm/db/remote/kv/grpc/server/kv_server.hpp
@@ -26,17 +26,17 @@
 #include "state_change_collection.hpp"

-namespace silkworm::rpc {
+namespace silkworm::kv::grpc::server {

-class KvServer : public virtual Server {
+class KvServer : public virtual rpc::Server {
 public:
-    KvServer(const ServerSettings& settings, mdbx::env* chaindata_env, StateChangeCollection* state_change_source);
+    KvServer(const rpc::ServerSettings& settings, mdbx::env* chaindata_env, StateChangeCollection* state_change_source);

     KvServer(const KvServer&) = delete;
     KvServer& operator=(const KvServer&) = delete;

 protected:
-    void register_async_services(grpc::ServerBuilder& builder) override;
+    void register_async_services(::grpc::ServerBuilder& builder) override;
     void register_request_calls() override;

 private:
@@ -53,4 +53,4 @@ class KvServer : public virtual Server {
     StateChangeCollection* state_change_source_;
 };

-} // namespace silkworm::rpc
+} // namespace silkworm::kv::grpc::server
diff --git a/silkworm/db/remote/kv/grpc/server/kv_server_test.cpp b/silkworm/db/remote/kv/grpc/server/kv_server_test.cpp
index 15e4271469..76966579dd 100644
--- a/silkworm/db/remote/kv/grpc/server/kv_server_test.cpp
+++ b/silkworm/db/remote/kv/grpc/server/kv_server_test.cpp
@@ -202,6 +202,8 @@ struct TestableStateChangeCollection : public StateChangeCollection {
     StateChangeTokenObserver token_observer_;
 };

+using KvServer = kv::grpc::server::KvServer;
+
 struct KvEnd2EndTest {
     explicit KvEnd2EndTest(silkworm::log::Level log_verbosity = silkworm::log::Level::kNone)
         : set_verbosity_log_guard{log_verbosity} {
@@ -226,7 +228,7 @@ struct KvEnd2EndTest {
         rw_txn.commit();

         state_change_collection = std::make_unique();
-        server = std::make_unique(srv_config, &database_env, state_change_collection.get());
+        server = std::make_unique(srv_config, &database_env, state_change_collection.get());
         server->build_and_start();
     }
@@ -267,19 +269,19 @@ struct KvEnd2EndTest {
     std::unique_ptr db_config;
     mdbx::env_managed database_env;
     std::unique_ptr state_change_collection;
-    std::unique_ptr server;
+    std::unique_ptr server;
 };

 } // namespace

-namespace silkworm::rpc {
+namespace silkworm::kv::grpc::server {

 // Exclude gRPC tests from sanitizer builds due to data race warnings inside gRPC library
 #ifndef SILKWORM_SANITIZE
 TEST_CASE("KvServer", "[silkworm][node][rpc]") {
     test_util::SetLogVerbosityGuard guard{log::Level::kNone};
-    Grpc2SilkwormLogGuard log_guard;
-    ServerSettings srv_config;
+    rpc::Grpc2SilkwormLogGuard log_guard;
+    rpc::ServerSettings srv_config;
     srv_config.address_uri = kTestAddressUri;
     TemporaryDirectory tmp_dir;
     DataDirectory data_dir{tmp_dir.path()};
@@ -382,7 +384,7 @@ TEST_CASE_METHOD(KvEnd2EndTest, "KvServer E2E: KV", "[silkworm][node][rpc]") {
         std::vector responses;
         const auto status = kv_client->tx(requests, responses);
         CHECK(!status.ok());
-        CHECK(status.error_code() == grpc::StatusCode::INVALID_ARGUMENT);
+        CHECK(status.error_code() == ::grpc::StatusCode::INVALID_ARGUMENT);
         CHECK(absl::StrContains(status.error_message(), "unknown bucket"));
         CHECK(responses.size() == 1);
         CHECK(responses[0].tx_id() != 0);
@@ -396,7 +398,7 @@ TEST_CASE_METHOD(KvEnd2EndTest, "KvServer E2E: KV", "[silkworm][node][rpc]") {
         std::vector responses;
         const auto status = kv_client->tx(requests, responses);
         CHECK(!status.ok());
-        CHECK(status.error_code() == grpc::StatusCode::INVALID_ARGUMENT);
+        CHECK(status.error_code() == ::grpc::StatusCode::INVALID_ARGUMENT);
         CHECK(absl::StrContains(status.error_message(), "unknown bucket"));
         CHECK(responses.size() == 1);
         CHECK(responses[0].tx_id() != 0);
@@ -409,7 +411,7 @@ TEST_CASE_METHOD(KvEnd2EndTest, "KvServer E2E: KV", "[silkworm][node][rpc]") {
         std::vector responses;
         const auto status = kv_client->tx(requests, responses);
         CHECK(!status.ok());
-        CHECK(status.error_code() == grpc::StatusCode::INVALID_ARGUMENT);
+        CHECK(status.error_code() == ::grpc::StatusCode::INVALID_ARGUMENT);
         CHECK(absl::StrContains(status.error_message(), "unknown cursor"));
         CHECK(responses.size() == 1);
         CHECK(responses[0].tx_id() != 0);
@@ -495,7 +497,7 @@ TEST_CASE_METHOD(KvEnd2EndTest, "KvServer E2E: KV", "[silkworm][node][rpc]") {
         std::vector responses;
         const auto status = kv_client->tx(requests, responses);
         CHECK(!status.ok());
-        CHECK(status.error_code() == grpc::StatusCode::INVALID_ARGUMENT);
+        CHECK(status.error_code() == ::grpc::StatusCode::INVALID_ARGUMENT);
         CHECK(absl::StrContains(status.error_message(), "unknown cursor"));
         CHECK(responses.size() == 2);
         CHECK(responses[0].tx_id() != 0);
@@ -620,7 +622,7 @@ TEST_CASE_METHOD(KvEnd2EndTest, "KvServer E2E: KV", "[silkworm][node][rpc]") {
         });

         // Start a StateChanges server-streaming call
-        grpc::ClientContext context1;
+        ::grpc::ClientContext context1;
         remote::StateChangeRequest request1;
         auto subscribe_reply_reader1 = kv_client->statechanges_start(&context1, request1);
@@ -629,13 +631,13 @@ TEST_CASE_METHOD(KvEnd2EndTest, "KvServer E2E: KV", "[silkworm][node][rpc]") {
         token_reset_condition.wait(token_reset_lock, [&] { return token_reset; });

         // Start another StateChanges server-streaming call and check it fails
-        grpc::ClientContext context2;
+        ::grpc::ClientContext context2;
         remote::StateChangeRequest request2;
         auto subscribe_reply_reader2 = kv_client->statechanges_start(&context2, request2);

         const auto status2 = subscribe_reply_reader2->Finish();
         CHECK(!status2.ok());
-        CHECK(status2.error_code() == grpc::StatusCode::ALREADY_EXISTS);
+        CHECK(status2.error_code() == ::grpc::StatusCode::ALREADY_EXISTS);
         CHECK(absl::StrContains(status2.error_message(), "assigned consumer token already in use"));

         // Close the server-side RPC stream and check first call completes successfully
@@ -698,7 +700,7 @@ TEST_CASE("KvServer E2E: trigger server-side write error", "[silkworm][node][rpc
         // Start many Tx calls w/o reading responses after writing requests.
         for (uint32_t i{0}; i < kNumTxs; i++) {
-            grpc::ClientContext context;
+            ::grpc::ClientContext context;
             auto tx_stream = kv_client.tx_start(&context);
             remote::Pair response;
             CHECK(tx_stream->Read(&response));
@@ -727,10 +729,10 @@ TEST_CASE("KvServer E2E: Tx max simultaneous readers exceeded", "[silkworm][node
     auto kv_client = *test.kv_client;

     // Start and keep open as many Tx calls as the maximum number of readers.
-    std::vector> client_contexts;
+    std::vector> client_contexts;
     std::vector tx_streams;
     for (uint32_t i{0}; i < test.database_env.max_readers(); i++) {
-        auto& context = client_contexts.emplace_back(std::make_unique());
+        auto& context = client_contexts.emplace_back(std::make_unique<::grpc::ClientContext>());
         auto tx_stream = kv_client.tx_start(context.get());
         // You must read at least the first unsolicited incoming message (TxID announcement).
         remote::Pair response;
         CHECK(tx_stream->Read(&response));
     }

     // Now trying to start another Tx call will exceed the maximum number of readers.
-    grpc::ClientContext context;
+    ::grpc::ClientContext context;
     const auto failing_tx_stream = kv_client.tx_start(&context);
     remote::Pair response;
     REQUIRE(!failing_tx_stream->Read(&response));  // Tx RPC immediately fails for exhaustion, no TxID announcement
     auto failing_tx_status = failing_tx_stream->Finish();
     CHECK(!failing_tx_status.ok());
-    CHECK(failing_tx_status.error_code() == grpc::StatusCode::RESOURCE_EXHAUSTED);
+    CHECK(failing_tx_status.error_code() == ::grpc::StatusCode::RESOURCE_EXHAUSTED);
     CHECK(absl::StrContains(failing_tx_status.error_message(), "start tx failed"));

     // Dispose all the opened Tx calls.
@@ -762,7 +764,7 @@ TEST_CASE("KvServer E2E: Tx max opened cursors exceeded", "[silkworm][node][rpc]
     test.fill_tables();
     auto kv_client = *test.kv_client;

-    grpc::ClientContext context;
+    ::grpc::ClientContext context;
     const auto tx_stream = kv_client.tx_start(&context);
     // You must read at least the first unsolicited incoming message (TxID announcement).
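// Aside: that first unsolicited read, noted in the comment above, is the fixed
// opening of the Tx protocol (rpc Tx(stream Cursor) returns (stream Pair)).
// A hedged client-side sketch using the raw generated stub follows; the include
// path, channel argument, Op::OPEN enumerator and bucket name are assumptions
// drawn from the proto usage in this patch, not verbatim project code.
#include <memory>
#include <grpcpp/grpcpp.h>
#include "remote/kv.grpc.pb.h"  // assumed path of the generated 'remote' KV stubs

void open_cursor_example(const std::shared_ptr<::grpc::Channel>& channel) {
    auto stub = remote::KV::NewStub(channel);
    ::grpc::ClientContext context;
    auto tx_stream = stub->Tx(&context);

    remote::Pair pair;
    tx_stream->Read(&pair);  // first unsolicited message announces the txn ID
    const auto tx_id = pair.tx_id();

    remote::Cursor open_request;
    open_request.set_op(remote::Op::OPEN);    // OPEN assumed from the KV proto
    open_request.set_bucket_name("TestMap");  // placeholder bucket name
    tx_stream->Write(open_request);
    tx_stream->Read(&pair);  // reply carries the assigned cursor ID
    const auto cursor_id = pair.cursor_id();

    tx_stream->WritesDone();  // close the client side, then collect the final status
    tx_stream->Finish();
    (void)tx_id, (void)cursor_id;
}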
     remote::Pair response;
@@ -791,14 +793,14 @@ TEST_CASE("KvServer E2E: Tx max opened cursors exceeded", "[silkworm][node][rpc]
     REQUIRE(tx_stream->WritesDone());
     auto status = tx_stream->Finish();
     CHECK(!status.ok());
-    CHECK(status.error_code() == grpc::StatusCode::RESOURCE_EXHAUSTED);
+    CHECK(status.error_code() == ::grpc::StatusCode::RESOURCE_EXHAUSTED);
     CHECK(absl::StrContains(status.error_message(), "maximum cursors per txn"));
 }

 class TxIdleTimeoutGuard {
 public:
     explicit TxIdleTimeoutGuard(uint8_t t) { TxCall::set_max_idle_duration(std::chrono::milliseconds{t}); }
-    ~TxIdleTimeoutGuard() { TxCall::set_max_idle_duration(server::kDefaultMaxIdleDuration); }
+    ~TxIdleTimeoutGuard() { TxCall::set_max_idle_duration(rpc::server::kDefaultMaxIdleDuration); }
 };

 TEST_CASE("KvServer E2E: bidirectional idle timeout", "[silkworm][node][rpc]") {
@@ -812,28 +814,28 @@ TEST_CASE("KvServer E2E: bidirectional idle timeout", "[silkworm][node][rpc]") {
     // *appropriate* to call Finish only after all incoming messages have been read (not the
     // case here, missing tx ID announcement read) *and* no outgoing messages need to be sent.
     /*SECTION("Tx KO: immediate finish", "[.]") {
-        grpc::ClientContext context;
+        ::grpc::ClientContext context;
         const auto tx_reader_writer = kv_client.tx_start(&context);
         auto status = tx_reader_writer->Finish();
         CHECK(!status.ok());
-        CHECK(status.error_code() == grpc::StatusCode::DEADLINE_EXCEEDED);
+        CHECK(status.error_code() == ::grpc::StatusCode::DEADLINE_EXCEEDED);
         CHECK(absl::StrContains(status.error_message(), "call idle, no incoming request"));
     }*/

     SECTION("Tx KO: finish after first read (w/o WritesDone)") {
-        grpc::ClientContext context;
+        ::grpc::ClientContext context;
         const auto tx_reader_writer = kv_client.tx_start(&context);
         remote::Pair response;
         CHECK(tx_reader_writer->Read(&response));
         CHECK(response.tx_id() != 0);
         auto status = tx_reader_writer->Finish();
         CHECK(!status.ok());
-        CHECK(status.error_code() == grpc::StatusCode::DEADLINE_EXCEEDED);
+        CHECK(status.error_code() == ::grpc::StatusCode::DEADLINE_EXCEEDED);
         CHECK(absl::StrContains(status.error_message(), "no incoming request"));
     }

     SECTION("Tx KO: finish after first read and one write/read (w/o WritesDone)") {
-        grpc::ClientContext context;
+        ::grpc::ClientContext context;
         const auto tx_reader_writer = kv_client.tx_start(&context);
         remote::Pair response;
         CHECK(tx_reader_writer->Read(&response));
@@ -847,7 +849,7 @@ TEST_CASE("KvServer E2E: bidirectional idle timeout", "[silkworm][node][rpc]") {
         CHECK(response.cursor_id() != 0);
         auto status = tx_reader_writer->Finish();
         CHECK(!status.ok());
-        CHECK(status.error_code() == grpc::StatusCode::DEADLINE_EXCEEDED);
+        CHECK(status.error_code() == ::grpc::StatusCode::DEADLINE_EXCEEDED);
         CHECK(absl::StrContains(status.error_message(), "no incoming request"));
     }
 }
@@ -1803,7 +1805,7 @@ TEST_CASE("KvServer E2E: Tx cursor invalid operations", "[silkworm][node][rpc]")
     std::vector responses;
     const auto status = kv_client.tx(requests, responses);
     CHECK(!status.ok());
-    CHECK(status.error_code() == grpc::StatusCode::INTERNAL);
+    CHECK(status.error_code() == ::grpc::StatusCode::INTERNAL);
     CHECK(absl::StrContains(status.error_message(), "exception: MDBX_ENODATA"));
     CHECK(responses.size() == 2);
     CHECK(responses[0].tx_id() != 0);
@@ -1827,7 +1829,7 @@ TEST_CASE("KvServer E2E: Tx cursor invalid operations", "[silkworm][node][rpc]")
     std::vector responses;
     const auto status = kv_client.tx(requests, responses);
     CHECK(!status.ok());
-    CHECK(status.error_code() == grpc::StatusCode::INTERNAL);
+    CHECK(status.error_code() == ::grpc::StatusCode::INTERNAL);
     CHECK(absl::StrContains(status.error_message(), "exception: MDBX_INCOMPATIBLE"));
     CHECK(responses.size() == 3);
     CHECK(responses[0].tx_id() != 0);
@@ -1851,7 +1853,7 @@ TEST_CASE("KvServer E2E: Tx cursor invalid operations", "[silkworm][node][rpc]")
     std::vector responses;
     const auto status = kv_client.tx(requests, responses);
     CHECK(!status.ok());
-    CHECK(status.error_code() == grpc::StatusCode::INTERNAL);
+    CHECK(status.error_code() == ::grpc::StatusCode::INTERNAL);
     CHECK(absl::StrContains(status.error_message(), "exception: MDBX_INCOMPATIBLE"));
     CHECK(responses.size() == 3);
     CHECK(responses[0].tx_id() != 0);
@@ -1872,7 +1874,7 @@ TEST_CASE("KvServer E2E: Tx cursor invalid operations", "[silkworm][node][rpc]")
     std::vector responses;
     const auto status = kv_client.tx(requests, responses);
     CHECK(!status.ok());
-    CHECK(status.error_code() == grpc::StatusCode::INTERNAL);
+    CHECK(status.error_code() == ::grpc::StatusCode::INTERNAL);
     CHECK(absl::StrContains(status.error_message(), "exception: mdbx"));
     CHECK(responses.size() == 2);
     CHECK(responses[0].tx_id() != 0);
@@ -1892,7 +1894,7 @@ TEST_CASE("KvServer E2E: Tx cursor invalid operations", "[silkworm][node][rpc]")
     std::vector responses;
     const auto status = kv_client.tx(requests, responses);
     CHECK(!status.ok());
-    CHECK(status.error_code() == grpc::StatusCode::INTERNAL);
+    CHECK(status.error_code() == ::grpc::StatusCode::INTERNAL);
     CHECK(absl::StrContains(status.error_message(), "exception: mdbx"));
     CHECK(responses.size() == 2);
     CHECK(responses[0].tx_id() != 0);
@@ -1912,7 +1914,7 @@ TEST_CASE("KvServer E2E: Tx cursor invalid operations", "[silkworm][node][rpc]")
     std::vector responses;
     const auto status = kv_client.tx(requests, responses);
     CHECK(!status.ok());
-    CHECK(status.error_code() == grpc::StatusCode::INTERNAL);
+    CHECK(status.error_code() == ::grpc::StatusCode::INTERNAL);
     CHECK(absl::StrContains(status.error_message(), "MDBX_INCOMPATIBLE"));
     CHECK(responses.size() == 2);
     CHECK(responses[0].tx_id() != 0);
@@ -1932,7 +1934,7 @@ TEST_CASE("KvServer E2E: Tx cursor invalid operations", "[silkworm][node][rpc]")
     std::vector responses;
     const auto status = kv_client.tx(requests, responses);
     CHECK(!status.ok());
-    CHECK(status.error_code() == grpc::StatusCode::INTERNAL);
+    CHECK(status.error_code() == ::grpc::StatusCode::INTERNAL);
     CHECK(absl::StrContains(status.error_message(), "MDBX_INCOMPATIBLE"));
     CHECK(responses.size() == 2);
     CHECK(responses[0].tx_id() != 0);
@@ -1953,7 +1955,7 @@ TEST_CASE("KvServer E2E: bidirectional max TTL duration", "[silkworm][node][rpc]
     TxMaxTimeToLiveGuard ttl_guard{kCustomMaxTimeToLive};

     SECTION("Tx: cursor NEXT ops across renew are consecutive") {
-        grpc::ClientContext context;
+        ::grpc::ClientContext context;
         const auto tx_reader_writer = kv_client.tx_start(&context);
         remote::Pair response;
         CHECK(tx_reader_writer->Read(&response));
@@ -1989,7 +1991,7 @@ TEST_CASE("KvServer E2E: bidirectional max TTL duration", "[silkworm][node][rpc]
     }

     SECTION("Tx: cursor NEXT_DUP ops across renew are consecutive") {
-        grpc::ClientContext context;
+        ::grpc::ClientContext context;
         const auto tx_reader_writer = kv_client.tx_start(&context);
         remote::Pair response;
         CHECK(tx_reader_writer->Read(&response));
@@ -2026,7 +2028,7 @@ TEST_CASE("KvServer E2E: bidirectional max TTL duration", "[silkworm][node][rpc]
 #ifndef _WIN32
     SECTION("Tx: cursor NEXT op after renew sees changes") {
-        grpc::ClientContext context;
+        ::grpc::ClientContext context;
         // Start Tx RPC and open one cursor for TestMap table
         const auto tx_reader_writer = kv_client.tx_start(&context);
         remote::Pair response;
@@ -2095,7 +2097,7 @@ TEST_CASE("KvServer E2E: bidirectional max TTL duration", "[silkworm][node][rpc]
     }

     SECTION("Tx: cursor NEXT_DUP op after renew sees changes") {
-        grpc::ClientContext context;
+        ::grpc::ClientContext context;
         // Start Tx RPC and open one cursor for TestMultiMap table
         const auto tx_reader_writer = kv_client.tx_start(&context);
         remote::Pair response;
@@ -2182,4 +2184,4 @@ TEST_CASE("KvServer E2E: bidirectional max TTL duration", "[silkworm][node][rpc]
 }
 #endif  // SILKWORM_SANITIZE

-} // namespace silkworm::rpc
+} // namespace silkworm::kv::grpc::server
diff --git a/silkworm/db/remote/kv/grpc/test_util/sample_protos.hpp b/silkworm/db/remote/kv/grpc/test_util/sample_protos.hpp
index a94a3ec68b..52e05b9956 100644
--- a/silkworm/db/remote/kv/grpc/test_util/sample_protos.hpp
+++ b/silkworm/db/remote/kv/grpc/test_util/sample_protos.hpp
@@ -25,7 +25,7 @@
 #include "../../api/endpoint/temporal_point.hpp"
 #include "../../api/endpoint/temporal_range.hpp"

-namespace silkworm::remote::kv::test_util {
+namespace silkworm::kv::test_util {

 namespace proto = ::remote;

@@ -226,4 +226,4 @@ inline api::DomainRangeResult sample_domain_range_result() {
     };
 }

-} // namespace silkworm::remote::kv::test_util
+} // namespace silkworm::kv::test_util
diff --git a/silkworm/node/remote/ethbackend/grpc/server/backend_kv_server.cpp b/silkworm/node/backend_kv_server.cpp
similarity index 85%
rename from silkworm/node/remote/ethbackend/grpc/server/backend_kv_server.cpp
rename to silkworm/node/backend_kv_server.cpp
index 67e7c9f1ff..76440fffbd 100644
--- a/silkworm/node/remote/ethbackend/grpc/server/backend_kv_server.cpp
+++ b/silkworm/node/backend_kv_server.cpp
@@ -19,16 +19,16 @@
 #include
 #include

-namespace silkworm::rpc {
+namespace silkworm::node {

-BackEndKvServer::BackEndKvServer(const ServerSettings& settings, const EthereumBackEnd& backend)
+BackEndKvServer::BackEndKvServer(const rpc::ServerSettings& settings, const EthereumBackEnd& backend)
     : Server(settings),
       BackEndServer(settings, backend),
       KvServer(settings, backend.chaindata_env(), backend.state_change_source()) {
 }

 // Register the gRPC services: they must exist for the lifetime of the server built by builder.
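// Aside on the composition used by the BackEndKvServer below: because both
// concrete servers inherit rpc::Server as a *virtual* base, the combined class
// holds a single Server subobject and one override forwards registration to
// both parents. A self-contained toy model with illustrative names:
#include <iostream>

struct ServerBuilder {};  // stand-in for ::grpc::ServerBuilder

class Server {
  public:
    virtual ~Server() = default;

  protected:
    virtual void register_async_services(ServerBuilder&) = 0;
};

class BackEndServer : public virtual Server {
  protected:
    void register_async_services(ServerBuilder&) override { std::cout << "ETHBACKEND registered\n"; }
};

class KvServer : public virtual Server {
  protected:
    void register_async_services(ServerBuilder&) override { std::cout << "KV registered\n"; }
};

class BackEndKvServer : public BackEndServer, public KvServer {
  protected:
    // One override resolves the ambiguous final overrider and registers both
    // services against the single shared Server base.
    void register_async_services(ServerBuilder& builder) override {
        BackEndServer::register_async_services(builder);
        KvServer::register_async_services(builder);
    }
};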
-void BackEndKvServer::register_async_services(grpc::ServerBuilder& builder) {
+void BackEndKvServer::register_async_services(::grpc::ServerBuilder& builder) {
     BackEndServer::register_async_services(builder);
     KvServer::register_async_services(builder);
 }
@@ -39,4 +39,4 @@ void BackEndKvServer::register_request_calls() {
     KvServer::register_request_calls();
 }

-} // namespace silkworm::rpc
+} // namespace silkworm::node
diff --git a/silkworm/node/remote/ethbackend/grpc/server/backend_kv_server.hpp b/silkworm/node/backend_kv_server.hpp
similarity index 75%
rename from silkworm/node/remote/ethbackend/grpc/server/backend_kv_server.hpp
rename to silkworm/node/backend_kv_server.hpp
index 562765bb83..3515f5524f 100644
--- a/silkworm/node/remote/ethbackend/grpc/server/backend_kv_server.hpp
+++ b/silkworm/node/backend_kv_server.hpp
@@ -21,18 +21,18 @@
 #include
 #include

-namespace silkworm::rpc {
+namespace silkworm::node {

-class BackEndKvServer : public BackEndServer, public KvServer {
+class BackEndKvServer : public ethbackend::grpc::server::BackEndServer, public kv::grpc::server::KvServer {
 public:
-    BackEndKvServer(const ServerSettings& settings, const EthereumBackEnd& backend);
+    BackEndKvServer(const rpc::ServerSettings& settings, const EthereumBackEnd& backend);

     BackEndKvServer(const BackEndKvServer&) = delete;
     BackEndKvServer& operator=(const BackEndKvServer&) = delete;

 protected:
-    void register_async_services(grpc::ServerBuilder& builder) override;
+    void register_async_services(::grpc::ServerBuilder& builder) override;
     void register_request_calls() override;
 };

-} // namespace silkworm::rpc
+} // namespace silkworm::node
diff --git a/silkworm/node/node.cpp b/silkworm/node/node.cpp
index 55ced6f603..050b30b7af 100644
--- a/silkworm/node/node.cpp
+++ b/silkworm/node/node.cpp
@@ -30,10 +30,11 @@
 #include
 #include
 #include
-#include
 #include
 #include

+#include "backend_kv_server.hpp"
+
 namespace silkworm::node {

 constexpr uint64_t kMaxFileDescriptors{10'240};
@@ -81,7 +82,7 @@ class NodeImpl final {
     execution::api::DirectClient execution_direct_client_;
     SentryClientPtr sentry_client_;
     std::unique_ptr backend_;
-    std::unique_ptr backend_kv_rpc_server_;
+    std::unique_ptr backend_kv_rpc_server_;
     ResourceUsageLog resource_usage_log_;
     std::unique_ptr bittorrent_client_;
 };
@@ -105,7 +106,7 @@ NodeImpl::NodeImpl(Settings& settings, SentryClientPtr sentry_client, mdbx::env
       resource_usage_log_{*settings_.data_directory} {
     backend_ = std::make_unique(settings_, &chaindata_db_, sentry_client_);
     backend_->set_node_name(settings_.build_info.node_name);
-    backend_kv_rpc_server_ = std::make_unique(settings_.server_settings, *backend_);
+    backend_kv_rpc_server_ = std::make_unique(settings_.server_settings, *backend_);
     bittorrent_client_ = std::make_unique(settings_.snapshot_settings.bittorrent_settings);
 }
@@ -210,7 +211,7 @@ execution::api::DirectClient& Node::execution_direct_client() {
 }

 void Node::setup() {
-    return p_impl_->setup();
+    p_impl_->setup();
 }

 Task Node::run() {
diff --git a/silkworm/node/remote/ethbackend/grpc/server/backend_calls.cpp b/silkworm/node/remote/ethbackend/grpc/server/backend_calls.cpp
index a5c3b9b360..4e1829972d 100644
--- a/silkworm/node/remote/ethbackend/grpc/server/backend_calls.cpp
+++ b/silkworm/node/remote/ethbackend/grpc/server/backend_calls.cpp
@@ -24,14 +24,14 @@
 #include
 #include

-namespace silkworm::rpc {
+namespace silkworm::ethbackend::grpc::server {

 remote::EtherbaseReply EtherbaseCall::response_;

 void EtherbaseCall::fill_predefined_reply(const EthereumBackEnd& backend) {
     const auto etherbase = backend.etherbase();
     if (etherbase.has_value()) {
-        const auto h160 = H160_from_address(etherbase.value()).release();
+        const auto h160 = rpc::H160_from_address(etherbase.value()).release();
         EtherbaseCall::response_.set_allocated_address(h160);
     }
 }
@@ -39,10 +39,10 @@ void EtherbaseCall::fill_predefined_reply(const EthereumBackEnd& backend) {
 Task EtherbaseCall::operator()(const EthereumBackEnd& /*backend*/) {
     SILK_TRACE << "EtherbaseCall START";
     if (response_.has_address()) {
-        co_await agrpc::finish(responder_, response_, grpc::Status::OK);
-        SILK_TRACE << "EtherbaseCall END etherbase: " << address_from_H160(response_.address());
+        co_await agrpc::finish(responder_, response_, ::grpc::Status::OK);
+        SILK_TRACE << "EtherbaseCall END etherbase: " << rpc::address_from_H160(response_.address());
     } else {
-        const grpc::Status error{grpc::StatusCode::INTERNAL, "etherbase must be explicitly specified"};
+        const ::grpc::Status error{::grpc::StatusCode::INTERNAL, "etherbase must be explicitly specified"};
         co_await agrpc::finish_with_error(responder_, error);
         SILK_TRACE << "EtherbaseCall END error: " << error;
     }
@@ -60,7 +60,7 @@ void NetVersionCall::fill_predefined_reply(const EthereumBackEnd& backend) {

 Task NetVersionCall::operator()(const EthereumBackEnd& /*backend*/) {
     SILK_TRACE << "NetVersionCall START";
-    co_await agrpc::finish(responder_, response_, grpc::Status::OK);
+    co_await agrpc::finish(responder_, response_, ::grpc::Status::OK);
     SILK_TRACE << "NetVersionCall END chain_id: " << response_.id();
 }

@@ -71,18 +71,18 @@ Task NetPeerCountCall::operator()(const EthereumBackEnd& backend) {
     auto sentry = co_await sentry_client->service();

     remote::NetPeerCountReply response;
-    grpc::Status result_status{grpc::Status::OK};
+    ::grpc::Status result_status{::grpc::Status::OK};
     try {
         auto peer_count = co_await sentry->peer_count();
         response.set_count(peer_count);
         SILK_DEBUG << "Reply OK peer count = " << peer_count;
-    } catch (const GrpcStatusError& status_error) {
+    } catch (const rpc::GrpcStatusError& status_error) {
         result_status = status_error.status();
         SILK_ERROR << "Reply KO result: " << result_status;
     }

     if (result_status.ok()) {
-        co_await agrpc::finish(responder_, response, grpc::Status::OK);
+        co_await agrpc::finish(responder_, response, ::grpc::Status::OK);
         SILK_TRACE << "NetPeerCountCall END count: " << response.count();
     } else {
         co_await agrpc::finish_with_error(responder_, result_status);
@@ -100,7 +100,7 @@ void BackEndVersionCall::fill_predefined_reply() {

 Task BackEndVersionCall::operator()(const EthereumBackEnd& /*backend*/) {
     SILK_TRACE << "BackEndVersionCall START";
-    co_await agrpc::finish(responder_, response_, grpc::Status::OK);
+    co_await agrpc::finish(responder_, response_, ::grpc::Status::OK);
     SILK_TRACE << "BackEndVersionCall END version: " << response_.major() << "." << response_.minor() << "." << response_.patch();
 }

@@ -112,7 +112,7 @@ void ProtocolVersionCall::fill_predefined_reply() {

 Task ProtocolVersionCall::operator()(const EthereumBackEnd& /*backend*/) {
     SILK_TRACE << "ProtocolVersionCall START";
-    co_await agrpc::finish(responder_, response_, grpc::Status::OK);
+    co_await agrpc::finish(responder_, response_, ::grpc::Status::OK);
     SILK_TRACE << "ProtocolVersionCall END id: " << response_.id();
 }

@@ -124,7 +124,7 @@ void ClientVersionCall::fill_predefined_reply(const EthereumBackEnd& backend) {

 Task ClientVersionCall::operator()(const EthereumBackEnd& /*backend*/) {
     SILK_TRACE << "ClientVersionCall START";
-    co_await agrpc::finish(responder_, response_, grpc::Status::OK);
+    co_await agrpc::finish(responder_, response_, ::grpc::Status::OK);
     SILK_TRACE << "ClientVersionCall END node name: " << response_.node_name();
 }

@@ -139,7 +139,7 @@ Task SubscribeCall::operator()(const EthereumBackEnd& /*backend*/) {
     remote::SubscribeReply response2;
     response2.set_type(remote::Event::PENDING_LOGS);
     response2.set_data("334455");
-    co_await agrpc::write_and_finish(responder_, response2, grpc::WriteOptions{}, grpc::Status::OK);
+    co_await agrpc::write_and_finish(responder_, response2, ::grpc::WriteOptions{}, ::grpc::Status::OK);

     SILK_TRACE << "SubscribeCall END";
 }

@@ -151,20 +151,20 @@ Task NodeInfoCall::operator()(const EthereumBackEnd& backend) {
     auto sentry = co_await sentry_client->service();

     remote::NodesInfoReply response;
-    grpc::Status result_status{grpc::Status::OK};
+    ::grpc::Status result_status{::grpc::Status::OK};
     try {
         auto node_infos = co_await sentry->node_infos();
         for (auto& node_info : node_infos) {
             SILK_DEBUG << "Reply OK node info: client_id=" << node_info.client_id;
             response.add_nodes_info()->CopyFrom(sentry::grpc::interfaces::proto_node_info_from_node_info(node_info));
         }
-    } catch (const GrpcStatusError& status_error) {
+    } catch (const rpc::GrpcStatusError& status_error) {
         result_status = status_error.status();
         SILK_ERROR << "Reply KO result: " << result_status;
     }

     if (result_status.ok()) {
-        co_await agrpc::finish(responder_, response, grpc::Status::OK);
+        co_await agrpc::finish(responder_, response, ::grpc::Status::OK);
         SILK_TRACE << "NodeInfoCall END #nodes: " << response.nodes_info_size();
     } else {
         co_await agrpc::finish_with_error(responder_, result_status);
     }
 }

-} // namespace silkworm::rpc
+} // namespace silkworm::ethbackend::grpc::server
diff --git a/silkworm/node/remote/ethbackend/grpc/server/backend_calls.hpp b/silkworm/node/remote/ethbackend/grpc/server/backend_calls.hpp
index b5286ba9af..014b9bb37b 100644
--- a/silkworm/node/remote/ethbackend/grpc/server/backend_calls.hpp
+++ b/silkworm/node/remote/ethbackend/grpc/server/backend_calls.hpp
@@ -37,7 +37,7 @@
 // ETHBACKEND API protocol versions
 // 2.2.0 - first issue

-namespace silkworm::rpc {
+namespace silkworm::ethbackend::grpc::server {

 //! Current devp2p 'eth' protocol version in use.
 constexpr uint64_t kEthDevp2pProtocolVersion = 66;
@@ -46,7 +46,7 @@ constexpr uint64_t kEthDevp2pProtocolVersion = 66;
 constexpr auto kEthBackEndApiVersion = std::make_tuple(2, 3, 0);

 //! Unary RPC for Etherbase method of 'ethbackend' gRPC protocol.
-class EtherbaseCall : public server::UnaryCall { +class EtherbaseCall : public rpc::server::UnaryCall { public: using Base::UnaryCall; @@ -59,7 +59,7 @@ class EtherbaseCall : public server::UnaryCall { +class NetVersionCall : public rpc::server::UnaryCall { public: using Base::UnaryCall; @@ -72,7 +72,7 @@ class NetVersionCall : public server::UnaryCall { +class NetPeerCountCall : public rpc::server::UnaryCall { public: using Base::UnaryCall; @@ -80,7 +80,7 @@ class NetPeerCountCall : public server::UnaryCall { +class BackEndVersionCall : public rpc::server::UnaryCall { public: using Base::UnaryCall; @@ -93,7 +93,7 @@ class BackEndVersionCall : public server::UnaryCall { +class ProtocolVersionCall : public rpc::server::UnaryCall { public: using Base::UnaryCall; @@ -106,7 +106,7 @@ class ProtocolVersionCall : public server::UnaryCall { +class ClientVersionCall : public rpc::server::UnaryCall { public: using Base::UnaryCall; @@ -119,7 +119,7 @@ class ClientVersionCall : public server::UnaryCall { +class SubscribeCall : public rpc::server::ServerStreamingCall { public: using Base::ServerStreamingCall; @@ -127,11 +127,11 @@ class SubscribeCall : public server::ServerStreamingCall { +class NodeInfoCall : public rpc::server::UnaryCall { public: using Base::UnaryCall; Task operator()(const EthereumBackEnd& backend); }; -} // namespace silkworm::rpc +} // namespace silkworm::ethbackend::grpc::server diff --git a/silkworm/node/remote/ethbackend/grpc/server/backend_server.cpp b/silkworm/node/remote/ethbackend/grpc/server/backend_server.cpp index 9a20986ba4..4aee260082 100644 --- a/silkworm/node/remote/ethbackend/grpc/server/backend_server.cpp +++ b/silkworm/node/remote/ethbackend/grpc/server/backend_server.cpp @@ -21,16 +21,18 @@ #include #include -namespace silkworm::rpc { +namespace silkworm::ethbackend::grpc::server { -BackEndServer::BackEndServer(const ServerSettings& settings, const EthereumBackEnd& backend) +using rpc::request_repeatedly; + +BackEndServer::BackEndServer(const rpc::ServerSettings& settings, const EthereumBackEnd& backend) : Server(settings), backend_(backend) { setup_backend_calls(backend); SILK_INFO << "BackEndServer created listening on: " << settings.address_uri; } // Register the gRPC services: they must exist for the lifetime of the server built by builder. 
-void BackEndServer::register_async_services(grpc::ServerBuilder& builder) { +void BackEndServer::register_async_services(::grpc::ServerBuilder& builder) { builder.RegisterService(&backend_async_service_); } @@ -95,4 +97,4 @@ void BackEndServer::register_request_calls() { } } -} // namespace silkworm::rpc +} // namespace silkworm::ethbackend::grpc::server diff --git a/silkworm/node/remote/ethbackend/grpc/server/backend_server.hpp b/silkworm/node/remote/ethbackend/grpc/server/backend_server.hpp index 48c55bffef..1d21e0cd5b 100644 --- a/silkworm/node/remote/ethbackend/grpc/server/backend_server.hpp +++ b/silkworm/node/remote/ethbackend/grpc/server/backend_server.hpp @@ -24,17 +24,17 @@ #include #include -namespace silkworm::rpc { +namespace silkworm::ethbackend::grpc::server { -class BackEndServer : public virtual Server { +class BackEndServer : public virtual rpc::Server { public: - BackEndServer(const ServerSettings& settings, const EthereumBackEnd& backend); + BackEndServer(const rpc::ServerSettings& settings, const EthereumBackEnd& backend); BackEndServer(const BackEndServer&) = delete; BackEndServer& operator=(const BackEndServer&) = delete; protected: - void register_async_services(grpc::ServerBuilder& builder) override; + void register_async_services(::grpc::ServerBuilder& builder) override; void register_request_calls() override; private: @@ -48,4 +48,4 @@ class BackEndServer : public virtual Server { remote::ETHBACKEND::AsyncService backend_async_service_; }; -} // namespace silkworm::rpc +} // namespace silkworm::ethbackend::grpc::server diff --git a/silkworm/node/remote/ethbackend/grpc/server/backend_server_test.cpp b/silkworm/node/remote/ethbackend/grpc/server/backend_server_test.cpp index 3d184f5504..b2d4bcd88a 100644 --- a/silkworm/node/remote/ethbackend/grpc/server/backend_server_test.cpp +++ b/silkworm/node/remote/ethbackend/grpc/server/backend_server_test.cpp @@ -219,6 +219,8 @@ class TestableEthereumBackEnd : public EthereumBackEnd { } }; +using BackEndServer = ethbackend::grpc::server::BackEndServer; + struct BackEndE2ETest { explicit BackEndE2ETest( silkworm::log::Level log_verbosity = silkworm::log::Level::kNone, @@ -246,7 +248,7 @@ struct BackEndE2ETest { rw_txn.commit(); backend = std::make_unique(options, &database_env); - server = std::make_unique(srv_config, *backend); + server = std::make_unique(srv_config, *backend); server->build_and_start(); } @@ -287,19 +289,19 @@ struct BackEndE2ETest { std::unique_ptr db_config; mdbx::env_managed database_env; std::unique_ptr backend; - std::unique_ptr server; + std::unique_ptr server; }; } // namespace -namespace silkworm::rpc { +namespace silkworm::ethbackend::grpc::server { // Exclude gRPC tests from sanitizer builds due to data race warnings inside gRPC library #ifndef SILKWORM_SANITIZE TEST_CASE("BackEndServer", "[silkworm][node][rpc]") { test_util::SetLogVerbosityGuard guard{log::Level::kNone}; - Grpc2SilkwormLogGuard log_guard; - ServerSettings srv_config; + rpc::Grpc2SilkwormLogGuard log_guard; + rpc::ServerSettings srv_config; srv_config.address_uri = kTestAddressUri; TemporaryDirectory tmp_dir; DataDirectory data_dir{tmp_dir.path()}; @@ -394,7 +396,7 @@ TEST_CASE("BackEndServer E2E: empty node settings", "[silkworm][node][rpc]") { remote::EtherbaseReply response; const auto status = backend_client.etherbase(&response); CHECK(!status.ok()); - CHECK(status.error_code() == grpc::StatusCode::INTERNAL); + CHECK(status.error_code() == ::grpc::StatusCode::INTERNAL); CHECK(status.error_message() == "etherbase must be 
explicitly specified"); CHECK(!response.has_address()); } @@ -486,4 +488,4 @@ TEST_CASE("BackEndServer E2E: one Sentry status OK", "[silkworm][node][rpc]") { } #endif // SILKWORM_SANITIZE -} // namespace silkworm::rpc +} // namespace silkworm::ethbackend::grpc::server