From 57aa51e785b3d70354f8c1f83c4768165b18eadd Mon Sep 17 00:00:00 2001 From: Valentin Palade Date: Fri, 26 Jan 2024 16:24:17 +0200 Subject: [PATCH 01/13] relay_multicast:mprpc: NO TEST: introducing RecipientUri --- .../frame/mprpc_echo/example_mprpc_echo.cpp | 2 +- solid/frame/mprpc/mprpcconfiguration.hpp | 55 +---- solid/frame/mprpc/mprpcmessage.hpp | 56 ++--- solid/frame/mprpc/mprpcrelayengine.hpp | 29 +-- solid/frame/mprpc/mprpcrelayengines.hpp | 7 +- solid/frame/mprpc/mprpcservice.hpp | 213 ++++++++++++------ solid/frame/mprpc/src/mprpcmessagereader.cpp | 2 +- solid/frame/mprpc/src/mprpcmessagewriter.cpp | 14 +- solid/frame/mprpc/src/mprpcmessagewriter.hpp | 4 +- solid/frame/mprpc/src/mprpcrelayengine.cpp | 15 +- solid/frame/mprpc/src/mprpcrelayengines.cpp | 2 +- solid/frame/mprpc/src/mprpcservice.cpp | 166 +++++++------- solid/frame/mprpc/src/mprpcutility.hpp | 30 ++- .../alpha/client/alphaclient.cpp | 6 +- .../beta/client/betaclient.cpp | 6 +- .../gamma/client/gammaclient.cpp | 6 +- .../test/test_clientfrontback_download.cpp | 4 +- .../test/test_clientfrontback_upload.cpp | 4 +- .../mprpc/test/test_clientserver_basic.cpp | 4 +- .../test/test_clientserver_cancel_client.cpp | 2 +- .../test/test_clientserver_cancel_server.cpp | 4 +- .../mprpc/test/test_clientserver_delayed.cpp | 6 +- .../mprpc/test/test_clientserver_download.cpp | 2 +- .../test/test_clientserver_idempotent.cpp | 12 +- .../mprpc/test/test_clientserver_noserver.cpp | 2 +- .../mprpc/test/test_clientserver_oneshot.cpp | 2 +- .../test/test_clientserver_sendrequest.cpp | 4 +- .../mprpc/test/test_clientserver_split.cpp | 4 +- .../mprpc/test/test_clientserver_topic.cpp | 2 +- .../mprpc/test/test_clientserver_upload.cpp | 2 +- .../test/test_clientserver_upload_single.cpp | 2 +- .../test/test_clientserver_versioning.cpp | 8 +- .../mprpc/test/test_connection_close.cpp | 2 +- .../frame/mprpc/test/test_keepalive_fail.cpp | 4 +- .../mprpc/test/test_keepalive_success.cpp | 4 +- solid/frame/mprpc/test/test_pool_basic.cpp | 2 +- .../mprpc/test/test_pool_delay_close.cpp | 2 +- .../mprpc/test/test_pool_force_close.cpp | 2 +- solid/frame/mprpc/test/test_raw_basic.cpp | 2 +- solid/frame/mprpc/test/test_raw_proxy.cpp | 2 +- solid/frame/mprpc/test/test_relay_basic.cpp | 6 +- .../mprpc/test/test_relay_cancel_request.cpp | 6 +- .../mprpc/test/test_relay_cancel_response.cpp | 6 +- .../mprpc/test/test_relay_close_request.cpp | 4 +- .../mprpc/test/test_relay_close_response.cpp | 4 +- .../mprpc/test/test_relay_detect_close.cpp | 8 +- ...test_relay_detect_close_while_response.cpp | 6 +- .../frame/mprpc/test/test_relay_disabled.cpp | 4 +- solid/frame/mprpc/test/test_relay_split.cpp | 6 +- solid/utility/string.hpp | 26 --- tutorials/mprpc_echo/mprpc_echo_client.cpp | 2 +- .../mprpc_echo/mprpc_echo_client_pool.cpp | 2 +- .../mprpc_echo_relay_client.cpp | 4 +- tutorials/mprpc_file/mprpc_file_client.cpp | 4 +- .../mprpc_request/mprpc_request_client.cpp | 2 +- .../mprpc_request_client.cpp | 2 +- 56 files changed, 397 insertions(+), 392 deletions(-) diff --git a/examples/frame/mprpc_echo/example_mprpc_echo.cpp b/examples/frame/mprpc_echo/example_mprpc_echo.cpp index 1ec1328f..e98c33b1 100644 --- a/examples/frame/mprpc_echo/example_mprpc_echo.cpp +++ b/examples/frame/mprpc_echo/example_mprpc_echo.cpp @@ -347,7 +347,7 @@ void broadcast_message(frame::mprpc::Service& _rsvc, frame::mprpc::MessagePointe solid_log(generic_logger, Verbose, "done stop==============================="); for (Params::StringVectorT::const_iterator it(params.connectstringvec.begin()); 
it != params.connectstringvec.end(); ++it) { - _rsvc.sendMessage(it->c_str(), _rmsgptr, {frame::mprpc::MessageFlagsE::AwaitResponse}); + _rsvc.sendMessage({*it}, _rmsgptr, {frame::mprpc::MessageFlagsE::AwaitResponse}); } } diff --git a/solid/frame/mprpc/mprpcconfiguration.hpp b/solid/frame/mprpc/mprpcconfiguration.hpp index 7373b480..ba659545 100644 --- a/solid/frame/mprpc/mprpcconfiguration.hpp +++ b/solid/frame/mprpc/mprpcconfiguration.hpp @@ -51,59 +51,6 @@ class Configuration; typedef void (*OnSecureConnectF)(frame::aio::ReactorContext&); typedef void (*OnSecureAcceptF)(frame::aio::ReactorContext&); -#if false -struct BufferBase { - virtual ~BufferBase(); - - char* data() const { return data_; } - size_t capacity() const { return capacity_; } - -protected: - BufferBase(char* _data = nullptr, size_t _cap = 0) - : data_(_data) - , capacity_(_cap) - { - } - void reset(char* _data = nullptr, size_t _cap = 0) - { - data_ = _data; - capacity_ = _cap; - } - -protected: - char* data_; - size_t capacity_; -}; - -template -struct Buffer; - -template <> -struct Buffer<0> : BufferBase { - Buffer(const size_t _cp) - : BufferBase(new char[_cp], _cp) - { - } - ~Buffer() - { - delete[] data_; - } -}; - -template -struct Buffer : BufferBase { - char d_[Cp]; - Buffer() - { - reset(d_, Cp); - } -}; - -using SendBufferPointerT = std::unique_ptr; -using RecvBufferPointerT = std::shared_ptr; - -RecvBufferPointerT make_recv_buffer(const size_t _cp); -#endif enum struct RelayDataFlagsE : uint8_t { First, @@ -124,7 +71,7 @@ struct RelayData { MessageHeader::FlagsT message_flags_ = 0; MessageHeader* pmessage_header_ = nullptr; - RelayData() {} + RelayData() = default; RelayData( RelayData&& _rrelmsg) noexcept diff --git a/solid/frame/mprpc/mprpcmessage.hpp b/solid/frame/mprpc/mprpcmessage.hpp index c5492f8d..2f3b2fa2 100644 --- a/solid/frame/mprpc/mprpcmessage.hpp +++ b/solid/frame/mprpc/mprpcmessage.hpp @@ -11,6 +11,7 @@ #pragma once #include +#include #include #include "solid/system/common.hpp" @@ -36,47 +37,48 @@ class Connection; class ConnectionContext; struct MessageRelayHeader { - std::string uri_; + using GroupIdT = uint32_t; + using ReplicaIdT = uint16_t; + GroupIdT group_id_ = 0; + ReplicaIdT replica_id_ = 0; MessageRelayHeader() = default; - MessageRelayHeader(const std::string& _uri) - : uri_(_uri) + MessageRelayHeader(const GroupIdT _group_id, const ReplicaIdT _replica_id = 0) + : group_id_(_group_id) + , replica_id_(_replica_id) { } - MessageRelayHeader(std::string&& _uri) - : uri_(std::move(_uri)) + + void clear() { + group_id_ = 0; + replica_id_ = 0; } SOLID_REFLECT_V1(_rs, _rthis, _rctx) { if constexpr (std::decay_t::is_const_reflector) { - _rs.add(_rctx.pmessage_relay_header_->uri_, _rctx, 1, "uri", [](auto& _rmeta) { _rmeta.maxSize(20); }); + _rs.add(_rctx.pmessage_relay_header_->group_id_, _rctx, 1, "group_id"); + _rs.add(_rctx.pmessage_relay_header_->replica_id_, _rctx, 2, "replica_id"); } else { - _rs.add(_rthis.uri_, _rctx, 1, "uri", [](auto& _rmeta) { _rmeta.maxSize(20); }); + _rs.add(_rthis.group_id_, _rctx, 1, "group_id"); + _rs.add(_rthis.replica_id_, _rctx, 2, "replica_id"); } } - - bool empty() const noexcept - { - return uri_.empty(); - } - - void clear() - { - uri_.clear(); - } }; +using OptionalMessageRelayHeaderT = std::optional; + std::ostream& operator<<(std::ostream& _ros, const MessageRelayHeader& _header); +std::ostream& operator<<(std::ostream& _ros, const OptionalMessageRelayHeaderT& _header); struct MessageHeader { using FlagsT = MessageFlagsValueT; - FlagsT flags_{0}; - 
RequestId sender_request_id_; - RequestId recipient_request_id_; - MessageRelayHeader relay_; + FlagsT flags_{0}; + RequestId sender_request_id_; + RequestId recipient_request_id_; + OptionalMessageRelayHeaderT relay_; static MessageFlagsT fetch_state_flags(const MessageFlagsT& _flags) { @@ -117,7 +119,7 @@ struct MessageHeader { flags_ = 0; sender_request_id_.clear(); recipient_request_id_.clear(); - relay_.clear(); + relay_.reset(); } SOLID_REFLECT_V1(_rr, _rthis, _rctx) @@ -130,7 +132,8 @@ struct MessageHeader { _rr.add(_rthis.sender_request_id_.index, _rctx, 4, "recipient_request_index"); _rr.add(_rthis.sender_request_id_.unique, _rctx, 5, "recipient_request_unique"); if (_rctx.message_flags.has(MessageFlagsE::Relayed)) { - _rr.add(_rthis.relay_, _rctx, 6, "relay"); + solid_assert(_rthis.relay_.has_value()); + _rr.add(_rthis.relay_.value(), _rctx, 6, "relay"); } } else { _rr.add(_rthis.flags_, _rctx, 1, "flags"); @@ -142,7 +145,8 @@ struct MessageHeader { [&_rthis](auto& _rr, auto& _rctx) { const MessageFlagsT flags(_rthis.flags_); if (flags.has(MessageFlagsE::Relayed)) { - _rr.add(_rthis.relay_, _rctx, 6, "relay"); + solid_assert(_rthis.relay_.has_value()); + _rr.add(_rthis.relay_.value(), _rctx, 6, "relay"); } }, _rctx); @@ -337,9 +341,9 @@ struct Message : IntrusiveCacheable { return is_response_last(flags()); } - const std::string& uri() const + const auto& relay() const { - return header_.relay_.uri_; + return header_.relay_.value(); } void clearStateFlags() diff --git a/solid/frame/mprpc/mprpcrelayengine.hpp b/solid/frame/mprpc/mprpcrelayengine.hpp index 04515dba..1dda3c3e 100644 --- a/solid/frame/mprpc/mprpcrelayengine.hpp +++ b/solid/frame/mprpc/mprpcrelayengine.hpp @@ -20,15 +20,15 @@ namespace mprpc { namespace relay { struct ConnectionStubBase { - ConnectionStubBase() - : next_(InvalidIndex()) - , prev_(InvalidIndex()) - { - } + ActorIdT id_; + std::string name_; + size_t next_ = InvalidIndex(); + size_t prev_ = InvalidIndex(); + + ConnectionStubBase() = default; + ConnectionStubBase(std::string&& _uname) : name_(std::move(_uname)) - , next_(InvalidIndex()) - , prev_(InvalidIndex()) { } @@ -39,11 +39,6 @@ struct ConnectionStubBase { next_ = InvalidIndex(); prev_ = InvalidIndex(); } - - ActorIdT id_; - std::string name_; - size_t next_; - size_t prev_; }; class EngineCore; @@ -61,6 +56,9 @@ struct ConnectionPrintStub { std::ostream& operator<<(std::ostream& _ros, const ConnectionPrintStub& _rps); class EngineCore : public RelayEngine { + struct Data; + Pimpl impl_; + public: struct Proxy { size_t createConnection(); @@ -93,7 +91,6 @@ class EngineCore : public RelayEngine { template void execute(F& _rf) { - ExecuteFunctionT f(std::ref(_rf)); doExecute(f); } @@ -152,14 +149,10 @@ class EngineCore : public RelayEngine { void doPollNew(const UniqueId& _rrelay_con_uid, PushFunctionT& _try_push_fnc, bool& _rmore) final; void doPollDone(const UniqueId& _rrelay_con_uid, DoneFunctionT& _done_fnc, CancelFunctionT& _cancel_fnc) final; - size_t doRegisterNamedConnection(std::string&& _uname); + size_t doRegisterNamedConnection(MessageRelayHeader&& _relay); size_t doRegisterUnnamedConnection(const ActorIdT& _rcon_uid, UniqueId& _rrelay_con_uid); void doRegisterConnectionId(const ConnectionContext& _rconctx, const size_t _idx); - -private: - struct Data; - Pimpl impl_; }; } // namespace relay diff --git a/solid/frame/mprpc/mprpcrelayengines.hpp b/solid/frame/mprpc/mprpcrelayengines.hpp index 69e8841c..575afc37 100644 --- a/solid/frame/mprpc/mprpcrelayengines.hpp +++ 
b/solid/frame/mprpc/mprpcrelayengines.hpp @@ -20,6 +20,9 @@ namespace mprpc { namespace relay { class SingleNameEngine : public EngineCore { + struct Data; + Pimpl impl_; + public: SingleNameEngine(Manager& _rm); ~SingleNameEngine(); @@ -29,10 +32,6 @@ class SingleNameEngine : public EngineCore { void unregisterConnectionName(Proxy& _proxy, size_t _conidx) override; size_t registerConnection(Proxy& _proxy, std::string&& _uname) override; std::ostream& print(std::ostream& _ros, const ConnectionStubBase& _rcon) const override; - -private: - struct Data; - Pimpl impl_; }; } // namespace relay diff --git a/solid/frame/mprpc/mprpcservice.hpp b/solid/frame/mprpc/mprpcservice.hpp index 91c6c10c..2d3194aa 100644 --- a/solid/frame/mprpc/mprpcservice.hpp +++ b/solid/frame/mprpc/mprpcservice.hpp @@ -9,6 +9,7 @@ // #pragma once +#include #include "solid/system/exception.hpp" #include "solid/system/statistic.hpp" @@ -167,6 +168,82 @@ struct ServiceStatistic : solid::Statistic { std::ostream& print(std::ostream& _ros) const override; }; +class RecipientUrl { + using ImplOptionalRelayT = std::optional; + + const std::string_view* purl_ = nullptr; + ImplOptionalRelayT relay_; + +public: + using RelayT = MessageRelayHeader; + using OptionalRelayT = ImplOptionalRelayT; + + RecipientUrl( + const std::string_view& _url) + : purl_(&_url) + { + } + + RecipientUrl( + const std::string_view& _url, const RelayT& _relay) + : purl_(&_url) + , relay_(_relay) + { + } + +protected: + RecipientUrl( + const RelayT& _relay) + : relay_(_relay) + { + } + RecipientUrl() = default; +}; + +class RecipientUri : public RecipientUrl { + const RecipientId* pid_ = nullptr; + ConnectionContext* pctx_ = nullptr; + +public: + RecipientUri( + RecipientId const& _id) + : pid_(&_id) + { + } + + RecipientUri( + const std::string_view _url) + : RecipientUrl(_url) + { + } + + RecipientUri( + ConnectionContext& _rctx) + : pctx_(&_rctx) + { + } + + RecipientUri( + RecipientId const& _id, const RelayT& _relay) + : RecipientUrl(_relay) + , pid_(&_id) + { + } + + RecipientUri( + const std::string_view& _url, const RelayT& _relay) + : RecipientUrl(_url, _relay) + { + } + + RecipientUri( + ConnectionContext& _rctx, const RelayT& _relay) + : RecipientUrl(_relay) + , pctx_(&_rctx) + { + } +}; + //! Message Passing Remote Procedure Call Service /*! Allows exchanging ipc::Messages between processes. 
@@ -216,48 +293,48 @@ class Service : public frame::Service { const ServiceStatistic& statistic() const; - ErrorConditionT createConnectionPool(const std::string_view& _recipient_url, const size_t _persistent_connection_count = 1); + ErrorConditionT createConnectionPool(const RecipientUrl& _recipient_url, const size_t _persistent_connection_count = 1); template ErrorConditionT createConnectionPool( - const std::string_view& _recipient_url, - RecipientId& _rrecipient_id, - const F _event_fnc, - const size_t _persistent_connection_count = 1); + const RecipientUrl& _recipient_uri, + RecipientId& _rrecipient_id, + const F _event_fnc, + const size_t _persistent_connection_count = 1); // send message using recipient name -------------------------------------- template ErrorConditionT sendMessage( - const std::string_view& _recipient_url, + const RecipientUri& _recipient_uri, MessagePointerT const& _rmsgptr, const MessageFlagsT& _flags = 0); template ErrorConditionT sendMessage( - const std::string_view& _recipient_url, + const RecipientUri& _recipient_uri, MessagePointerT const& _rmsgptr, RecipientId& _rrecipient_id, const MessageFlagsT& _flags = 0); template ErrorConditionT sendMessage( - const std::string_view& _recipient_url, + const RecipientUri& _recipient_uri, MessagePointerT const& _rmsgptr, RecipientId& _rrecipient_id, MessageId& _rmsg_id, const MessageFlagsT& _flags = 0); // send message using connection uid ------------------------------------- - +#if 0 template ErrorConditionT sendMessage( RecipientId const& _rrecipient_id, MessagePointerT const& _rmsgptr, const MessageFlagsT& _flags = 0); - +#endif template ErrorConditionT sendMessage( - RecipientId const& _rrecipient_id, + const RecipientUri& _recipient_uri, MessagePointerT const& _rmsgptr, MessageId& _rmsg_id, const MessageFlagsT& _flags = 0); @@ -266,14 +343,14 @@ class Service : public frame::Service { template ErrorConditionT sendRequest( - const std::string_view& _recipient_url, + const RecipientUri& _recipient_uri, MessagePointerT const& _rmsgptr, Fnc _complete_fnc, const MessageFlagsT& _flags = 0); template ErrorConditionT sendRequest( - const std::string_view& _recipient_url, + const RecipientUri& _recipient_uri, MessagePointerT const& _rmsgptr, Fnc _complete_fnc, RecipientId& _rrecipient_id, @@ -281,7 +358,7 @@ class Service : public frame::Service { template ErrorConditionT sendRequest( - const std::string_view& _recipient_url, + const RecipientUri& _recipient_uri, MessagePointerT const& _rmsgptr, Fnc _complete_fnc, RecipientId& _rrecipient_id, @@ -289,7 +366,6 @@ class Service : public frame::Service { const MessageFlagsT& _flags = 0); // send message using connection uid ------------------------------------- - template ErrorConditionT sendResponse( RecipientId const& _rrecipient_id, @@ -302,9 +378,8 @@ class Service : public frame::Service { MessagePointerT const& _rmsgptr, MessageId& _rmsg_id, const MessageFlagsT& _flags = 0); - // send request using connection uid -------------------------------------- - +#if 0 template ErrorConditionT sendRequest( RecipientId const& _rrecipient_id, @@ -319,18 +394,19 @@ class Service : public frame::Service { Fnc _complete_fnc, MessageId& _rmsguid, const MessageFlagsT& _flags = 0); +#endif // send message with complete using recipient name ------------------------ template ErrorConditionT sendMessage( - const std::string_view& _recipient_url, + const RecipientUri& _recipient_uri, MessagePointerT const& _rmsgptr, Fnc _complete_fnc, const MessageFlagsT& _flags); template ErrorConditionT 
sendMessage( - const std::string_view& _recipient_url, + const RecipientUri& _recipient_uri, MessagePointerT const& _rmsgptr, Fnc _complete_fnc, RecipientId& _rrecipient_id, @@ -338,7 +414,7 @@ class Service : public frame::Service { template ErrorConditionT sendMessage( - const std::string_view& _recipient_url, + const RecipientUri& _recipient_uri, MessagePointerT const& _rmsgptr, Fnc _complete_fnc, RecipientId& _rrecipient_id, @@ -346,6 +422,7 @@ class Service : public frame::Service { const MessageFlagsT& _flags); // send message with complete using connection uid ------------------------ +#if 0 template ErrorConditionT sendMessage( RecipientId const& _rrecipient_id, @@ -360,7 +437,7 @@ class Service : public frame::Service { Fnc _complete_fnc, MessageId& _rmsguid, const MessageFlagsT& _flags); - +#endif // send message using ConnectionContext ---------------------------------- template @@ -368,7 +445,7 @@ class Service : public frame::Service { ConnectionContext& _rctx, MessagePointerT const& _rmsgptr, const MessageFlagsT& _flags = 0); - +#if 0 template ErrorConditionT sendMessage( ConnectionContext& _rctx, @@ -380,8 +457,9 @@ class Service : public frame::Service { ConnectionContext& _rctx, MessagePointerT const& _rmsgptr, Fnc _complete_fnc, - const MessageFlagsT& _flags); + const MessageFlagsT& _flags = 0); +#endif //------------------------------------------------------------------------- ErrorConditionT sendRelay(const ActorIdT& _rconid, RelayData&& _urelmsg); ErrorConditionT sendRelayCancel(RelayData&& _urelmsg); @@ -570,14 +648,13 @@ class Service : public frame::Service { void forwardResolveMessage(ConnectionPoolId const& _rconpoolid, EventBase& _revent); ErrorConditionT doSendMessage( - const char* _recipient_url, - const RecipientId& _rrecipient_id_in, + const RecipientUri& _recipient_uri, MessagePointerT<>& _rmsgptr, MessageCompleteFunctionT& _rcomplete_fnc, RecipientId* _precipient_id_out, MessageId* _pmsg_id_out, const MessageFlagsT& _flags); - +#if 0 ErrorConditionT doSendMessage( ConnectionContext& _rctx, MessagePointerT<>& _rmsgptr, @@ -585,12 +662,12 @@ class Service : public frame::Service { RecipientId* _precipient_id_out, MessageId* _pmsg_id_out, MessageFlagsT _flags); - +#endif ErrorConditionT doCreateConnectionPool( - const std::string_view& _recipient_url, - RecipientId& _rrecipient_id_out, - PoolOnEventFunctionT& _event_fnc, - const size_t _persistent_connection_count); + const RecipientUrl& _recipient_url, + RecipientId& _rrecipient_id_out, + PoolOnEventFunctionT& _event_fnc, + const size_t _persistent_connection_count); ErrorConditionT doForceCloseConnectionPool( RecipientId const& _rrecipient_id, @@ -609,43 +686,41 @@ using ServiceT = frame::ServiceShell; //------------------------------------------------------------------------- template ErrorConditionT Service::sendMessage( - const std::string_view& _recipient_url, + const RecipientUri& _recipient_uri, MessagePointerT const& _rmsgptr, const MessageFlagsT& _flags) { auto msgptr(solid::static_pointer_cast(_rmsgptr)); - RecipientId recipient_id; MessageCompleteFunctionT complete_handler; - return doSendMessage(_recipient_url.data(), recipient_id, msgptr, complete_handler, nullptr, nullptr, _flags); + return doSendMessage(_recipient_uri, msgptr, complete_handler, nullptr, nullptr, _flags); } //------------------------------------------------------------------------- template ErrorConditionT Service::sendMessage( - const std::string_view& _recipient_url, + const RecipientUri& _recipient_uri, MessagePointerT 
const& _rmsgptr, RecipientId& _rrecipient_id, const MessageFlagsT& _flags) { auto msgptr(solid::static_pointer_cast(_rmsgptr)); - RecipientId recipient_id; MessageCompleteFunctionT complete_handler; - return doSendMessage(_recipient_url.data(), recipient_id, msgptr, complete_handler, &_rrecipient_id, nullptr, _flags); + return doSendMessage(_recipient_uri, msgptr, complete_handler, &_rrecipient_id, nullptr, _flags); } //------------------------------------------------------------------------- template ErrorConditionT Service::sendMessage( - const std::string_view& _recipient_url, + const RecipientUri& _recipient_uri, MessagePointerT const& _rmsgptr, RecipientId& _rrecipient_id, MessageId& _rmsg_id, const MessageFlagsT& _flags) { auto msgptr(solid::static_pointer_cast(_rmsgptr)); - RecipientId recipient_id; MessageCompleteFunctionT complete_handler; - return doSendMessage(_recipient_url.data(), recipient_id, msgptr, complete_handler, &_rrecipient_id, &_rmsg_id, _flags); + return doSendMessage(_recipient_uri, msgptr, complete_handler, &_rrecipient_id, &_rmsg_id, _flags); } // send message using connection uid ------------------------------------- +#if 0 template ErrorConditionT Service::sendMessage( RecipientId const& _rrecipient_id, @@ -656,23 +731,24 @@ ErrorConditionT Service::sendMessage( MessageCompleteFunctionT complete_handler; return doSendMessage(nullptr, _rrecipient_id, msgptr, complete_handler, nullptr, nullptr, _flags); } +#endif //------------------------------------------------------------------------- template ErrorConditionT Service::sendMessage( - RecipientId const& _rrecipient_id, + const RecipientUri& _recipient_uri, MessagePointerT const& _rmsgptr, MessageId& _rmsg_id, const MessageFlagsT& _flags) { auto msgptr(solid::static_pointer_cast(_rmsgptr)); MessageCompleteFunctionT complete_handler; - return doSendMessage(nullptr, _rrecipient_id, msgptr, complete_handler, nullptr, &_rmsg_id, _flags); + return doSendMessage(_recipient_uri, msgptr, complete_handler, nullptr, &_rmsg_id, _flags); } //------------------------------------------------------------------------- // send request using recipient name -------------------------------------- template ErrorConditionT Service::sendRequest( - const std::string_view& _recipient_url, + const RecipientUri& _recipient_uri, MessagePointerT const& _rmsgptr, Fnc _complete_fnc, const MessageFlagsT& _flags) @@ -682,16 +758,15 @@ ErrorConditionT Service::sendRequest( typename message_complete_traits::recv_type>; auto msgptr(solid::static_pointer_cast(_rmsgptr)); - RecipientId recipient_id; CompleteHandlerT fnc(std::forward(_complete_fnc)); MessageCompleteFunctionT complete_handler(std::move(fnc)); - return doSendMessage(_recipient_url.data(), recipient_id, msgptr, complete_handler, nullptr, nullptr, _flags | MessageFlagsE::AwaitResponse); + return doSendMessage(_recipient_uri, msgptr, complete_handler, nullptr, nullptr, _flags | MessageFlagsE::AwaitResponse); } //------------------------------------------------------------------------- template ErrorConditionT Service::sendRequest( - const std::string_view& _recipient_url, + const RecipientUri& _recipient_uri, MessagePointerT const& _rmsgptr, Fnc _complete_fnc, RecipientId& _rrecipient_id, @@ -702,16 +777,15 @@ ErrorConditionT Service::sendRequest( typename message_complete_traits::recv_type>; auto msgptr(solid::static_pointer_cast(_rmsgptr)); - RecipientId recipient_id; CompleteHandlerT fnc(std::forward(_complete_fnc)); MessageCompleteFunctionT complete_handler(std::move(fnc)); - return 
doSendMessage(_recipient_url.data(), recipient_id, msgptr, complete_handler, &_rrecipient_id, nullptr, _flags | MessageFlagsE::AwaitResponse); + return doSendMessage(_recipient_uri, msgptr, complete_handler, &_rrecipient_id, nullptr, _flags | MessageFlagsE::AwaitResponse); } //------------------------------------------------------------------------- template ErrorConditionT Service::sendRequest( - const std::string_view& _recipient_url, + const RecipientUri& _recipient_uri, MessagePointerT const& _rmsgptr, Fnc _complete_fnc, RecipientId& _rrecipient_id, @@ -723,13 +797,13 @@ ErrorConditionT Service::sendRequest( typename message_complete_traits::recv_type>; auto msgptr(solid::static_pointer_cast(_rmsgptr)); - RecipientId recipient_id; CompleteHandlerT fnc(std::forward(_complete_fnc)); MessageCompleteFunctionT complete_handler(std::move(fnc)); - return doSendMessage(_recipient_url.data(), recipient_id, msgptr, complete_handler, &_rrecipient_id, &_rmsguid, _flags | MessageFlagsE::AwaitResponse); + return doSendMessage(_recipient_uri, msgptr, complete_handler, &_rrecipient_id, &_rmsguid, _flags | MessageFlagsE::AwaitResponse); } //------------------------------------------------------------------------- +#if 0 // send request using connection uid -------------------------------------- template ErrorConditionT Service::sendRequest( @@ -767,7 +841,7 @@ ErrorConditionT Service::sendRequest( return doSendMessage(nullptr, _rrecipient_id, msgptr, complete_handler, nullptr, &_rmsguid, _flags | MessageFlagsE::AwaitResponse); } - +#endif //------------------------------------------------------------------------- // send response using recipient id --------------------------------------- @@ -779,7 +853,7 @@ ErrorConditionT Service::sendResponse( { auto msgptr(solid::static_pointer_cast(_rmsgptr)); MessageCompleteFunctionT complete_handler; - return doSendMessage(nullptr, _rrecipient_id, msgptr, complete_handler, nullptr, nullptr, _flags | MessageFlagsE::Response); + return doSendMessage(_rrecipient_id, msgptr, complete_handler, nullptr, nullptr, _flags | MessageFlagsE::Response); } //------------------------------------------------------------------------- @@ -793,14 +867,14 @@ ErrorConditionT Service::sendResponse( { auto msgptr(solid::static_pointer_cast(_rmsgptr)); MessageCompleteFunctionT complete_handler; - return doSendMessage(nullptr, _rrecipient_id, msgptr, complete_handler, nullptr, &_rmsg_id, _flags | MessageFlagsE::Response); + return doSendMessage(_rrecipient_id, msgptr, complete_handler, nullptr, &_rmsg_id, _flags | MessageFlagsE::Response); } //------------------------------------------------------------------------- // send message with complete using recipient name ------------------------ template ErrorConditionT Service::sendMessage( - const std::string_view& _recipient_url, + const RecipientUri& _recipient_uri, MessagePointerT const& _rmsgptr, Fnc _complete_fnc, const MessageFlagsT& _flags) @@ -810,16 +884,15 @@ ErrorConditionT Service::sendMessage( typename message_complete_traits::recv_type>; auto msgptr(solid::static_pointer_cast(_rmsgptr)); - RecipientId recipient_id; CompleteHandlerT fnc(std::forward(_complete_fnc)); MessageCompleteFunctionT complete_handler(std::move(fnc)); - return doSendMessage(_recipient_url.data(), recipient_id, msgptr, complete_handler, nullptr, nullptr, _flags); + return doSendMessage(_recipient_uri, msgptr, complete_handler, nullptr, nullptr, _flags); } //------------------------------------------------------------------------- template ErrorConditionT 
Service::sendMessage( - const std::string_view& _recipient_url, + const RecipientUri& _recipient_uri, MessagePointerT const& _rmsgptr, Fnc _complete_fnc, RecipientId& _rrecipient_id, @@ -830,16 +903,15 @@ ErrorConditionT Service::sendMessage( typename message_complete_traits::recv_type>; auto msgptr(solid::static_pointer_cast(_rmsgptr)); - RecipientId recipient_id; CompleteHandlerT fnc(std::forward(_complete_fnc)); MessageCompleteFunctionT complete_handler(std::move(fnc)); - return doSendMessage(_recipient_url.data(), recipient_id, msgptr, complete_handler, &_rrecipient_id, nullptr, _flags); + return doSendMessage(_recipient_uri, msgptr, complete_handler, &_rrecipient_id, nullptr, _flags); } //------------------------------------------------------------------------- template ErrorConditionT Service::sendMessage( - const std::string_view& _recipient_url, + const RecipientUri& _recipient_uri, MessagePointerT const& _rmsgptr, Fnc _complete_fnc, RecipientId& _rrecipient_id, @@ -851,14 +923,14 @@ ErrorConditionT Service::sendMessage( typename message_complete_traits::recv_type>; auto msgptr(solid::static_pointer_cast(_rmsgptr)); - RecipientId recipient_id; CompleteHandlerT fnc(std::forward(_complete_fnc)); MessageCompleteFunctionT complete_handler(std::move(fnc)); - return doSendMessage(_recipient_url.data(), recipient_id, msgptr, complete_handler, &_rrecipient_id, &_rmsguid, _flags); + return doSendMessage(_recipient_uri, msgptr, complete_handler, &_rrecipient_id, &_rmsguid, _flags); } //------------------------------------------------------------------------- // send message with complete using connection uid ------------------------ +#if 0 template ErrorConditionT Service::sendMessage( RecipientId const& _rrecipient_id, @@ -895,6 +967,7 @@ ErrorConditionT Service::sendMessage( return doSendMessage(nullptr, _rrecipient_id, msgptr, complete_handler, nullptr, &_rmsguid, _flags); } +#endif //------------------------------------------------------------------------- // send message using ConnectionContext ---------------------------------- @@ -908,7 +981,7 @@ ErrorConditionT Service::sendResponse( MessageCompleteFunctionT complete_handler; return doSendMessage(_rctx, msgptr, complete_handler, nullptr, nullptr, _flags | MessageFlagsE::Response); } - +#if 0 template ErrorConditionT Service::sendMessage( ConnectionContext& _rctx, @@ -937,6 +1010,8 @@ ErrorConditionT Service::sendMessage( return doSendMessage(_rctx, msgptr, complete_handler, nullptr, nullptr, _flags); } +#endif + //------------------------------------------------------------------------- template ErrorConditionT Service::forceCloseConnectionPool( @@ -1045,7 +1120,7 @@ ErrorConditionT Service::connectionNotifyRecvSomeRawData( return doConnectionNotifyRecvRawData(_rrecipient_id, std::move(complete_fnc)); } //------------------------------------------------------------------------- -inline ErrorConditionT Service::createConnectionPool(const std::string_view& _recipient_url, const size_t _persistent_connection_count) +inline ErrorConditionT Service::createConnectionPool(const RecipientUrl& _recipient_url, const size_t _persistent_connection_count) { RecipientId recipient_id; PoolOnEventFunctionT fnc([](ConnectionContext& _rctx, EventBase&&, const ErrorConditionT&) {}); @@ -1054,10 +1129,10 @@ inline ErrorConditionT Service::createConnectionPool(const std::string_view& _re //------------------------------------------------------------------------- template ErrorConditionT Service::createConnectionPool( - const std::string_view& 
_recipient_url, - RecipientId& _rrecipient_id, - const F _event_fnc, - const size_t _persistent_connection_count) + const RecipientUrl& _recipient_url, + RecipientId& _rrecipient_id, + const F _event_fnc, + const size_t _persistent_connection_count) { PoolOnEventFunctionT fnc(_event_fnc); return doCreateConnectionPool(_recipient_url, _rrecipient_id, fnc, _persistent_connection_count); diff --git a/solid/frame/mprpc/src/mprpcmessagereader.cpp b/solid/frame/mprpc/src/mprpcmessagereader.cpp index 5a75f881..fdef28c1 100644 --- a/solid/frame/mprpc/src/mprpcmessagereader.cpp +++ b/solid/frame/mprpc/src/mprpcmessagereader.cpp @@ -242,7 +242,7 @@ bool MessageReader::doConsumeMessageHeader( solid_log(logger, Info, "Relayed response"); rmsgstub.state_ = MessageStub::StateE::RelayResponse; cache(rmsgstub.deserializer_ptr_); - } else if (!_receiver.isRelayEnabled() || rmsgstub.message_header_.relay_.empty()) { + } else if (!_receiver.isRelayEnabled() || !rmsgstub.message_header_.relay_.has_value()) { solid_log(logger, Info, "Read Body"); rmsgstub.state_ = MessageStub::StateE::ReadBodyStart; rmsgstub.deserializer_ptr_->clear(); diff --git a/solid/frame/mprpc/src/mprpcmessagewriter.cpp b/solid/frame/mprpc/src/mprpcmessagewriter.cpp index 0068ace9..ef81fdc2 100644 --- a/solid/frame/mprpc/src/mprpcmessagewriter.cpp +++ b/solid/frame/mprpc/src/mprpcmessagewriter.cpp @@ -675,10 +675,12 @@ char* MessageWriter::doWriteMessageHead( _rsender.context().request_id.index = static_cast(_msgidx + 1); _rsender.context().request_id.unique = rmsgstub.unique_; _rsender.context().message_flags = rmsgstub.msgbundle_.message_flags; - if (!rmsgstub.msgbundle_.message_relay_header_.empty()) { + if (rmsgstub.msgbundle_.message_relay_header_.has_value()) { _rsender.context().message_flags.set(MessageFlagsE::Relayed); + _rsender.context().pmessage_relay_header_ = &rmsgstub.msgbundle_.message_relay_header_.value(); + } else { + _rsender.context().pmessage_relay_header_ = nullptr; } - _rsender.context().pmessage_relay_header_ = &rmsgstub.msgbundle_.message_relay_header_; const ptrdiff_t rv = rmsgstub.state_ == MessageStub::StateE::WriteHeadStart ? rmsgstub.serializer_ptr_->run(_rsender.context(), _pbufpos, _pbufend - _pbufpos, rmsgstub.msgbundle_.message_ptr->header_) : rmsgstub.serializer_ptr_->run(_rsender.context(), _pbufpos, _pbufend - _pbufpos); rmsgstub.state_ = MessageStub::StateE::WriteHeadContinue; @@ -724,10 +726,12 @@ char* MessageWriter::doWriteMessageBody( _rsender.context().request_id.index = static_cast(_msgidx + 1); _rsender.context().request_id.unique = rmsgstub.unique_; _rsender.context().message_flags = rmsgstub.msgbundle_.message_flags; - if (!rmsgstub.msgbundle_.message_relay_header_.empty()) { + if (rmsgstub.msgbundle_.message_relay_header_.has_value()) { _rsender.context().message_flags.set(MessageFlagsE::Relayed); + _rsender.context().pmessage_relay_header_ = &rmsgstub.msgbundle_.message_relay_header_.value(); + } else { + _rsender.context().pmessage_relay_header_ = nullptr; } - _rsender.context().pmessage_relay_header_ = &rmsgstub.msgbundle_.message_relay_header_; const ptrdiff_t rv = rmsgstub.state_ == MessageStub::StateE::WriteBodyStart ? 
rmsgstub.serializer_ptr_->run(_rsender.context(), _pbufpos, _pbufend - _pbufpos, rmsgstub.msgbundle_.message_ptr, rmsgstub.msgbundle_.message_type_id) : rmsgstub.serializer_ptr_->run(_rsender.context(), _pbufpos, _pbufend - _pbufpos); rmsgstub.state_ = MessageStub::StateE::WriteBodyContinue; @@ -784,7 +788,7 @@ char* MessageWriter::doWriteRelayedHead( _rsender.context().request_id.unique = rmsgstub.unique_; _rsender.context().message_flags = rmsgstub.prelay_data_->pmessage_header_->flags_; _rsender.context().message_flags.set(MessageFlagsE::Relayed); - _rsender.context().pmessage_relay_header_ = &rmsgstub.prelay_data_->pmessage_header_->relay_; + _rsender.context().pmessage_relay_header_ = &rmsgstub.prelay_data_->pmessage_header_->relay_.value(); const ptrdiff_t rv = rmsgstub.state_ == MessageStub::StateE::RelayedHeadStart ? rmsgstub.serializer_ptr_->run(_rsender.context(), _pbufpos, _pbufend - _pbufpos, *rmsgstub.prelay_data_->pmessage_header_) : rmsgstub.serializer_ptr_->run(_rsender.context(), _pbufpos, _pbufend - _pbufpos); rmsgstub.state_ = MessageStub::StateE::RelayedHeadContinue; diff --git a/solid/frame/mprpc/src/mprpcmessagewriter.hpp b/solid/frame/mprpc/src/mprpcmessagewriter.hpp index 7d8a07c8..f162de36 100644 --- a/solid/frame/mprpc/src/mprpcmessagewriter.hpp +++ b/solid/frame/mprpc/src/mprpcmessagewriter.hpp @@ -344,10 +344,10 @@ inline MessageWriter::MessageStub::MessageStub( : inner::Node(std::move(_rmsgstub)) , msgbundle_(std::move(_rmsgstub.msgbundle_)) , unique_(_rmsgstub.unique_) + , state_(_rmsgstub.state_) , packet_count_(_rmsgstub.packet_count_) , serializer_ptr_(std::move(_rmsgstub.serializer_ptr_)) , pool_msg_id_(_rmsgstub.pool_msg_id_) - , state_(_rmsgstub.state_) { } //----------------------------------------------------------------------------- @@ -386,7 +386,7 @@ inline bool MessageWriter::MessageStub::isStop() const noexcept //----------------------------------------------------------------------------- inline bool MessageWriter::MessageStub::isRelay() const noexcept { - return !msgbundle_.message_relay_header_.empty() || Message::is_relayed(msgbundle_.message_flags); // TODO: optimize!! + return msgbundle_.message_relay_header_.has_value(); //|| Message::is_relayed(msgbundle_.message_flags); // TODO: optimize!! 
} //----------------------------------------------------------------------------- inline bool MessageWriter::MessageStub::isRelayed() const noexcept diff --git a/solid/frame/mprpc/src/mprpcrelayengine.cpp b/solid/frame/mprpc/src/mprpcrelayengine.cpp index 406b7286..693c3532 100644 --- a/solid/frame/mprpc/src/mprpcrelayengine.cpp +++ b/solid/frame/mprpc/src/mprpcrelayengine.cpp @@ -218,7 +218,7 @@ using RelayDataDequeT = std::deque; using RelayDataStackT = std::stack; using SizeTStackT = std::stack; using ConnectionDequeT = std::deque; -using ConnectionMapT = std::unordered_map; +using ConnectionMapT = std::unordered_map; } // namespace std::ostream& operator<<(std::ostream& _ros, const ConnectionPrintStub& _rps) @@ -231,7 +231,7 @@ struct EngineCore::Data { mutex mtx_; MessageDequeT msg_dq_; RelayDataDequeT reldata_dq_; - RelayData* prelay_data_cache_top_; + RelayData* prelay_data_cache_top_ = nullptr; SendInnerListT msg_cache_inner_list_; ConnectionDequeT con_dq_; ConnectionMapT con_umap_; @@ -239,7 +239,6 @@ struct EngineCore::Data { Data(Manager& _rm) : rm_(_rm) - , prelay_data_cache_top_(nullptr) , msg_cache_inner_list_(msg_dq_) { } @@ -495,10 +494,10 @@ size_t EngineCore::doRegisterUnnamedConnection(const ActorIdT& _rcon_uid, Unique return conidx; } //----------------------------------------------------------------------------- -size_t EngineCore::doRegisterNamedConnection(std::string&& _uname) +size_t EngineCore::doRegisterNamedConnection(MessageRelayHeader&& _relay) { Proxy proxy(*this); - size_t conidx = registerConnection(proxy, std::move(_uname)); + size_t conidx = 0; // TODO:relay: registerConnection(proxy, std::move(_uname)); solid_log(logger, Info, conidx << ' ' << plot(impl_->con_dq_[conidx])); return conidx; } @@ -537,7 +536,7 @@ bool EngineCore::doRelayStart( _rrelay_id = MessageId(msgidx, rmsg.unique_); - const size_t rcv_conidx = doRegisterNamedConnection(std::move(rmsg.header_.relay_.uri_)); + const size_t rcv_conidx = doRegisterNamedConnection(std::move(rmsg.header_.relay_.value())); ConnectionStub& rrcvcon = impl_->con_dq_[rcv_conidx]; ConnectionStub& rsndcon = impl_->con_dq_[snd_conidx]; @@ -557,7 +556,7 @@ bool EngineCore::doRelayStart( rmsg.push(impl_->createRelayData(std::move(_rrelmsg))); - bool should_notify_connection = (rrcvcon.recv_msg_list_.empty() || !rrcvcon.recv_msg_list_.back().hasData()); + const bool should_notify_connection = (rrcvcon.recv_msg_list_.empty() || !rrcvcon.recv_msg_list_.back().hasData()); rrcvcon.recv_msg_list_.pushBack(msgidx); @@ -584,7 +583,7 @@ bool EngineCore::doRelay( if (_rrelay_id.index < impl_->msg_dq_.size() && impl_->msg_dq_[_rrelay_id.index].unique_ == _rrelay_id.unique) { const size_t msgidx = _rrelay_id.index; MessageStub& rmsg = impl_->msg_dq_[msgidx]; - bool is_msg_relay_data_queue_empty = (rmsg.pfront_ == nullptr); + const bool is_msg_relay_data_queue_empty = (rmsg.pfront_ == nullptr); size_t data_size = _rrelmsg.data_size_; auto flags = rmsg.last_message_flags_; diff --git a/solid/frame/mprpc/src/mprpcrelayengines.cpp b/solid/frame/mprpc/src/mprpcrelayengines.cpp index 4e0e2c22..9f8bd70d 100644 --- a/solid/frame/mprpc/src/mprpcrelayengines.cpp +++ b/solid/frame/mprpc/src/mprpcrelayengines.cpp @@ -30,7 +30,7 @@ namespace mprpc { namespace relay { //----------------------------------------------------------------------------- namespace { -using ConnectionMapT = std::unordered_map; +using ConnectionMapT = std::unordered_map; } // namespace struct SingleNameEngine::Data { diff --git a/solid/frame/mprpc/src/mprpcservice.cpp 
b/solid/frame/mprpc/src/mprpcservice.cpp index e7a96223..d8ba0d65 100644 --- a/solid/frame/mprpc/src/mprpcservice.cpp +++ b/solid/frame/mprpc/src/mprpcservice.cpp @@ -85,8 +85,7 @@ const LoggerT& service_logger() return logger; } //============================================================================= -// using NameMapT = std::unordered_map; -using NameMapT = std::unordered_map; +using NameMapT = std::unordered_map; using ActorIdQueueT = Queue; /*extern*/ const Event<> pool_event_connection_start = make_event(pool_event_category, PoolEvents::ConnectionStart); @@ -116,12 +115,12 @@ struct MessageStub : inner::Node { uint flags_; MessageStub( - MessagePointerT<>&& _rmsgptr, - const size_t _msg_type_idx, - MessageCompleteFunctionT& _rcomplete_fnc, - ulong _msgflags, - std::string&& _rmsg_url) - : message_bundle_(std::move(_rmsgptr), _msg_type_idx, _msgflags, _rcomplete_fnc, std::move(_rmsg_url)) + MessagePointerT<>&& _rmsgptr, + const size_t _msg_type_idx, + MessageCompleteFunctionT& _rcomplete_fnc, + ulong _msgflags, + OptionalMessageRelayHeaderT&& _relay) + : message_bundle_(std::move(_rmsgptr), _msg_type_idx, _msgflags, _rcomplete_fnc, std::move(_relay)) , unique_(0) , flags_(0) { @@ -291,11 +290,11 @@ struct ConnectionPoolStub : inner::Node& _rmsgptr, - const size_t _msg_type_idx, - MessageCompleteFunctionT& _rcomplete_fnc, - const MessageFlagsT& _flags, - std::string&& _msg_url) + MessagePointerT<>& _rmsgptr, + const size_t _msg_type_idx, + MessageCompleteFunctionT& _rcomplete_fnc, + const MessageFlagsT& _flags, + OptionalMessageRelayHeaderT&& _relay) { size_t idx; @@ -308,7 +307,7 @@ struct ConnectionPoolStub : inner::Node& _rmsgptr, - const size_t _msg_type_idx, - MessageCompleteFunctionT& _rcomplete_fnc, - const MessageFlagsT& _flags, - std::string&& _msg_url, - bool& _ris_first) + MessagePointerT<>& _rmsgptr, + const size_t _msg_type_idx, + MessageCompleteFunctionT& _rcomplete_fnc, + const MessageFlagsT& _flags, + OptionalMessageRelayHeaderT&& _relay, + bool& _ris_first) { - const MessageId msgid = insertMessage(_rmsgptr, _msg_type_idx, _rcomplete_fnc, _flags, std::move(_msg_url)); + const MessageId msgid = insertMessage(_rmsgptr, _msg_type_idx, _rcomplete_fnc, _flags, std::move(_relay)); _ris_first = message_order_inner_list_.empty(); @@ -341,13 +340,13 @@ struct ConnectionPoolStub : inner::Node& _rmsgptr, - const size_t _msg_type_idx, - MessageCompleteFunctionT& _rcomplete_fnc, - const MessageFlagsT& _flags, - std::string&& _msg_url) + MessagePointerT<>& _rmsgptr, + const size_t _msg_type_idx, + MessageCompleteFunctionT& _rcomplete_fnc, + const MessageFlagsT& _flags, + OptionalMessageRelayHeaderT&& _relay) { - const MessageId msgid = insertMessage(_rmsgptr, _msg_type_idx, _rcomplete_fnc, _flags, std::move(_msg_url)); + const MessageId msgid = insertMessage(_rmsgptr, _msg_type_idx, _rcomplete_fnc, _flags, std::move(_relay)); message_order_inner_list_.pushFront(msgid.index); @@ -364,18 +363,18 @@ struct ConnectionPoolStub : inner::Node& _rmsgptr, - const size_t _msg_type_idx, - MessageCompleteFunctionT& _rcomplete_fnc, - const MessageFlagsT& _flags, - std::string&& _msg_url) + MessageId const& _rmsgid, + MessagePointerT<>& _rmsgptr, + const size_t _msg_type_idx, + MessageCompleteFunctionT& _rcomplete_fnc, + const MessageFlagsT& _flags, + OptionalMessageRelayHeaderT&& _relay) { MessageStub& rmsgstub(message_vec_[_rmsgid.index]); solid_assert_log(!rmsgstub.message_bundle_.message_ptr && rmsgstub.unique_ == _rmsgid.unique, logger); - rmsgstub.message_bundle_ = 
MessageBundle(std::move(_rmsgptr), _msg_type_idx, _flags, _rcomplete_fnc, std::move(_msg_url)); + rmsgstub.message_bundle_ = MessageBundle(std::move(_rmsgptr), _msg_type_idx, _flags, _rcomplete_fnc, std::move(_relay)); message_order_inner_list_.pushFront(_rmsgid.index); @@ -662,13 +661,13 @@ struct Service::Data { } ErrorConditionT doSendMessageToConnection( - Service& _rsvc, - const RecipientId& _rrecipient_id_in, - MessagePointerT<>& _rmsgptr, - MessageCompleteFunctionT& _rcomplete_fnc, - MessageId* _pmsg_id_out, - MessageFlagsT _flags, - std::string&& _msg_url); + Service& _rsvc, + const RecipientId& _rrecipient_id_in, + MessagePointerT<>& _rmsgptr, + MessageCompleteFunctionT& _rcomplete_fnc, + MessageId* _pmsg_id_out, + MessageFlagsT _flags, + OptionalMessageRelayHeaderT&& _relay); bool doTryCreateNewConnectionForPool(Service& _rsvc, const size_t _pool_index, ErrorConditionT& _rerror); @@ -745,12 +744,12 @@ struct Service::Data { ErrorConditionT doSendMessageToPool( Service& _rsvc, const ConnectionPoolId& _rpool_id, MessagePointerT<>& _rmsgptr, - MessageCompleteFunctionT& _rcomplete_fnc, - const size_t _msg_type_idx, - std::string&& _message_url, - RecipientId* _precipient_id_out, - MessageId* _pmsgid_out, - const MessageFlagsT& _flags); + MessageCompleteFunctionT& _rcomplete_fnc, + const size_t _msg_type_idx, + OptionalMessageRelayHeaderT&& _relay, + RecipientId* _precipient_id_out, + MessageId* _pmsgid_out, + const MessageFlagsT& _flags); }; //============================================================================= @@ -916,11 +915,12 @@ struct OnRelsolveF { //----------------------------------------------------------------------------- ErrorConditionT Service::doCreateConnectionPool( - const std::string_view& _recipient_url, - RecipientId& _rrecipient_id_out, - PoolOnEventFunctionT& _event_fnc, - const size_t _persistent_connection_count) + const RecipientUrl& _recipient_url, + RecipientId& _rrecipient_id_out, + PoolOnEventFunctionT& _event_fnc, + const size_t _persistent_connection_count) { +#if 0 static constexpr const char* empty_recipient_name = ":"; std::string message_url; shared_ptr locked_pimpl; @@ -981,6 +981,8 @@ ErrorConditionT Service::doCreateConnectionPool( _rrecipient_id_out.pool_id_ = pool_id; return error; +#endif + return {}; } //----------------------------------------------------------------------------- @@ -1027,6 +1029,7 @@ ErrorConditionT Service::Data::doLockPool( } } //----------------------------------------------------------------------------- +#if 0 ErrorConditionT Service::doSendMessage( ConnectionContext& _rctx, MessagePointerT<>& _rmsgptr, @@ -1065,11 +1068,10 @@ ErrorConditionT Service::doSendMessage( return doSendMessage(nullptr, _rctx.recipientId(), _rmsgptr, _rcomplete_fnc, nullptr, nullptr, _flags); } - +#endif //----------------------------------------------------------------------------- ErrorConditionT Service::doSendMessage( - const char* _recipient_url, - const RecipientId& _rrecipient_id_in, + const RecipientUri& _rrecipient_uri, MessagePointerT<>& _rmsgptr, MessageCompleteFunctionT& _rcomplete_fnc, RecipientId* _precipient_id_out, @@ -1078,7 +1080,7 @@ ErrorConditionT Service::doSendMessage( { solid_log(logger, Verbose, this); solid_statistic_inc(pimpl_->statistic_.send_message_count_); - +#if 0 std::string message_url; shared_ptr locked_pimpl; @@ -1179,18 +1181,20 @@ ErrorConditionT Service::doSendMessage( solid_assert(pool_lock.owns_lock()); return locked_pimpl->doSendMessageToPool(*this, pool_id, _rmsgptr, _rcomplete_fnc, 
msg_type_idx, std::move(message_url), _precipient_id_out, _pmsgid_out, _flags); +#endif + return {}; } //----------------------------------------------------------------------------- ErrorConditionT Service::Data::doSendMessageToConnection( - Service& _rsvc, - const RecipientId& _rrecipient_id_in, - MessagePointerT<>& _rmsgptr, - MessageCompleteFunctionT& _rcomplete_fnc, - MessageId* _pmsgid_out, - MessageFlagsT _flags, - std::string&& _msg_url) + Service& _rsvc, + const RecipientId& _rrecipient_id_in, + MessagePointerT<>& _rmsgptr, + MessageCompleteFunctionT& _rcomplete_fnc, + MessageId* _pmsgid_out, + MessageFlagsT _flags, + OptionalMessageRelayHeaderT&& _relay) { solid_log(logger, Verbose, &_rsvc); solid_statistic_inc(statistic_.send_message_to_connection_count_); @@ -1231,7 +1235,7 @@ ErrorConditionT Service::Data::doSendMessageToConnection( if (is_server_side_pool) { bool should_notify = false; - const auto msgid = rpool.pushBackMessage(_rmsgptr, msg_type_idx, _rcomplete_fnc, _flags, std::move(_msg_url), should_notify); + const auto msgid = rpool.pushBackMessage(_rmsgptr, msg_type_idx, _rcomplete_fnc, _flags, std::move(_relay), should_notify); if (_pmsgid_out != nullptr) { *_pmsgid_out = msgid; @@ -1246,7 +1250,7 @@ ErrorConditionT Service::Data::doSendMessageToConnection( } } else { - const auto msgid = rpool.insertMessage(_rmsgptr, msg_type_idx, _rcomplete_fnc, _flags, std::move(_msg_url)); + const auto msgid = rpool.insertMessage(_rmsgptr, msg_type_idx, _rcomplete_fnc, _flags, std::move(_relay)); if (_pmsgid_out != nullptr) { *_pmsgid_out = msgid; @@ -1265,12 +1269,12 @@ ErrorConditionT Service::Data::doSendMessageToConnection( //----------------------------------------------------------------------------- ErrorConditionT Service::Data::doSendMessageToPool( Service& _rsvc, const ConnectionPoolId& _rpool_id, MessagePointerT<>& _rmsgptr, - MessageCompleteFunctionT& _rcomplete_fnc, - const size_t _msg_type_idx, - std::string&& _message_url, - RecipientId* _precipient_id_out, - MessageId* _pmsgid_out, - const MessageFlagsT& _flags) + MessageCompleteFunctionT& _rcomplete_fnc, + const size_t _msg_type_idx, + OptionalMessageRelayHeaderT&& _relay, + RecipientId* _precipient_id_out, + MessageId* _pmsgid_out, + const MessageFlagsT& _flags) { solid_log(logger, Verbose, &_rsvc << " " << _rpool_id); solid_statistic_inc(statistic_.send_message_to_pool_count_); @@ -1294,7 +1298,7 @@ ErrorConditionT Service::Data::doSendMessageToPool( // At this point we can fetch the message from user's pointer // because from now on we can call complete on the message - const MessageId msgid = rpool.pushBackMessage(_rmsgptr, _msg_type_idx, _rcomplete_fnc, _flags, std::move(_message_url), is_first); + const MessageId msgid = rpool.pushBackMessage(_rmsgptr, _msg_type_idx, _rcomplete_fnc, _flags, std::move(_relay), is_first); (void)is_first; if (_pmsgid_out != nullptr) { @@ -1634,7 +1638,7 @@ ErrorConditionT Service::doDelayCloseConnectionPool( MessagePointerT<> empty_msg_ptr; bool is_first; - const MessageId msgid = rpool.pushBackMessage(empty_msg_ptr, 0, _rcomplete_fnc, 0, std::string{}, is_first); + const MessageId msgid = rpool.pushBackMessage(empty_msg_ptr, 0, _rcomplete_fnc, 0, {}, is_first); (void)msgid; // notify all waiting connections about the new message @@ -1685,7 +1689,7 @@ ErrorConditionT Service::doForceCloseConnectionPool( MessagePointerT<> empty_msg_ptr; bool is_first; - const MessageId msgid = rpool.pushBackMessage(empty_msg_ptr, 0, _rcomplete_fnc, {MessageFlagsE::Synchronous}, std::string{}, 
is_first);
+    const MessageId msgid = rpool.pushBackMessage(empty_msg_ptr, 0, _rcomplete_fnc, {MessageFlagsE::Synchronous}, {}, is_first);
     (void)msgid;

     // no reason to cancel all messages - they'll be handled on connection stop.
@@ -2525,7 +2529,7 @@ void Service::Data::doPushFrontMessageToPool(
                 _rmsgbundle.message_type_id,
                 _rmsgbundle.complete_fnc,
                 _rmsgbundle.message_flags,
-                std::move(_rmsgbundle.message_relay_header_.uri_));
+                std::move(_rmsgbundle.message_relay_header_));
         } else {
             rpool.reinsertFrontMessage(
                 _rmsgid,
@@ -2533,7 +2537,7 @@ void Service::Data::doPushFrontMessageToPool(
                 _rmsgbundle.message_type_id,
                 _rmsgbundle.complete_fnc,
                 _rmsgbundle.message_flags,
-                std::move(_rmsgbundle.message_relay_header_.uri_));
+                std::move(_rmsgbundle.message_relay_header_));
         }
     }
 }
@@ -2770,7 +2774,17 @@ std::ostream& operator<<(std::ostream& _ros, MessageId const& _msg_id)
 //-----------------------------------------------------------------------------
 std::ostream& operator<<(std::ostream& _ros, const MessageRelayHeader& _header)
 {
-    _ros << "uri = " << _header.uri_;
+    _ros << "group_id = " << _header.group_id_ << " replica_id = " << _header.replica_id_;
     return _ros;
 }
+//-----------------------------------------------------------------------------
+std::ostream& operator<<(std::ostream& _ros, const OptionalMessageRelayHeaderT& _header)
+{
+    if (_header.has_value()) {
+        _ros << _header.value();
+    } else {
+        _ros << "null";
+    }
+    return _ros;
+}
 //=============================================================================
diff --git a/solid/frame/mprpc/src/mprpcutility.hpp b/solid/frame/mprpc/src/mprpcutility.hpp
index a1569498..c335a798 100644
--- a/solid/frame/mprpc/src/mprpcutility.hpp
+++ b/solid/frame/mprpc/src/mprpcutility.hpp
@@ -165,28 +165,24 @@ class PacketHeader {
 };

 struct MessageBundle {
-    size_t message_type_id;
-    MessageFlagsT message_flags;
-    MessagePointerT<> message_ptr;
-    MessageCompleteFunctionT complete_fnc;
-    MessageRelayHeader message_relay_header_;
+    size_t message_type_id = InvalidIndex();
+    MessageFlagsT message_flags = 0;
+    MessagePointerT<> message_ptr;
+    MessageCompleteFunctionT complete_fnc;
+    OptionalMessageRelayHeaderT message_relay_header_;

-    MessageBundle()
-        : message_type_id(InvalidIndex())
-        , message_flags(0)
-    {
-    }
+    MessageBundle() = default;

     MessageBundle(
-        MessagePointerT<>&& _rmsgptr,
-        const size_t _msg_type_idx,
-        const MessageFlagsT& _flags,
-        MessageCompleteFunctionT& _complete_fnc,
-        std::string&& _rmessage_uri)
+        MessagePointerT<>&& _rmsgptr,
+        const size_t _msg_type_idx,
+        const MessageFlagsT& _flags,
+        MessageCompleteFunctionT& _complete_fnc,
+        OptionalMessageRelayHeaderT&& _relay)
         : message_type_id(_msg_type_idx)
         , message_flags(_flags)
         , message_ptr(std::move(_rmsgptr))
-        , message_relay_header_(std::move(_rmessage_uri))
+        , message_relay_header_(std::move(_relay))
     {
         std::swap(complete_fnc, _complete_fnc);
     }
@@ -229,7 +225,7 @@ struct MessageBundle {
         message_type_id = InvalidIndex();
         message_flags.reset();
         message_ptr.reset();
-        message_relay_header_.clear();
+        message_relay_header_.reset();
         solid_function_clear(complete_fnc);
     }
 };
diff --git a/solid/frame/mprpc/test/multiprotocol_basic/alpha/client/alphaclient.cpp b/solid/frame/mprpc/test/multiprotocol_basic/alpha/client/alphaclient.cpp
index bd40cae2..08e010da 100644
--- a/solid/frame/mprpc/test/multiprotocol_basic/alpha/client/alphaclient.cpp
+++ b/solid/frame/mprpc/test/multiprotocol_basic/alpha/client/alphaclient.cpp
@@ -140,19 +140,19 @@ ErrorConditionT start(
     _rctx.rwait_count += 3;

     err = 
mprpcclient_ptr->sendMessage( - "localhost", frame::mprpc::make_message(100000UL, make_string(100000)), + {"localhost"}, frame::mprpc::make_message(100000UL, make_string(100000)), {frame::mprpc::MessageFlagsE::AwaitResponse}); if (err) { return err; } err = mprpcclient_ptr->sendMessage( - "localhost", frame::mprpc::make_message(200000UL, make_string(200000)), + {"localhost"}, frame::mprpc::make_message(200000UL, make_string(200000)), {frame::mprpc::MessageFlagsE::AwaitResponse}); if (err) { return err; } err = mprpcclient_ptr->sendMessage( - "localhost", frame::mprpc::make_message(30000UL, make_string(30000)), + {"localhost"}, frame::mprpc::make_message(30000UL, make_string(30000)), {frame::mprpc::MessageFlagsE::AwaitResponse}); if (err) { return err; diff --git a/solid/frame/mprpc/test/multiprotocol_basic/beta/client/betaclient.cpp b/solid/frame/mprpc/test/multiprotocol_basic/beta/client/betaclient.cpp index 7930345f..2fd7f107 100644 --- a/solid/frame/mprpc/test/multiprotocol_basic/beta/client/betaclient.cpp +++ b/solid/frame/mprpc/test/multiprotocol_basic/beta/client/betaclient.cpp @@ -134,19 +134,19 @@ ErrorConditionT start( _rctx.rwait_count += 3; err = mprpcclient_ptr->sendMessage( - "localhost", frame::mprpc::make_message(100000UL, make_string(100000)), + {"localhost"}, frame::mprpc::make_message(100000UL, make_string(100000)), {frame::mprpc::MessageFlagsE::AwaitResponse}); if (err) { return err; } err = mprpcclient_ptr->sendMessage( - "localhost", frame::mprpc::make_message(200000UL, make_string(200000)), + {"localhost"}, frame::mprpc::make_message(200000UL, make_string(200000)), {frame::mprpc::MessageFlagsE::AwaitResponse}); if (err) { return err; } err = mprpcclient_ptr->sendMessage( - "localhost", frame::mprpc::make_message(30000UL, make_string(30000)), + {"localhost"}, frame::mprpc::make_message(30000UL, make_string(30000)), {frame::mprpc::MessageFlagsE::AwaitResponse}); if (err) { return err; diff --git a/solid/frame/mprpc/test/multiprotocol_basic/gamma/client/gammaclient.cpp b/solid/frame/mprpc/test/multiprotocol_basic/gamma/client/gammaclient.cpp index 9de9f58d..bd7b1d7e 100644 --- a/solid/frame/mprpc/test/multiprotocol_basic/gamma/client/gammaclient.cpp +++ b/solid/frame/mprpc/test/multiprotocol_basic/gamma/client/gammaclient.cpp @@ -89,19 +89,19 @@ ErrorConditionT start( _rctx.rwait_count += 3; err = mprpcclient_ptr->sendMessage( - "localhost", frame::mprpc::make_message(100000UL, make_string(100000)), + {"localhost"}, frame::mprpc::make_message(100000UL, make_string(100000)), {frame::mprpc::MessageFlagsE::AwaitResponse}); if (err) { return err; } err = mprpcclient_ptr->sendMessage( - "localhost", frame::mprpc::make_message(200000UL, make_string(200000)), + {"localhost"}, frame::mprpc::make_message(200000UL, make_string(200000)), {frame::mprpc::MessageFlagsE::AwaitResponse}); if (err) { return err; } err = mprpcclient_ptr->sendMessage( - "localhost", frame::mprpc::make_message(30000UL, make_string(30000)), + {"localhost"}, frame::mprpc::make_message(30000UL, make_string(30000)), {frame::mprpc::MessageFlagsE::AwaitResponse}); if (err) { return err; diff --git a/solid/frame/mprpc/test/test_clientfrontback_download.cpp b/solid/frame/mprpc/test/test_clientfrontback_download.cpp index cfb87e38..6a49af0a 100644 --- a/solid/frame/mprpc/test/test_clientfrontback_download.cpp +++ b/solid/frame/mprpc/test/test_clientfrontback_download.cpp @@ -526,7 +526,7 @@ int test_clientfrontback_download(int argc, char* argv[]) auto msg_ptr = frame::mprpc::make_message(f); 
msg_ptr->ofs_.open(string("client_storage/") + f); - mprpc_front_client.sendRequest("localhost", msg_ptr, front::on_client_receive_response); + mprpc_front_client.sendRequest({"localhost"}, msg_ptr, front::on_client_receive_response); } auto fut = prom.get_future(); @@ -721,7 +721,7 @@ void on_server_receive_first_request( req_ptr->res_ptr_ = frame::mprpc::make_message(*_rrecv_msg_ptr); req_ptr->await_response_ = true; - pmprpc_back_client->sendRequest("localhost", req_ptr, back::on_client_receive_response, flags); + pmprpc_back_client->sendRequest({"localhost"}, req_ptr, back::on_client_receive_response, flags); } } // namespace front diff --git a/solid/frame/mprpc/test/test_clientfrontback_upload.cpp b/solid/frame/mprpc/test/test_clientfrontback_upload.cpp index bb641686..93bf69e0 100644 --- a/solid/frame/mprpc/test/test_clientfrontback_upload.cpp +++ b/solid/frame/mprpc/test/test_clientfrontback_upload.cpp @@ -517,7 +517,7 @@ int test_clientfrontback_upload(int argc, char* argv[]) auto msg_ptr = frame::mprpc::make_message(f); msg_ptr->ifs_.open(string("client_storage/") + f); - mprpc_front_client.sendRequest("localhost", msg_ptr, front::on_client_receive_first_response); + mprpc_front_client.sendRequest({"localhost"}, msg_ptr, front::on_client_receive_first_response); } auto fut = prom.get_future(); @@ -769,7 +769,7 @@ void on_server_receive_first_request( req_ptr->recipient_id_ = _rctx.recipientId(); req_ptr->res_ptr_ = frame::mprpc::make_message(*_rrecv_msg_ptr); - pmprpc_back_client->sendRequest("localhost", req_ptr, back::on_client_receive_first_response, flags); + pmprpc_back_client->sendRequest({"localhost"}, req_ptr, back::on_client_receive_first_response, flags); } } // namespace front diff --git a/solid/frame/mprpc/test/test_clientserver_basic.cpp b/solid/frame/mprpc/test/test_clientserver_basic.cpp index d9a15271..2d2fe3df 100644 --- a/solid/frame/mprpc/test/test_clientserver_basic.cpp +++ b/solid/frame/mprpc/test/test_clientserver_basic.cpp @@ -249,7 +249,7 @@ void server_complete_message( solid_dbg(generic_logger, Info, crtreadidx); if (crtwriteidx < writecount) { err = pmprpcclient->sendMessage( - "", frame::mprpc::make_message(crtwriteidx++), + {""}, frame::mprpc::make_message(crtwriteidx++), initarray[crtwriteidx % initarraysize].flags | frame::mprpc::MessageFlagsE::AwaitResponse); solid_check(!err, "Connection id should not be invalid! 
" << err.message()); } @@ -424,7 +424,7 @@ int test_clientserver_basic(int argc, char* argv[]) for (; crtwriteidx < start_count;) { mprpcclient.sendMessage( - "", frame::mprpc::make_message(crtwriteidx++), + {""}, frame::mprpc::make_message(crtwriteidx++), initarray[crtwriteidx % initarraysize].flags | frame::mprpc::MessageFlagsE::AwaitResponse); } diff --git a/solid/frame/mprpc/test/test_clientserver_cancel_client.cpp b/solid/frame/mprpc/test/test_clientserver_cancel_client.cpp index c23fc64d..d842e647 100644 --- a/solid/frame/mprpc/test/test_clientserver_cancel_client.cpp +++ b/solid/frame/mprpc/test/test_clientserver_cancel_client.cpp @@ -407,7 +407,7 @@ int test_clientserver_cancel_client(int argc, char* argv[]) frame::mprpc::MessageId msguid; ErrorConditionT err = mprpcclient.sendMessage( - "localhost", frame::mprpc::MessagePointerT<>(frame::mprpc::make_message(crtwriteidx)), + {"localhost"}, frame::mprpc::MessagePointerT<>(frame::mprpc::make_message(crtwriteidx)), recipient_id, msguid); diff --git a/solid/frame/mprpc/test/test_clientserver_cancel_server.cpp b/solid/frame/mprpc/test/test_clientserver_cancel_server.cpp index 3a8f2f50..42a90a1b 100644 --- a/solid/frame/mprpc/test/test_clientserver_cancel_server.cpp +++ b/solid/frame/mprpc/test/test_clientserver_cancel_server.cpp @@ -263,7 +263,7 @@ void server_complete_message( frame::mprpc::MessageId msguid; ErrorConditionT err = _rctx.service().sendMessage( - recipient_id, frame::mprpc::MessagePointerT<>(frame::mprpc::make_message(crtwriteidx)), + {recipient_id}, frame::mprpc::MessagePointerT<>(frame::mprpc::make_message(crtwriteidx)), msguid); solid_check(!err, "Connection id should not be invalid! " << err.message()); @@ -416,7 +416,7 @@ int test_clientserver_cancel_server(int argc, char* argv[]) // Step 1. 
auto msgptr(frame::mprpc::make_message(0)); mprpcclient.sendMessage( - "localhost", msgptr, + {"localhost"}, msgptr, initarray[0].flags); } diff --git a/solid/frame/mprpc/test/test_clientserver_delayed.cpp b/solid/frame/mprpc/test/test_clientserver_delayed.cpp index 66b1fb80..81cb69f9 100644 --- a/solid/frame/mprpc/test/test_clientserver_delayed.cpp +++ b/solid/frame/mprpc/test/test_clientserver_delayed.cpp @@ -359,14 +359,14 @@ int test_clientserver_delayed(int argc, char* argv[]) { MessagePointerT msgptr(frame::mprpc::make_message(0)); err = mprpcclient.sendMessage( - "localhost", msgptr); + {"localhost"}, msgptr); ++writecount; } { MessagePointerT msgptr(frame::mprpc::make_message(1)); err = mprpcclient.sendMessage( - "localhost", msgptr, {frame::mprpc::MessageFlagsE::OneShotSend}); + {"localhost"}, msgptr, {frame::mprpc::MessageFlagsE::OneShotSend}); //++writecount; // this message should not be sent } @@ -374,7 +374,7 @@ int test_clientserver_delayed(int argc, char* argv[]) { MessagePointerT msgptr(frame::mprpc::make_message(2)); err = mprpcclient.sendMessage( - "localhost", msgptr, {frame::mprpc::MessageFlagsE::AwaitResponse}); + {"localhost"}, msgptr, {frame::mprpc::MessageFlagsE::AwaitResponse}); ++writecount; } diff --git a/solid/frame/mprpc/test/test_clientserver_download.cpp b/solid/frame/mprpc/test/test_clientserver_download.cpp index f093f245..60ed60b2 100644 --- a/solid/frame/mprpc/test/test_clientserver_download.cpp +++ b/solid/frame/mprpc/test/test_clientserver_download.cpp @@ -323,7 +323,7 @@ int test_clientserver_download(int argc, char* argv[]) auto msg_ptr = frame::mprpc::make_message(f); msg_ptr->ofs_.open(string("client_storage/") + f); - mprpc_client.sendRequest("localhost", msg_ptr, on_client_receive_response); + mprpc_client.sendRequest({"localhost"}, msg_ptr, on_client_receive_response); } auto fut = prom.get_future(); diff --git a/solid/frame/mprpc/test/test_clientserver_idempotent.cpp b/solid/frame/mprpc/test/test_clientserver_idempotent.cpp index 6a7b4009..51776734 100644 --- a/solid/frame/mprpc/test/test_clientserver_idempotent.cpp +++ b/solid/frame/mprpc/test/test_clientserver_idempotent.cpp @@ -389,7 +389,7 @@ int test_clientserver_idempotent(int argc, char* argv[]) cfg.pool_max_active_connection_count = max_per_pool_connection_count; - cfg.client.name_resolve_fnc = frame::mprpc::InternetResolverF(resolver, server_port.c_str(), "localhost", SocketInfo::Inet4); + cfg.client.name_resolve_fnc = frame::mprpc::InternetResolverF(resolver, server_port.c_str(), {"localhost"}, SocketInfo::Inet4); if (secure) { solid_dbg(generic_logger, Info, "Configure SSL client ------------------------------------"); @@ -418,35 +418,35 @@ int test_clientserver_idempotent(int argc, char* argv[]) { ++crtwriteidx; mprpcclient.sendMessage( - "localhost", msg_vec[0], + {"localhost"}, msg_vec[0], {frame::mprpc::MessageFlagsE::AwaitResponse, frame::mprpc::MessageFlagsE::OneShotSend}); } { ++crtwriteidx; mprpcclient.sendMessage( - "localhost", msg_vec[1], + {"localhost"}, msg_vec[1], {frame::mprpc::MessageFlagsE::AwaitResponse, frame::mprpc::MessageFlagsE::Idempotent}); } { ++crtwriteidx; mprpcclient.sendMessage( - "localhost", msg_vec[2], + {"localhost"}, msg_vec[2], {frame::mprpc::MessageFlagsE::OneShotSend}); } { ++crtwriteidx; mprpcclient.sendMessage( - "localhost", msg_vec[3], + {"localhost"}, msg_vec[3], {frame::mprpc::MessageFlagsE::AwaitResponse, frame::mprpc::MessageFlagsE::Idempotent, frame::mprpc::MessageFlagsE::Synchronous}); } { ++crtwriteidx; mprpcclient.sendMessage( - 
"localhost", msg_vec[4], + {"localhost"}, msg_vec[4], {frame::mprpc::MessageFlagsE::AwaitResponse, frame::mprpc::MessageFlagsE::Synchronous}); } diff --git a/solid/frame/mprpc/test/test_clientserver_noserver.cpp b/solid/frame/mprpc/test/test_clientserver_noserver.cpp index 4c16ef74..8d2ca81f 100644 --- a/solid/frame/mprpc/test/test_clientserver_noserver.cpp +++ b/solid/frame/mprpc/test/test_clientserver_noserver.cpp @@ -271,7 +271,7 @@ int test_clientserver_noserver(int argc, char* argv[]) MessagePointerT msgptr(frame::mprpc::make_message(0)); err = mprpcclient.sendMessage( - "localhost", msgptr, + {"localhost"}, msgptr, recipient_id, message_id, {frame::mprpc::MessageFlagsE::AwaitResponse}); solid_check(!err); diff --git a/solid/frame/mprpc/test/test_clientserver_oneshot.cpp b/solid/frame/mprpc/test/test_clientserver_oneshot.cpp index 2cbeb869..6b592f31 100644 --- a/solid/frame/mprpc/test/test_clientserver_oneshot.cpp +++ b/solid/frame/mprpc/test/test_clientserver_oneshot.cpp @@ -272,7 +272,7 @@ int test_clientserver_oneshot(int argc, char* argv[]) MessagePointerT msgptr(frame::mprpc::make_message(0)); err = mprpcclient.sendMessage( - "localhost", msgptr, + {"localhost"}, msgptr, recipient_id, message_id, {frame::mprpc::MessageFlagsE::AwaitResponse, frame::mprpc::MessageFlagsE::OneShotSend}); solid_check(!err, "" << err.message()); diff --git a/solid/frame/mprpc/test/test_clientserver_sendrequest.cpp b/solid/frame/mprpc/test/test_clientserver_sendrequest.cpp index 3e25c092..a5283f4b 100644 --- a/solid/frame/mprpc/test/test_clientserver_sendrequest.cpp +++ b/solid/frame/mprpc/test/test_clientserver_sendrequest.cpp @@ -313,7 +313,7 @@ void server_complete_request( auto msgptr(frame::mprpc::make_message(crtwriteidx)); ++crtwriteidx; pmprpcclient->sendRequest( - "localhost", msgptr, + {"localhost"}, msgptr, // on_receive_response ResponseHandler() /*[](frame::mprpc::ConnectionContext &_rctx, ResponsePointerT &_rmsgptr, ErrorConditionT const &_rerr)->void{ @@ -482,7 +482,7 @@ int test_clientserver_sendrequest(int argc, char* argv[]) auto msgptr(frame::mprpc::make_message(crtwriteidx)); ++crtwriteidx; mprpcclient.sendRequest( - "localhost", msgptr, + {"localhost"}, msgptr, // ResponseHandler() []( diff --git a/solid/frame/mprpc/test/test_clientserver_split.cpp b/solid/frame/mprpc/test/test_clientserver_split.cpp index 6d81a4dd..53d38dbf 100644 --- a/solid/frame/mprpc/test/test_clientserver_split.cpp +++ b/solid/frame/mprpc/test/test_clientserver_split.cpp @@ -283,7 +283,7 @@ void server_complete_message( solid_dbg(generic_logger, Info, crtreadidx); if (crtwriteidx < writecount) { err = pmprpcclient->sendMessage( - "localhost", frame::mprpc::make_message(crtwriteidx++), + {"localhost"}, frame::mprpc::make_message(crtwriteidx++), initarray[crtwriteidx % initarraysize].flags | frame::mprpc::MessageFlagsE::AwaitResponse); solid_check(!err, "Connection id should not be invalid! 
" << err.message()); } @@ -451,7 +451,7 @@ int test_clientserver_split(int argc, char* argv[]) for (; crtwriteidx < start_count;) { mprpcclient.sendMessage( - "localhost", frame::mprpc::make_message(crtwriteidx++), {frame::mprpc::MessageFlagsE::AwaitResponse}); + {"localhost"}, frame::mprpc::make_message(crtwriteidx++), {frame::mprpc::MessageFlagsE::AwaitResponse}); } unique_lock lock(mtx); diff --git a/solid/frame/mprpc/test/test_clientserver_topic.cpp b/solid/frame/mprpc/test/test_clientserver_topic.cpp index 1a9f6cc9..3874ed25 100644 --- a/solid/frame/mprpc/test/test_clientserver_topic.cpp +++ b/solid/frame/mprpc/test/test_clientserver_topic.cpp @@ -455,7 +455,7 @@ int test_clientserver_topic(int argc, char* argv[]) } err = mprpcclient.createConnectionPool( - "127.0.0.1", client_id, + {"127.0.0.1"}, client_id, [](frame::mprpc::ConnectionContext& _rctx, EventBase&& _revent, const ErrorConditionT& _rerr) { solid_dbg(logger, Info, "client pool event: " << _revent << " error: " << _rerr.message()); if (_revent == frame::mprpc::pool_event_connection_activate) { diff --git a/solid/frame/mprpc/test/test_clientserver_upload.cpp b/solid/frame/mprpc/test/test_clientserver_upload.cpp index 70092f6f..e9c94686 100644 --- a/solid/frame/mprpc/test/test_clientserver_upload.cpp +++ b/solid/frame/mprpc/test/test_clientserver_upload.cpp @@ -313,7 +313,7 @@ int test_clientserver_upload(int argc, char* argv[]) auto msg_ptr = frame::mprpc::make_message(f); msg_ptr->ifs_.open(string("client_storage/") + f); - mprpc_client.sendRequest("localhost", msg_ptr, on_client_receive_first_response); + mprpc_client.sendRequest({"localhost"}, msg_ptr, on_client_receive_first_response); } auto fut = prom.get_future(); diff --git a/solid/frame/mprpc/test/test_clientserver_upload_single.cpp b/solid/frame/mprpc/test/test_clientserver_upload_single.cpp index 0db7c365..6fb5e5e0 100644 --- a/solid/frame/mprpc/test/test_clientserver_upload_single.cpp +++ b/solid/frame/mprpc/test/test_clientserver_upload_single.cpp @@ -317,7 +317,7 @@ int test_clientserver_upload_single(int argc, char* argv[]) auto msg_ptr = frame::mprpc::make_message(f); msg_ptr->ifs_.open(string("client_storage/") + f); - mprpc_client.sendRequest("localhost", msg_ptr, on_client_first_response); + mprpc_client.sendRequest({"localhost"}, msg_ptr, on_client_first_response); } auto fut = prom.get_future(); diff --git a/solid/frame/mprpc/test/test_clientserver_versioning.cpp b/solid/frame/mprpc/test/test_clientserver_versioning.cpp index 724f3232..4fc09652 100644 --- a/solid/frame/mprpc/test/test_clientserver_versioning.cpp +++ b/solid/frame/mprpc/test/test_clientserver_versioning.cpp @@ -172,7 +172,7 @@ void send_request(frame::mprpc::ServiceT& _rsvc) { auto req_ptr = frame::mprpc::make_message(); _rsvc.sendRequest( - "localhost", req_ptr, + {"localhost"}, req_ptr, []( frame::mprpc::ConnectionContext& _rctx, RequestPointerT& _rreqmsgptr, @@ -244,7 +244,7 @@ void send_request(frame::mprpc::ServiceT& _rsvc) auto req_ptr = frame::mprpc::make_message(); req_ptr->value_ = 11; _rsvc.sendRequest( - "localhost", req_ptr, + {"localhost"}, req_ptr, []( frame::mprpc::ConnectionContext& _rctx, RequestPointerT& _rreqmsgptr, @@ -319,7 +319,7 @@ void send_request(frame::mprpc::ServiceT& _rsvc) auto req_ptr = frame::mprpc::make_message(); req_ptr->values_ = "test"; _rsvc.sendRequest( - "localhost", req_ptr, + {"localhost"}, req_ptr, []( frame::mprpc::ConnectionContext& _rctx, RequestPointerT& _rreqmsgptr, @@ -394,7 +394,7 @@ void send_request(frame::mprpc::ServiceT& _rsvc) { auto 
req_ptr = frame::mprpc::make_message(); _rsvc.sendRequest( - "localhost", req_ptr, + {"localhost"}, req_ptr, []( frame::mprpc::ConnectionContext& _rctx, Request2PointerT& _rreqmsgptr, diff --git a/solid/frame/mprpc/test/test_connection_close.cpp b/solid/frame/mprpc/test/test_connection_close.cpp index 430eaf60..c9eba4b4 100644 --- a/solid/frame/mprpc/test/test_connection_close.cpp +++ b/solid/frame/mprpc/test/test_connection_close.cpp @@ -385,7 +385,7 @@ int test_connection_close(int argc, char* argv[]) { MessagePointerT msgptr(frame::mprpc::make_message(0)); mprpcclient.sendMessage( - "localhost", msgptr, + {"localhost"}, msgptr, initarray[0].flags | frame::mprpc::MessageFlagsE::AwaitResponse); } diff --git a/solid/frame/mprpc/test/test_keepalive_fail.cpp b/solid/frame/mprpc/test/test_keepalive_fail.cpp index d9ccf1e1..4cbfc0ea 100644 --- a/solid/frame/mprpc/test/test_keepalive_fail.cpp +++ b/solid/frame/mprpc/test/test_keepalive_fail.cpp @@ -254,7 +254,7 @@ void server_receive_message(frame::mprpc::ConnectionContext& _rctx, MessagePoint MessagePointerT msgptr(frame::mprpc::make_message(crtwriteidx)); ++crtwriteidx; pmprpcclient->sendMessage( - "localhost", msgptr, + {"localhost"}, msgptr, initarray[crtwriteidx % initarraysize].flags | frame::mprpc::MessageFlagsE::AwaitResponse); } } @@ -391,7 +391,7 @@ int test_keepalive_fail(int argc, char* argv[]) MessagePointerT msgptr(frame::mprpc::make_message(crtwriteidx)); ++crtwriteidx; mprpcclient.sendMessage( - "localhost", msgptr, + {"localhost"}, msgptr, initarray[crtwriteidx % initarraysize].flags | frame::mprpc::MessageFlagsE::AwaitResponse); } diff --git a/solid/frame/mprpc/test/test_keepalive_success.cpp b/solid/frame/mprpc/test/test_keepalive_success.cpp index 7e61a60c..c2f585f0 100644 --- a/solid/frame/mprpc/test/test_keepalive_success.cpp +++ b/solid/frame/mprpc/test/test_keepalive_success.cpp @@ -367,7 +367,7 @@ int test_keepalive_success(int argc, char* argv[]) MessagePointerT msgptr(frame::mprpc::make_message(crtwriteidx)); ++crtwriteidx; mprpcclient.sendMessage( - "localhost", msgptr, + {"localhost"}, msgptr, initarray[crtwriteidx % initarraysize].flags | frame::mprpc::MessageFlagsE::AwaitResponse); } solid_dbg(generic_logger, Info, "before sleep"); @@ -379,7 +379,7 @@ int test_keepalive_success(int argc, char* argv[]) MessagePointerT msgptr(frame::mprpc::make_message(crtwriteidx)); ++crtwriteidx; mprpcclient.sendMessage( - "localhost", msgptr, + {"localhost"}, msgptr, initarray[crtwriteidx % initarraysize].flags | frame::mprpc::MessageFlagsE::AwaitResponse); } diff --git a/solid/frame/mprpc/test/test_pool_basic.cpp b/solid/frame/mprpc/test/test_pool_basic.cpp index cc5a63fe..5fd29ccd 100644 --- a/solid/frame/mprpc/test/test_pool_basic.cpp +++ b/solid/frame/mprpc/test/test_pool_basic.cpp @@ -414,7 +414,7 @@ int test_pool_basic(int argc, char* argv[]) writecount = initarraysize * 10; // start_count;// err = mprpcclient.createConnectionPool( - "localhost", client_id, + {"localhost"}, client_id, [](frame::mprpc::ConnectionContext& _rctx, EventBase&& _revent, const ErrorConditionT& _rerr) { solid_dbg(generic_logger, Warning, "client pool event: " << _revent << " error: " << _rerr.message()); }, diff --git a/solid/frame/mprpc/test/test_pool_delay_close.cpp b/solid/frame/mprpc/test/test_pool_delay_close.cpp index 20c99ca9..02fa4044 100644 --- a/solid/frame/mprpc/test/test_pool_delay_close.cpp +++ b/solid/frame/mprpc/test/test_pool_delay_close.cpp @@ -368,7 +368,7 @@ int test_pool_delay_close(int argc, char* argv[]) { ++crtwriteidx; 
mprpcclient.sendMessage( - "localhost", *it, recipinet_id, {frame::mprpc::MessageFlagsE::AwaitResponse}); + {"localhost"}, *it, recipinet_id, {frame::mprpc::MessageFlagsE::AwaitResponse}); } ++it; diff --git a/solid/frame/mprpc/test/test_pool_force_close.cpp b/solid/frame/mprpc/test/test_pool_force_close.cpp index fae1450b..176e21d8 100644 --- a/solid/frame/mprpc/test/test_pool_force_close.cpp +++ b/solid/frame/mprpc/test/test_pool_force_close.cpp @@ -337,7 +337,7 @@ int test_pool_force_close(int argc, char* argv[]) { ++crtwriteidx; mprpcclient.sendMessage( - "localhost", *it, recipinet_id, 0); + {"localhost"}, *it, recipinet_id, 0); } ++it; diff --git a/solid/frame/mprpc/test/test_raw_basic.cpp b/solid/frame/mprpc/test/test_raw_basic.cpp index e6aec2c1..b0b324fe 100644 --- a/solid/frame/mprpc/test/test_raw_basic.cpp +++ b/solid/frame/mprpc/test/test_raw_basic.cpp @@ -428,7 +428,7 @@ int test_raw_basic(int argc, char* argv[]) MessagePointerT msgptr(frame::mprpc::make_message(crtwriteidx)); ++crtwriteidx; mprpcclient.sendMessage( - "localhost", msgptr, + {"localhost"}, msgptr, initarray[crtwriteidx % initarraysize].flags | frame::mprpc::MessageFlagsE::AwaitResponse); } diff --git a/solid/frame/mprpc/test/test_raw_proxy.cpp b/solid/frame/mprpc/test/test_raw_proxy.cpp index e3237922..638f61c0 100644 --- a/solid/frame/mprpc/test/test_raw_proxy.cpp +++ b/solid/frame/mprpc/test/test_raw_proxy.cpp @@ -427,7 +427,7 @@ int test_raw_proxy(int argc, char* argv[]) MessagePointerT msgptr(frame::mprpc::make_message(crtwriteidx)); ++crtwriteidx; mprpcclient.sendMessage( - "localhost", msgptr, + {"localhost"}, msgptr, initarray[crtwriteidx % initarraysize].flags | frame::mprpc::MessageFlagsE::AwaitResponse); } diff --git a/solid/frame/mprpc/test/test_relay_basic.cpp b/solid/frame/mprpc/test/test_relay_basic.cpp index be7e4e80..2bb920fb 100644 --- a/solid/frame/mprpc/test/test_relay_basic.cpp +++ b/solid/frame/mprpc/test/test_relay_basic.cpp @@ -313,7 +313,7 @@ void peerb_complete_message( solid_dbg(generic_logger, Info, crtreadidx << " < " << writecount); if (crtwriteidx < writecount) { err = pmprpcpeera->sendMessage( - "localhost/b", frame::mprpc::make_message(crtwriteidx++), + {"localhost", 0}, frame::mprpc::make_message(crtwriteidx++), initarray[crtwriteidx % initarraysize].flags | frame::mprpc::MessageFlagsE::AwaitResponse); solid_check(!err, "Connection id should not be invalid! 
" << err.message()); @@ -553,12 +553,12 @@ int test_relay_basic(int argc, char* argv[]) writecount = initarraysize * 2; // start_count;// // ensure we have provisioned connections on peerb - err = mprpcpeerb.createConnectionPool("localhost"); + err = mprpcpeerb.createConnectionPool({"localhost"}); solid_check(!err, "failed create connection from peerb: " << err.message()); for (; crtwriteidx < start_count;) { mprpcpeera.sendMessage( - "localhost/b", frame::mprpc::make_message(crtwriteidx++), + {"localhost", 0}, frame::mprpc::make_message(crtwriteidx++), initarray[crtwriteidx % initarraysize].flags | frame::mprpc::MessageFlagsE::AwaitResponse); } diff --git a/solid/frame/mprpc/test/test_relay_cancel_request.cpp b/solid/frame/mprpc/test/test_relay_cancel_request.cpp index 190e887c..153adf44 100644 --- a/solid/frame/mprpc/test/test_relay_cancel_request.cpp +++ b/solid/frame/mprpc/test/test_relay_cancel_request.cpp @@ -360,7 +360,7 @@ void peerb_complete_message( auto& back_msg_id = msgid_vec.back(); mtx.unlock(); err = pmprpcpeera->sendMessage( - "localhost/b", frame::mprpc::make_message(crtwriteidx++), + {"localhost", 0}, frame::mprpc::make_message(crtwriteidx++), back_msg_id.first, back_msg_id.second, initarray[crtwriteidx % initarraysize].flags | frame::mprpc::MessageFlagsE::AwaitResponse); @@ -602,7 +602,7 @@ int test_relay_cancel_request(int argc, char* argv[]) writecount = initarraysize * 2; // start_count;// // ensure we have provisioned connections on peerb - err = mprpcpeerb.createConnectionPool("localhost"); + err = mprpcpeerb.createConnectionPool({"localhost"}); solid_check(!err, "failed create connection from peerb: " << err.message()); for (; crtwriteidx < start_count;) { @@ -611,7 +611,7 @@ int test_relay_cancel_request(int argc, char* argv[]) auto& back_msg_id = msgid_vec.back(); mtx.unlock(); mprpcpeera.sendMessage( - "localhost/b", frame::mprpc::make_message(crtwriteidx++), + {"localhost", 0}, frame::mprpc::make_message(crtwriteidx++), back_msg_id.first, back_msg_id.second, initarray[crtwriteidx % initarraysize].flags | frame::mprpc::MessageFlagsE::AwaitResponse); diff --git a/solid/frame/mprpc/test/test_relay_cancel_response.cpp b/solid/frame/mprpc/test/test_relay_cancel_response.cpp index 5037d7bd..64432702 100644 --- a/solid/frame/mprpc/test/test_relay_cancel_response.cpp +++ b/solid/frame/mprpc/test/test_relay_cancel_response.cpp @@ -357,7 +357,7 @@ void peerb_complete_message( auto& back_msg_id = msgid_vec.back(); mtx.unlock(); err = pmprpcpeera->sendMessage( - "localhost/b", frame::mprpc::make_message(crtwriteidx++), + {"localhost", 0}, frame::mprpc::make_message(crtwriteidx++), back_msg_id.first, back_msg_id.second, initarray[crtwriteidx % initarraysize].flags | frame::mprpc::MessageFlagsE::AwaitResponse); @@ -595,7 +595,7 @@ int test_relay_cancel_response(int argc, char* argv[]) writecount = 2 * initarraysize; // initarraysize * 2; //start_count;// // ensure we have provisioned connections on peerb - err = mprpcpeerb.createConnectionPool("localhost"); + err = mprpcpeerb.createConnectionPool({"localhost"}); solid_check(!err, "failed create connection from peerb: " << err.message()); for (; crtwriteidx < start_count;) { @@ -604,7 +604,7 @@ int test_relay_cancel_response(int argc, char* argv[]) auto& back_msg_id = msgid_vec.back(); mtx.unlock(); mprpcpeera.sendMessage( - "localhost/b", frame::mprpc::make_message(crtwriteidx++), + {"localhost", 0}, frame::mprpc::make_message(crtwriteidx++), back_msg_id.first, back_msg_id.second, initarray[crtwriteidx % 
initarraysize].flags | frame::mprpc::MessageFlagsE::AwaitResponse); diff --git a/solid/frame/mprpc/test/test_relay_close_request.cpp b/solid/frame/mprpc/test/test_relay_close_request.cpp index 7809719f..23ea7f3b 100644 --- a/solid/frame/mprpc/test/test_relay_close_request.cpp +++ b/solid/frame/mprpc/test/test_relay_close_request.cpp @@ -554,7 +554,7 @@ int test_relay_close_request(int argc, char* argv[]) writecount = initarraysize; // start_count;// // ensure we have provisioned connections on peerb - err = mprpcpeerb.createConnectionPool("localhost"); + err = mprpcpeerb.createConnectionPool({"localhost"}); solid_check(!err, "failed create connection from peerb: " << err.message()); for (; crtwriteidx < start_count;) { @@ -563,7 +563,7 @@ int test_relay_close_request(int argc, char* argv[]) auto& back_msg_id = msgid_vec.back(); mtx.unlock(); mprpcpeera.sendMessage( - "localhost/b", frame::mprpc::make_message(crtwriteidx++), + {"localhost", 0}, frame::mprpc::make_message(crtwriteidx++), back_msg_id.first, back_msg_id.second, initarray[crtwriteidx % initarraysize].flags | frame::mprpc::MessageFlagsE::AwaitResponse); diff --git a/solid/frame/mprpc/test/test_relay_close_response.cpp b/solid/frame/mprpc/test/test_relay_close_response.cpp index 8f1511c3..60882a15 100644 --- a/solid/frame/mprpc/test/test_relay_close_response.cpp +++ b/solid/frame/mprpc/test/test_relay_close_response.cpp @@ -560,7 +560,7 @@ int test_relay_close_response(int argc, char* argv[]) writecount = initarraysize; // initarraysize * 2; //start_count;// // ensure we have provisioned connections on peerb - err = mprpcpeerb.createConnectionPool("localhost"); + err = mprpcpeerb.createConnectionPool({"localhost"}); solid_check(!err, "failed create connection from peerb: " << err.message()); for (; crtwriteidx < start_count;) { @@ -569,7 +569,7 @@ int test_relay_close_response(int argc, char* argv[]) auto& back_msg_id = msgid_vec.back(); mtx.unlock(); mprpcpeera.sendMessage( - "localhost/b", frame::mprpc::make_message(crtwriteidx++), + {"localhost", 0}, frame::mprpc::make_message(crtwriteidx++), back_msg_id.first, back_msg_id.second, initarray[crtwriteidx % initarraysize].flags | frame::mprpc::MessageFlagsE::AwaitResponse); diff --git a/solid/frame/mprpc/test/test_relay_detect_close.cpp b/solid/frame/mprpc/test/test_relay_detect_close.cpp index b651d81f..4a30fe28 100644 --- a/solid/frame/mprpc/test/test_relay_detect_close.cpp +++ b/solid/frame/mprpc/test/test_relay_detect_close.cpp @@ -128,7 +128,7 @@ void peera_complete_message( solid_check(_rsent_msg_ptr, "Error: there should be a request message"); if (_rrecv_msg_ptr) { _rrecv_msg_ptr->clearHeader(); - _rctx.service().sendMessage("localhost/b", std::move(_rrecv_msg_ptr), {frame::mprpc::MessageFlagsE::AwaitResponse}); + _rctx.service().sendMessage({"localhost", 0}, std::move(_rrecv_msg_ptr), {frame::mprpc::MessageFlagsE::AwaitResponse}); } } @@ -425,13 +425,13 @@ int test_relay_detect_close(int argc, char* argv[]) } // ensure we have provisioned connections on peerb - err = mprpcpeerb.createConnectionPool("localhost"); + err = mprpcpeerb.createConnectionPool({"localhost"}); solid_check(!err, "failed create connection from peerb: " << err.message()); - mprpcpeera.sendMessage("localhost/b", frame::mprpc::make_message(), {frame::mprpc::MessageFlagsE::AwaitResponse}); + mprpcpeera.sendMessage({"localhost", 0}, frame::mprpc::make_message(), {frame::mprpc::MessageFlagsE::AwaitResponse}); mprpcpeera.sendMessage( - "localhost/b", frame::mprpc::make_message(), + {"localhost", 0}, 
frame::mprpc::make_message(), {frame::mprpc::MessageFlagsE::AwaitResponse}); unique_lock lock(mtx); diff --git a/solid/frame/mprpc/test/test_relay_detect_close_while_response.cpp b/solid/frame/mprpc/test/test_relay_detect_close_while_response.cpp index e101bbf5..91240dc8 100644 --- a/solid/frame/mprpc/test/test_relay_detect_close_while_response.cpp +++ b/solid/frame/mprpc/test/test_relay_detect_close_while_response.cpp @@ -185,7 +185,7 @@ void peera_complete_detect_close( if (_rrecv_msg_ptr) { solid_dbg(generic_logger, Info, _rctx.recipientId() << " peera received DetectCloseMessage " << _rrecv_msg_ptr->idx); solid_check(!_rrecv_msg_ptr->isResponseLast()); - pmprpcpeera->sendMessage("localhost/b", frame::mprpc::make_message(_rrecv_msg_ptr->idx, generate_big_data(1024 * 10))); + pmprpcpeera->sendMessage({"localhost", 0}, frame::mprpc::make_message(_rrecv_msg_ptr->idx, generate_big_data(1024 * 10))); } if (_rerror == frame::mprpc::error_message_canceled_peer) { @@ -464,11 +464,11 @@ int test_relay_detect_close_while_response(int argc, char* argv[]) } // ensure we have provisioned connections on peerb - err = mprpcpeerb.createConnectionPool("localhost"); + err = mprpcpeerb.createConnectionPool({"localhost"}); solid_check(!err, "failed create connection from peerb: " << err.message()); pmprpcpeera = &mprpcpeera; - mprpcpeera.sendMessage("localhost/b", frame::mprpc::make_message(), {frame::mprpc::MessageFlagsE::AwaitResponse}); + mprpcpeera.sendMessage({"localhost", 0}, frame::mprpc::make_message(), {frame::mprpc::MessageFlagsE::AwaitResponse}); unique_lock lock(mtx); diff --git a/solid/frame/mprpc/test/test_relay_disabled.cpp b/solid/frame/mprpc/test/test_relay_disabled.cpp index 1e3c29d7..82db0306 100644 --- a/solid/frame/mprpc/test/test_relay_disabled.cpp +++ b/solid/frame/mprpc/test/test_relay_disabled.cpp @@ -286,7 +286,7 @@ void peerb_complete_message( solid_dbg(generic_logger, Info, crtreadidx); if (crtwriteidx < writecount) { err = pmprpcpeera->sendMessage( - "localhost/b", frame::mprpc::make_message(crtwriteidx++), + {"localhost", 0}, frame::mprpc::make_message(crtwriteidx++), initarray[crtwriteidx % initarraysize].flags | frame::mprpc::MessageFlagsE::AwaitResponse); solid_check(!err, "Connection id should not be invalid! " << err.message()); @@ -504,7 +504,7 @@ int test_relay_disabled(int argc, char* argv[]) if (1) { for (; crtwriteidx < writecount;) { mprpcpeera.sendMessage( - "localhost/b", frame::mprpc::make_message(crtwriteidx++), + {"localhost", 0}, frame::mprpc::make_message(crtwriteidx++), initarray[crtwriteidx % initarraysize].flags | frame::mprpc::MessageFlagsE::AwaitResponse); } } diff --git a/solid/frame/mprpc/test/test_relay_split.cpp b/solid/frame/mprpc/test/test_relay_split.cpp index ccd8f494..337c9fbd 100644 --- a/solid/frame/mprpc/test/test_relay_split.cpp +++ b/solid/frame/mprpc/test/test_relay_split.cpp @@ -338,7 +338,7 @@ void peerb_complete_message( solid_dbg(generic_logger, Info, crtreadidx << " < " << writecount); if (crtwriteidx < writecount) { err = pmprpcpeera->sendMessage( - "localhost/b", frame::mprpc::make_message(crtwriteidx++), + {"localhost", 0}, frame::mprpc::make_message(crtwriteidx++), initarray[crtwriteidx % initarraysize].flags | frame::mprpc::MessageFlagsE::AwaitResponse); solid_check(!err, "Connection id should not be invalid! 
" << err.message()); @@ -578,12 +578,12 @@ int test_relay_split(int argc, char* argv[]) writecount = initarraysize; // initarraysize * 2; // // ensure we have provisioned connections on peerb - err = mprpcpeerb.createConnectionPool("localhost"); + err = mprpcpeerb.createConnectionPool({"localhost"}); solid_check(!err, "failed create connection from peerb: " << err.message()); for (; crtwriteidx < start_count;) { mprpcpeera.sendMessage( - "localhost/b", frame::mprpc::make_message(crtwriteidx++), + {"localhost", 0}, frame::mprpc::make_message(crtwriteidx++), initarray[crtwriteidx % initarraysize].flags | frame::mprpc::MessageFlagsE::AwaitResponse); } diff --git a/solid/utility/string.hpp b/solid/utility/string.hpp index 9d9a6250..4d52dc40 100644 --- a/solid/utility/string.hpp +++ b/solid/utility/string.hpp @@ -19,32 +19,6 @@ namespace solid { -struct CStringHash { - size_t operator()(const char* _s) const - { - size_t hash = 0; - - for (; *_s; ++_s) { - hash += *_s; - hash += (hash << 10); - hash ^= (hash >> 6); - } - - hash += (hash << 3); - hash ^= (hash >> 11); - hash += (hash << 15); - - return hash; - } -}; - -struct CStringEqual { - bool operator()(const char* _val1, const char* _val2) const - { - return ::strcmp(_val1, _val2) == 0; - } -}; - inline void to_lower(std::string& _rstr) { std::transform(_rstr.begin(), _rstr.end(), _rstr.begin(), [](unsigned char c) { return std::tolower(c); }); diff --git a/tutorials/mprpc_echo/mprpc_echo_client.cpp b/tutorials/mprpc_echo/mprpc_echo_client.cpp index df926ce6..378e715c 100644 --- a/tutorials/mprpc_echo/mprpc_echo_client.cpp +++ b/tutorials/mprpc_echo/mprpc_echo_client.cpp @@ -110,7 +110,7 @@ int main(int argc, char* argv[]) size_t offset = line.find(' '); if (offset != string::npos) { recipient = line.substr(0, offset); - rpcservice.sendMessage(recipient.c_str(), frame::mprpc::make_message(line.substr(offset + 1)), {frame::mprpc::MessageFlagsE::AwaitResponse}); + rpcservice.sendMessage({recipient}, frame::mprpc::make_message(line.substr(offset + 1)), {frame::mprpc::MessageFlagsE::AwaitResponse}); } else { cout << "No recipient specified. 
E.g:" << endl << "localhost:4444 Some text to send" << endl; diff --git a/tutorials/mprpc_echo/mprpc_echo_client_pool.cpp b/tutorials/mprpc_echo/mprpc_echo_client_pool.cpp index 7e4e6a7c..06cadf1c 100644 --- a/tutorials/mprpc_echo/mprpc_echo_client_pool.cpp +++ b/tutorials/mprpc_echo/mprpc_echo_client_pool.cpp @@ -104,7 +104,7 @@ int main(int argc, char* argv[]) frame::mprpc::RecipientId recipient_id; rpcservice.createConnectionPool( - p.server_addr.c_str(), + {p.server_addr}, recipient_id, [](frame::mprpc::ConnectionContext& _rctx, EventBase&& _revt, const ErrorConditionT& _rerr) { solid_log(generic_logger, Verbose, "Connection pool event: " << _revt); diff --git a/tutorials/mprpc_echo_relay/mprpc_echo_relay_client.cpp b/tutorials/mprpc_echo_relay/mprpc_echo_relay_client.cpp index 680d2ae9..14b272c8 100644 --- a/tutorials/mprpc_echo_relay/mprpc_echo_relay_client.cpp +++ b/tutorials/mprpc_echo_relay/mprpc_echo_relay_client.cpp @@ -136,7 +136,7 @@ int main(int argc, char* argv[]) rpcservice.start(std::move(cfg)); } - rpcservice.createConnectionPool(p.server_addr.c_str()); + rpcservice.createConnectionPool({p.server_addr}); while (true) { string line; @@ -150,7 +150,7 @@ int main(int argc, char* argv[]) size_t offset = line.find(' '); if (offset != string::npos) { recipient = p.server_addr + '/' + line.substr(0, offset); - rpcservice.sendMessage(recipient.c_str(), frame::mprpc::make_message(p.name, line.substr(offset + 1)), {frame::mprpc::MessageFlagsE::AwaitResponse}); + rpcservice.sendMessage({recipient}, frame::mprpc::make_message(p.name, line.substr(offset + 1)), {frame::mprpc::MessageFlagsE::AwaitResponse}); } else { cout << "No recipient name specified. E.g:" << endl << "alpha Some text to send" << endl; diff --git a/tutorials/mprpc_file/mprpc_file_client.cpp b/tutorials/mprpc_file/mprpc_file_client.cpp index 23efb7b4..4c2c2ce2 100644 --- a/tutorials/mprpc_file/mprpc_file_client.cpp +++ b/tutorials/mprpc_file/mprpc_file_client.cpp @@ -141,7 +141,7 @@ int main(int argc, char* argv[]) iss >> path; rpcservice.sendRequest( - recipient.c_str(), frame::mprpc::make_message(std::move(path)), + {recipient}, frame::mprpc::make_message(std::move(path)), []( frame::mprpc::ConnectionContext& _rctx, frame::mprpc::MessagePointerT& _rsent_msg_ptr, @@ -173,7 +173,7 @@ int main(int argc, char* argv[]) iss >> remote_path >> local_path; rpcservice.sendRequest( - recipient.c_str(), frame::mprpc::make_message(std::move(remote_path), std::move(local_path)), + {recipient}, frame::mprpc::make_message(std::move(remote_path), std::move(local_path)), []( frame::mprpc::ConnectionContext& _rctx, frame::mprpc::MessagePointerT& _rsent_msg_ptr, diff --git a/tutorials/mprpc_request/mprpc_request_client.cpp b/tutorials/mprpc_request/mprpc_request_client.cpp index b0689710..83870eb4 100644 --- a/tutorials/mprpc_request/mprpc_request_client.cpp +++ b/tutorials/mprpc_request/mprpc_request_client.cpp @@ -148,7 +148,7 @@ int main(int argc, char* argv[]) auto req_ptr = frame::mprpc::make_message(line.substr(offset + 1)); rpcservice.sendRequest( - recipient.c_str(), req_ptr, lambda, 0); + {recipient}, req_ptr, lambda, 0); } else { cout << "No recipient specified. 
E.g:" << endl << "localhost:4444 Some text to send" << endl; diff --git a/tutorials/mprpc_request_ssl/mprpc_request_client.cpp b/tutorials/mprpc_request_ssl/mprpc_request_client.cpp index 9a1ce092..427e5095 100644 --- a/tutorials/mprpc_request_ssl/mprpc_request_client.cpp +++ b/tutorials/mprpc_request_ssl/mprpc_request_client.cpp @@ -189,7 +189,7 @@ int main(int argc, char* argv[]) cout << endl; rpcservice.sendRequest( - recipient.c_str(), // frame::mprpc::make_message(line.substr(offset + 1)), + {recipient}, // frame::mprpc::make_message(line.substr(offset + 1)), req_ptr, lambda, 0); } else { cout << "No recipient specified. E.g:" << endl From 1d6f2d5a0eef3758ac7c545e45c0b16b5cace556 Mon Sep 17 00:00:00 2001 From: Valentin Palade Date: Mon, 29 Jan 2024 10:25:53 +0200 Subject: [PATCH 02/13] relay_multicast: setting the stage by introducing RecipientUrl which also contains relay data --- solid/frame/mprpc/mprpcconfiguration.hpp | 2 - solid/frame/mprpc/mprpcmessage.hpp | 20 +- solid/frame/mprpc/mprpcrelayengine.hpp | 25 +- solid/frame/mprpc/mprpcrelayengines.hpp | 4 +- solid/frame/mprpc/mprpcservice.hpp | 161 ++++++------- solid/frame/mprpc/src/mprpcconfiguration.cpp | 22 -- solid/frame/mprpc/src/mprpcmessagereader.cpp | 2 +- solid/frame/mprpc/src/mprpcmessagewriter.cpp | 6 +- solid/frame/mprpc/src/mprpcmessagewriter.hpp | 2 +- solid/frame/mprpc/src/mprpcrelayengine.cpp | 8 +- solid/frame/mprpc/src/mprpcrelayengines.cpp | 73 +++--- solid/frame/mprpc/src/mprpcservice.cpp | 220 +++++++++--------- solid/frame/mprpc/src/mprpcutility.hpp | 14 ++ solid/frame/mprpc/test/test_pool_basic.cpp | 2 +- solid/frame/mprpc/test/test_relay_basic.cpp | 29 +-- .../mprpc/test/test_relay_cancel_request.cpp | 34 +-- .../mprpc/test/test_relay_cancel_response.cpp | 35 +-- .../mprpc/test/test_relay_close_request.cpp | 35 +-- .../mprpc/test/test_relay_close_response.cpp | 35 +-- .../mprpc/test/test_relay_detect_close.cpp | 35 +-- ...test_relay_detect_close_while_response.cpp | 35 +-- .../frame/mprpc/test/test_relay_disabled.cpp | 27 ++- solid/frame/mprpc/test/test_relay_split.cpp | 35 +-- .../mprpc_echo_relay_client.cpp | 32 +-- .../mprpc_echo_relay_register.hpp | 11 +- .../mprpc_echo_relay_server.cpp | 4 +- 26 files changed, 448 insertions(+), 460 deletions(-) diff --git a/solid/frame/mprpc/mprpcconfiguration.hpp b/solid/frame/mprpc/mprpcconfiguration.hpp index ba659545..63820757 100644 --- a/solid/frame/mprpc/mprpcconfiguration.hpp +++ b/solid/frame/mprpc/mprpcconfiguration.hpp @@ -288,7 +288,6 @@ using SendAllocateBufferFunctionT = solid_function_t(SharedBuffer( using RecvAllocateBufferFunctionT = solid_function_t(SharedBuffer(const uint32_t)); using CompressFunctionT = solid_function_t(size_t(char*, size_t, ErrorConditionT&)); using UncompressFunctionT = solid_function_t(size_t(char*, const char*, size_t, ErrorConditionT&)); -using ExtractRecipientNameFunctionT = solid_function_t(const char*(const char*, std::string&, std::string&)); using ConnectionEnterActiveCompleteFunctionT = solid_function_t(void(ConnectionContext&, ErrorConditionT const&)); using ConnectionPostCompleteFunctionT = solid_function_t(void(ConnectionContext&)); using ConnectionSendTimeoutSoftFunctionT = solid_function_t(void(ConnectionContext&)); @@ -352,7 +351,6 @@ class Configuration { uint8_t connection_send_buffer_start_capacity_kb = 0; uint8_t connection_send_buffer_max_capacity_kb = 64; uint16_t connection_relay_buffer_count = 8; - ExtractRecipientNameFunctionT extract_recipient_name_fnc; ConnectionStopFunctionT connection_stop_fnc;
ConnectionOnEventFunctionT connection_on_event_fnc; ConnectionSendTimeoutSoftFunctionT connection_on_send_timeout_soft_ = [](ConnectionContext&) {}; diff --git a/solid/frame/mprpc/mprpcmessage.hpp b/solid/frame/mprpc/mprpcmessage.hpp index 2f3b2fa2..69a5da83 100644 --- a/solid/frame/mprpc/mprpcmessage.hpp +++ b/solid/frame/mprpc/mprpcmessage.hpp @@ -52,7 +52,7 @@ struct MessageRelayHeader { void clear() { - group_id_ = 0; + group_id_ = InvalidIndex(); replica_id_ = 0; } @@ -75,10 +75,10 @@ std::ostream& operator<<(std::ostream& _ros, const OptionalMessageRelayHeaderT& struct MessageHeader { using FlagsT = MessageFlagsValueT; - FlagsT flags_{0}; - RequestId sender_request_id_; - RequestId recipient_request_id_; - OptionalMessageRelayHeaderT relay_; + FlagsT flags_{0}; + RequestId sender_request_id_; + RequestId recipient_request_id_; + MessageRelayHeader relay_; static MessageFlagsT fetch_state_flags(const MessageFlagsT& _flags) { @@ -119,7 +119,7 @@ struct MessageHeader { flags_ = 0; sender_request_id_.clear(); recipient_request_id_.clear(); - relay_.reset(); + relay_.clear(); } SOLID_REFLECT_V1(_rr, _rthis, _rctx) @@ -132,8 +132,7 @@ struct MessageHeader { _rr.add(_rthis.sender_request_id_.index, _rctx, 4, "recipient_request_index"); _rr.add(_rthis.sender_request_id_.unique, _rctx, 5, "recipient_request_unique"); if (_rctx.message_flags.has(MessageFlagsE::Relayed)) { - solid_assert(_rthis.relay_.has_value()); - _rr.add(_rthis.relay_.value(), _rctx, 6, "relay"); + _rr.add(_rthis.relay_, _rctx, 6, "relay"); } } else { _rr.add(_rthis.flags_, _rctx, 1, "flags"); @@ -145,8 +144,7 @@ struct MessageHeader { [&_rthis](auto& _rr, auto& _rctx) { const MessageFlagsT flags(_rthis.flags_); if (flags.has(MessageFlagsE::Relayed)) { - solid_assert(_rthis.relay_.has_value()); - _rr.add(_rthis.relay_.value(), _rctx, 6, "relay"); + _rr.add(_rthis.relay_, _rctx, 6, "relay"); } }, _rctx); @@ -343,7 +341,7 @@ struct Message : IntrusiveCacheable { const auto& relay() const { - return header_.relay_.value(); + return header_.relay_; } void clearStateFlags() diff --git a/solid/frame/mprpc/mprpcrelayengine.hpp b/solid/frame/mprpc/mprpcrelayengine.hpp index 1dda3c3e..d7a7c59e 100644 --- a/solid/frame/mprpc/mprpcrelayengine.hpp +++ b/solid/frame/mprpc/mprpcrelayengine.hpp @@ -20,24 +20,27 @@ namespace mprpc { namespace relay { struct ConnectionStubBase { - ActorIdT id_; - std::string name_; - size_t next_ = InvalidIndex(); - size_t prev_ = InvalidIndex(); + ActorIdT id_; + uint32_t group_id_ = InvalidIndex(); + uint16_t replica_id_ = 0; + size_t next_ = InvalidIndex(); + size_t prev_ = InvalidIndex(); ConnectionStubBase() = default; - ConnectionStubBase(std::string&& _uname) - : name_(std::move(_uname)) + ConnectionStubBase(const uint32_t _group_id, const uint16_t _replica_id) + : group_id_(_group_id) + , replica_id_(_replica_id) { } void clear() { id_.clear(); - name_.clear(); - next_ = InvalidIndex(); - prev_ = InvalidIndex(); + group_id_ = InvalidIndex(); + replica_id_ = 0; + next_ = InvalidIndex(); + prev_ = InvalidIndex(); } }; @@ -101,8 +104,8 @@ class EngineCore : public RelayEngine { } private: - virtual void unregisterConnectionName(Proxy& _proxy, size_t _conidx) = 0; - virtual size_t registerConnection(Proxy& _proxy, std::string&& _uname) = 0; + virtual void unregisterConnectionName(Proxy& _proxy, size_t _conidx) = 0; + virtual size_t registerConnection(Proxy& _proxy, const uint32_t _group_id, const uint16_t _replica_id) = 0; private: using ExecuteFunctionT = solid_function_t(void(Proxy&)); diff --git 
a/solid/frame/mprpc/mprpcrelayengines.hpp b/solid/frame/mprpc/mprpcrelayengines.hpp index 575afc37..5778dcb3 100644 --- a/solid/frame/mprpc/mprpcrelayengines.hpp +++ b/solid/frame/mprpc/mprpcrelayengines.hpp @@ -26,11 +26,11 @@ class SingleNameEngine : public EngineCore { public: SingleNameEngine(Manager& _rm); ~SingleNameEngine(); - ErrorConditionT registerConnection(const ConnectionContext& _rconctx, std::string&& _uname); + ErrorConditionT registerConnection(const ConnectionContext& _rconctx, const uint32_t _group_id, const uint16_t _replica_id); private: void unregisterConnectionName(Proxy& _proxy, size_t _conidx) override; - size_t registerConnection(Proxy& _proxy, std::string&& _uname) override; + size_t registerConnection(Proxy& _proxy, const uint32_t _group_id, const uint16_t _replica_id) override; std::ostream& print(std::ostream& _ros, const ConnectionStubBase& _rcon) const override; }; diff --git a/solid/frame/mprpc/mprpcservice.hpp b/solid/frame/mprpc/mprpcservice.hpp index 2d3194aa..23c7787b 100644 --- a/solid/frame/mprpc/mprpcservice.hpp +++ b/solid/frame/mprpc/mprpcservice.hpp @@ -168,78 +168,60 @@ struct ServiceStatistic : solid::Statistic { std::ostream& print(std::ostream& _ros) const override; }; -class RecipientUrl { - using ImplOptionalRelayT = std::optional; - +class RecipientUrl final { + friend class Service; + using ImplOptionalRelayT = std::optional; + const RecipientId* pid_ = nullptr; + ConnectionContext* pctx_ = nullptr; const std::string_view* purl_ = nullptr; ImplOptionalRelayT relay_; public: using RelayT = MessageRelayHeader; using OptionalRelayT = ImplOptionalRelayT; - RecipientUrl( - const std::string_view& _url) - : purl_(&_url) + RecipientId const& _id) + : pid_(&_id) { } RecipientUrl( - const std::string_view& _url, const RelayT& _relay) + const std::string_view& _url) : purl_(&_url) - , relay_(_relay) { } -protected: RecipientUrl( - const RelayT& _relay) - : relay_(_relay) - { - } - RecipientUrl() = default; -}; - -class RecipientUri : public RecipientUrl { - const RecipientId* pid_ = nullptr; - ConnectionContext* pctx_ = nullptr; - -public: - RecipientUri( - RecipientId const& _id) - : pid_(&_id) - { - } - - RecipientUri( - const std::string_view _url) - : RecipientUrl(_url) - { - } - - RecipientUri( ConnectionContext& _rctx) : pctx_(&_rctx) { } - RecipientUri( + RecipientUrl( RecipientId const& _id, const RelayT& _relay) - : RecipientUrl(_relay) - , pid_(&_id) + : pid_(&_id) + , relay_(_relay) { } - RecipientUri( + RecipientUrl( const std::string_view& _url, const RelayT& _relay) - : RecipientUrl(_url, _relay) + : purl_(&_url) + , relay_(_relay) { } - RecipientUri( + RecipientUrl( ConnectionContext& _rctx, const RelayT& _relay) - : RecipientUrl(_relay) - , pctx_(&_rctx) + : pctx_(&_rctx) + , relay_(_relay) + { + } + +private: + RecipientUrl(RecipientId const& _id, const OptionalRelayT& _relay) + : pid_(&_id) + , relay_(_relay) { } }; @@ -293,32 +275,32 @@ class Service : public frame::Service { const ServiceStatistic& statistic() const; - ErrorConditionT createConnectionPool(const RecipientUrl& _recipient_url, const size_t _persistent_connection_count = 1); + ErrorConditionT createConnectionPool(const std::string_view& _url, const size_t _persistent_connection_count = 1); template ErrorConditionT createConnectionPool( - const RecipientUrl& _recipient_uri, - RecipientId& _rrecipient_id, - const F _event_fnc, - const size_t _persistent_connection_count = 1); + const std::string_view& _url, + RecipientId& _rrecipient_id, + const F _event_fnc, + const 
size_t _persistent_connection_count = 1); // send message using recipient name -------------------------------------- template ErrorConditionT sendMessage( - const RecipientUri& _recipient_uri, + const RecipientUrl& _recipient_url, MessagePointerT const& _rmsgptr, const MessageFlagsT& _flags = 0); template ErrorConditionT sendMessage( - const RecipientUri& _recipient_uri, + const RecipientUrl& _recipient_url, MessagePointerT const& _rmsgptr, RecipientId& _rrecipient_id, const MessageFlagsT& _flags = 0); template ErrorConditionT sendMessage( - const RecipientUri& _recipient_uri, + const RecipientUrl& _recipient_url, MessagePointerT const& _rmsgptr, RecipientId& _rrecipient_id, MessageId& _rmsg_id, @@ -334,7 +316,7 @@ class Service : public frame::Service { #endif template ErrorConditionT sendMessage( - const RecipientUri& _recipient_uri, + const RecipientUrl& _recipient_url, MessagePointerT const& _rmsgptr, MessageId& _rmsg_id, const MessageFlagsT& _flags = 0); @@ -343,14 +325,14 @@ class Service : public frame::Service { template ErrorConditionT sendRequest( - const RecipientUri& _recipient_uri, + const RecipientUrl& _recipient_url, MessagePointerT const& _rmsgptr, Fnc _complete_fnc, const MessageFlagsT& _flags = 0); template ErrorConditionT sendRequest( - const RecipientUri& _recipient_uri, + const RecipientUrl& _recipient_url, MessagePointerT const& _rmsgptr, Fnc _complete_fnc, RecipientId& _rrecipient_id, @@ -358,7 +340,7 @@ class Service : public frame::Service { template ErrorConditionT sendRequest( - const RecipientUri& _recipient_uri, + const RecipientUrl& _recipient_url, MessagePointerT const& _rmsgptr, Fnc _complete_fnc, RecipientId& _rrecipient_id, @@ -399,14 +381,14 @@ class Service : public frame::Service { template ErrorConditionT sendMessage( - const RecipientUri& _recipient_uri, + const RecipientUrl& _recipient_url, MessagePointerT const& _rmsgptr, Fnc _complete_fnc, const MessageFlagsT& _flags); template ErrorConditionT sendMessage( - const RecipientUri& _recipient_uri, + const RecipientUrl& _recipient_url, MessagePointerT const& _rmsgptr, Fnc _complete_fnc, RecipientId& _rrecipient_id, @@ -414,7 +396,7 @@ class Service : public frame::Service { template ErrorConditionT sendMessage( - const RecipientUri& _recipient_uri, + const RecipientUrl& _recipient_url, MessagePointerT const& _rmsgptr, Fnc _complete_fnc, RecipientId& _rrecipient_id, @@ -648,23 +630,22 @@ class Service : public frame::Service { void forwardResolveMessage(ConnectionPoolId const& _rconpoolid, EventBase& _revent); ErrorConditionT doSendMessage( - const RecipientUri& _recipient_uri, + const RecipientUrl& _recipient_url, MessagePointerT<>& _rmsgptr, MessageCompleteFunctionT& _rcomplete_fnc, RecipientId* _precipient_id_out, MessageId* _pmsg_id_out, const MessageFlagsT& _flags); -#if 0 - ErrorConditionT doSendMessage( - ConnectionContext& _rctx, + ErrorConditionT doSendMessageUsingConnectionContext( + const RecipientUrl& _recipient_url, MessagePointerT<>& _rmsgptr, MessageCompleteFunctionT& _rcomplete_fnc, RecipientId* _precipient_id_out, MessageId* _pmsg_id_out, MessageFlagsT _flags); -#endif + ErrorConditionT doCreateConnectionPool( - const RecipientUrl& _recipient_url, + std::string_view _url, RecipientId& _rrecipient_id_out, PoolOnEventFunctionT& _event_fnc, const size_t _persistent_connection_count); @@ -686,30 +667,30 @@ using ServiceT = frame::ServiceShell; //------------------------------------------------------------------------- template ErrorConditionT Service::sendMessage( - const RecipientUri& 
_recipient_uri, + const RecipientUrl& _recipient_url, MessagePointerT const& _rmsgptr, const MessageFlagsT& _flags) { auto msgptr(solid::static_pointer_cast(_rmsgptr)); MessageCompleteFunctionT complete_handler; - return doSendMessage(_recipient_uri, msgptr, complete_handler, nullptr, nullptr, _flags); + return doSendMessage(_recipient_url, msgptr, complete_handler, nullptr, nullptr, _flags); } //------------------------------------------------------------------------- template ErrorConditionT Service::sendMessage( - const RecipientUri& _recipient_uri, + const RecipientUrl& _recipient_url, MessagePointerT const& _rmsgptr, RecipientId& _rrecipient_id, const MessageFlagsT& _flags) { auto msgptr(solid::static_pointer_cast(_rmsgptr)); MessageCompleteFunctionT complete_handler; - return doSendMessage(_recipient_uri, msgptr, complete_handler, &_rrecipient_id, nullptr, _flags); + return doSendMessage(_recipient_url, msgptr, complete_handler, &_rrecipient_id, nullptr, _flags); } //------------------------------------------------------------------------- template ErrorConditionT Service::sendMessage( - const RecipientUri& _recipient_uri, + const RecipientUrl& _recipient_url, MessagePointerT const& _rmsgptr, RecipientId& _rrecipient_id, MessageId& _rmsg_id, @@ -717,7 +698,7 @@ ErrorConditionT Service::sendMessage( { auto msgptr(solid::static_pointer_cast(_rmsgptr)); MessageCompleteFunctionT complete_handler; - return doSendMessage(_recipient_uri, msgptr, complete_handler, &_rrecipient_id, &_rmsg_id, _flags); + return doSendMessage(_recipient_url, msgptr, complete_handler, &_rrecipient_id, &_rmsg_id, _flags); } // send message using connection uid ------------------------------------- #if 0 @@ -735,20 +716,20 @@ ErrorConditionT Service::sendMessage( //------------------------------------------------------------------------- template ErrorConditionT Service::sendMessage( - const RecipientUri& _recipient_uri, + const RecipientUrl& _recipient_url, MessagePointerT const& _rmsgptr, MessageId& _rmsg_id, const MessageFlagsT& _flags) { auto msgptr(solid::static_pointer_cast(_rmsgptr)); MessageCompleteFunctionT complete_handler; - return doSendMessage(_recipient_uri, msgptr, complete_handler, nullptr, &_rmsg_id, _flags); + return doSendMessage(_recipient_url, msgptr, complete_handler, nullptr, &_rmsg_id, _flags); } //------------------------------------------------------------------------- // send request using recipient name -------------------------------------- template ErrorConditionT Service::sendRequest( - const RecipientUri& _recipient_uri, + const RecipientUrl& _recipient_url, MessagePointerT const& _rmsgptr, Fnc _complete_fnc, const MessageFlagsT& _flags) @@ -761,12 +742,12 @@ ErrorConditionT Service::sendRequest( CompleteHandlerT fnc(std::forward(_complete_fnc)); MessageCompleteFunctionT complete_handler(std::move(fnc)); - return doSendMessage(_recipient_uri, msgptr, complete_handler, nullptr, nullptr, _flags | MessageFlagsE::AwaitResponse); + return doSendMessage(_recipient_url, msgptr, complete_handler, nullptr, nullptr, _flags | MessageFlagsE::AwaitResponse); } //------------------------------------------------------------------------- template ErrorConditionT Service::sendRequest( - const RecipientUri& _recipient_uri, + const RecipientUrl& _recipient_url, MessagePointerT const& _rmsgptr, Fnc _complete_fnc, RecipientId& _rrecipient_id, @@ -780,12 +761,12 @@ ErrorConditionT Service::sendRequest( CompleteHandlerT fnc(std::forward(_complete_fnc)); MessageCompleteFunctionT 
complete_handler(std::move(fnc)); - return doSendMessage(_recipient_uri, msgptr, complete_handler, &_rrecipient_id, nullptr, _flags | MessageFlagsE::AwaitResponse); + return doSendMessage(_recipient_url, msgptr, complete_handler, &_rrecipient_id, nullptr, _flags | MessageFlagsE::AwaitResponse); } //------------------------------------------------------------------------- template ErrorConditionT Service::sendRequest( - const RecipientUri& _recipient_uri, + const RecipientUrl& _recipient_url, MessagePointerT const& _rmsgptr, Fnc _complete_fnc, RecipientId& _rrecipient_id, @@ -800,7 +781,7 @@ ErrorConditionT Service::sendRequest( CompleteHandlerT fnc(std::forward(_complete_fnc)); MessageCompleteFunctionT complete_handler(std::move(fnc)); - return doSendMessage(_recipient_uri, msgptr, complete_handler, &_rrecipient_id, &_rmsguid, _flags | MessageFlagsE::AwaitResponse); + return doSendMessage(_recipient_url, msgptr, complete_handler, &_rrecipient_id, &_rmsguid, _flags | MessageFlagsE::AwaitResponse); } //------------------------------------------------------------------------- #if 0 @@ -874,7 +855,7 @@ ErrorConditionT Service::sendResponse( // send message with complete using recipient name ------------------------ template ErrorConditionT Service::sendMessage( - const RecipientUri& _recipient_uri, + const RecipientUrl& _recipient_url, MessagePointerT const& _rmsgptr, Fnc _complete_fnc, const MessageFlagsT& _flags) @@ -887,12 +868,12 @@ ErrorConditionT Service::sendMessage( CompleteHandlerT fnc(std::forward(_complete_fnc)); MessageCompleteFunctionT complete_handler(std::move(fnc)); - return doSendMessage(_recipient_uri, msgptr, complete_handler, nullptr, nullptr, _flags); + return doSendMessage(_recipient_url, msgptr, complete_handler, nullptr, nullptr, _flags); } //------------------------------------------------------------------------- template ErrorConditionT Service::sendMessage( - const RecipientUri& _recipient_uri, + const RecipientUrl& _recipient_url, MessagePointerT const& _rmsgptr, Fnc _complete_fnc, RecipientId& _rrecipient_id, @@ -906,12 +887,12 @@ ErrorConditionT Service::sendMessage( CompleteHandlerT fnc(std::forward(_complete_fnc)); MessageCompleteFunctionT complete_handler(std::move(fnc)); - return doSendMessage(_recipient_uri, msgptr, complete_handler, &_rrecipient_id, nullptr, _flags); + return doSendMessage(_recipient_url, msgptr, complete_handler, &_rrecipient_id, nullptr, _flags); } //------------------------------------------------------------------------- template ErrorConditionT Service::sendMessage( - const RecipientUri& _recipient_uri, + const RecipientUrl& _recipient_url, MessagePointerT const& _rmsgptr, Fnc _complete_fnc, RecipientId& _rrecipient_id, @@ -926,7 +907,7 @@ ErrorConditionT Service::sendMessage( CompleteHandlerT fnc(std::forward(_complete_fnc)); MessageCompleteFunctionT complete_handler(std::move(fnc)); - return doSendMessage(_recipient_uri, msgptr, complete_handler, &_rrecipient_id, &_rmsguid, _flags); + return doSendMessage(_recipient_url, msgptr, complete_handler, &_rrecipient_id, &_rmsguid, _flags); } //------------------------------------------------------------------------- // send message with complete using connection uid ------------------------ @@ -1120,22 +1101,22 @@ ErrorConditionT Service::connectionNotifyRecvSomeRawData( return doConnectionNotifyRecvRawData(_rrecipient_id, std::move(complete_fnc)); } //------------------------------------------------------------------------- -inline ErrorConditionT Service::createConnectionPool(const 
RecipientUrl& _recipient_url, const size_t _persistent_connection_count) +inline ErrorConditionT Service::createConnectionPool(const std::string_view& _url, const size_t _persistent_connection_count) { RecipientId recipient_id; PoolOnEventFunctionT fnc([](ConnectionContext& _rctx, EventBase&&, const ErrorConditionT&) {}); - return doCreateConnectionPool(_recipient_url, recipient_id, fnc, _persistent_connection_count); + return doCreateConnectionPool(_url, recipient_id, fnc, _persistent_connection_count); } //------------------------------------------------------------------------- template ErrorConditionT Service::createConnectionPool( - const RecipientUrl& _recipient_url, - RecipientId& _rrecipient_id, - const F _event_fnc, - const size_t _persistent_connection_count) + const std::string_view& _url, + RecipientId& _rrecipient_id, + const F _event_fnc, + const size_t _persistent_connection_count) { PoolOnEventFunctionT fnc(_event_fnc); - return doCreateConnectionPool(_recipient_url, _rrecipient_id, fnc, _persistent_connection_count); + return doCreateConnectionPool(_url, _rrecipient_id, fnc, _persistent_connection_count); } //------------------------------------------------------------------------- //------------------------------------------------------------------------- diff --git a/solid/frame/mprpc/src/mprpcconfiguration.cpp b/solid/frame/mprpc/src/mprpcconfiguration.cpp index cd5284f5..348c5488 100644 --- a/solid/frame/mprpc/src/mprpcconfiguration.cpp +++ b/solid/frame/mprpc/src/mprpcconfiguration.cpp @@ -110,26 +110,6 @@ SocketStubPtrT default_create_server_socket(Configuration const& _rcfg, frame::a return plain::create_server_socket(_rcfg, _rproxy, std::move(_usd), _emplace_buf); } -const char* default_extract_recipient_name(const char* _purl, std::string& _msgurl, std::string& _tmpstr) -{ - solid_assert_log(_purl != nullptr, service_logger()); - - const char* const p = strchr(_purl, '/'); - - if (p == nullptr) { - return _purl; - } - _msgurl = (p + 1); - _tmpstr.assign(_purl, p - _purl); - return _tmpstr.c_str(); -} - -// const char* default_fast_extract_recipient_name(const char* _purl, std::string& _msgurl, std::string& _tmpstr) -// { -// -// return _purl; -// } - bool default_setup_socket_device(SocketDevice& _rsd) { _rsd.enableNoDelay(); @@ -259,8 +239,6 @@ void Configuration::init() server.socket_device_setup_fnc = &default_setup_socket_device; client.socket_device_setup_fnc = &default_setup_socket_device; - extract_recipient_name_fnc = &default_extract_recipient_name; - pool_max_active_connection_count = 1; pool_max_pending_connection_count = 1; pool_max_message_queue_size = 1024; diff --git a/solid/frame/mprpc/src/mprpcmessagereader.cpp b/solid/frame/mprpc/src/mprpcmessagereader.cpp index fdef28c1..a61be0bf 100644 --- a/solid/frame/mprpc/src/mprpcmessagereader.cpp +++ b/solid/frame/mprpc/src/mprpcmessagereader.cpp @@ -242,7 +242,7 @@ bool MessageReader::doConsumeMessageHeader( solid_log(logger, Info, "Relayed response"); rmsgstub.state_ = MessageStub::StateE::RelayResponse; cache(rmsgstub.deserializer_ptr_); - } else if (!_receiver.isRelayEnabled() || !rmsgstub.message_header_.relay_.has_value()) { + } else if (!_receiver.isRelayEnabled() || !Message::is_relayed(rmsgstub.message_header_.flags_)) { solid_log(logger, Info, "Read Body"); rmsgstub.state_ = MessageStub::StateE::ReadBodyStart; rmsgstub.deserializer_ptr_->clear(); diff --git a/solid/frame/mprpc/src/mprpcmessagewriter.cpp b/solid/frame/mprpc/src/mprpcmessagewriter.cpp index ef81fdc2..3224ea70 100644 --- 
a/solid/frame/mprpc/src/mprpcmessagewriter.cpp +++ b/solid/frame/mprpc/src/mprpcmessagewriter.cpp @@ -141,7 +141,7 @@ bool MessageWriter::enqueue( order_inner_list_.pushBack(idx); doWriteQueuePushBack(idx, __LINE__); - solid_log(logger, Verbose, "is_relayed = " << Message::is_relayed(rmsgstub.msgbundle_.message_ptr->flags()) << ' ' << MessageWriterPrintPairT(*this, PrintInnerListsE)); + solid_log(logger, Verbose, "is_relayed = " << Message::is_relayed(rmsgstub.msgbundle_.message_ptr->flags()) << ' ' << MessageWriterPrintPairT(*this, PrintInnerListsE) << " relay " << rmsgstub.msgbundle_.message_relay_header_ << " " << _rmsgbundle.message_relay_header_); return true; } @@ -679,6 +679,7 @@ char* MessageWriter::doWriteMessageHead( _rsender.context().message_flags.set(MessageFlagsE::Relayed); _rsender.context().pmessage_relay_header_ = &rmsgstub.msgbundle_.message_relay_header_.value(); } else { + _rsender.context().message_flags.reset(MessageFlagsE::Relayed); _rsender.context().pmessage_relay_header_ = nullptr; } @@ -730,6 +731,7 @@ char* MessageWriter::doWriteMessageBody( _rsender.context().message_flags.set(MessageFlagsE::Relayed); _rsender.context().pmessage_relay_header_ = &rmsgstub.msgbundle_.message_relay_header_.value(); } else { + _rsender.context().message_flags.reset(MessageFlagsE::Relayed); _rsender.context().pmessage_relay_header_ = nullptr; } @@ -788,7 +790,7 @@ char* MessageWriter::doWriteRelayedHead( _rsender.context().request_id.unique = rmsgstub.unique_; _rsender.context().message_flags = rmsgstub.prelay_data_->pmessage_header_->flags_; _rsender.context().message_flags.set(MessageFlagsE::Relayed); - _rsender.context().pmessage_relay_header_ = &rmsgstub.prelay_data_->pmessage_header_->relay_.value(); + _rsender.context().pmessage_relay_header_ = &rmsgstub.prelay_data_->pmessage_header_->relay_; const ptrdiff_t rv = rmsgstub.state_ == MessageStub::StateE::RelayedHeadStart ? rmsgstub.serializer_ptr_->run(_rsender.context(), _pbufpos, _pbufend - _pbufpos, *rmsgstub.prelay_data_->pmessage_header_) : rmsgstub.serializer_ptr_->run(_rsender.context(), _pbufpos, _pbufend - _pbufpos); rmsgstub.state_ = MessageStub::StateE::RelayedHeadContinue; diff --git a/solid/frame/mprpc/src/mprpcmessagewriter.hpp b/solid/frame/mprpc/src/mprpcmessagewriter.hpp index f162de36..c4ba3221 100644 --- a/solid/frame/mprpc/src/mprpcmessagewriter.hpp +++ b/solid/frame/mprpc/src/mprpcmessagewriter.hpp @@ -386,7 +386,7 @@ inline bool MessageWriter::MessageStub::isStop() const noexcept //----------------------------------------------------------------------------- inline bool MessageWriter::MessageStub::isRelay() const noexcept { - return msgbundle_.message_relay_header_.has_value(); //|| Message::is_relayed(msgbundle_.message_flags); // TODO: optimize!! + return msgbundle_.message_relay_header_.has_value() || Message::is_relayed(msgbundle_.message_flags); // TODO: optimize!! 
} //----------------------------------------------------------------------------- inline bool MessageWriter::MessageStub::isRelayed() const noexcept diff --git a/solid/frame/mprpc/src/mprpcrelayengine.cpp b/solid/frame/mprpc/src/mprpcrelayengine.cpp index 693c3532..a4238ece 100644 --- a/solid/frame/mprpc/src/mprpcrelayengine.cpp +++ b/solid/frame/mprpc/src/mprpcrelayengine.cpp @@ -194,8 +194,8 @@ struct ConnectionStub : ConnectionStubBase { { } - ConnectionStub(MessageDequeT& _rmsg_dq, std::string&& _uname) - : ConnectionStubBase(std::move(_uname)) + ConnectionStub(MessageDequeT& _rmsg_dq, const uint32_t _group_id, const uint16_t _replica_id) + : ConnectionStubBase(_group_id, _replica_id) , unique_(0) , pdone_relay_data_top_(nullptr) , send_msg_list_(_rmsg_dq) @@ -497,7 +497,7 @@ size_t EngineCore::doRegisterUnnamedConnection(const ActorIdT& _rcon_uid, Unique size_t EngineCore::doRegisterNamedConnection(MessageRelayHeader&& _relay) { Proxy proxy(*this); - size_t conidx = 0; // TODO:relay: registerConnection(proxy, std::move(_uname)); + size_t conidx = registerConnection(proxy, _relay.group_id_, _relay.replica_id_); solid_log(logger, Info, conidx << ' ' << plot(impl_->con_dq_[conidx])); return conidx; } @@ -536,7 +536,7 @@ bool EngineCore::doRelayStart( _rrelay_id = MessageId(msgidx, rmsg.unique_); - const size_t rcv_conidx = doRegisterNamedConnection(std::move(rmsg.header_.relay_.value())); + const size_t rcv_conidx = doRegisterNamedConnection(std::move(rmsg.header_.relay_)); ConnectionStub& rrcvcon = impl_->con_dq_[rcv_conidx]; ConnectionStub& rsndcon = impl_->con_dq_[snd_conidx]; diff --git a/solid/frame/mprpc/src/mprpcrelayengines.cpp b/solid/frame/mprpc/src/mprpcrelayengines.cpp index 9f8bd70d..9a281b09 100644 --- a/solid/frame/mprpc/src/mprpcrelayengines.cpp +++ b/solid/frame/mprpc/src/mprpcrelayengines.cpp @@ -30,7 +30,7 @@ namespace mprpc { namespace relay { //----------------------------------------------------------------------------- namespace { -using ConnectionMapT = std::unordered_map; +using ConnectionMapT = std::unordered_map; } // namespace struct SingleNameEngine::Data { @@ -46,55 +46,57 @@ SingleNameEngine::~SingleNameEngine() { } //----------------------------------------------------------------------------- -ErrorConditionT SingleNameEngine::registerConnection(const ConnectionContext& _rconctx, std::string&& _uname) +ErrorConditionT SingleNameEngine::registerConnection(const ConnectionContext& _rconctx, const uint32_t _group_id, const uint16_t _replica_id) { - solid_assert_log(!_uname.empty(), logger); ErrorConditionT err; - auto lambda = [&_uname, this, &_rconctx /*, &err*/](EngineCore::Proxy& _proxy) { - size_t conidx = static_cast(_rconctx.relayId().index); - size_t nameidx = InvalidIndex(); + auto lambda = [_group_id, _replica_id, this, &_rconctx /*, &err*/](EngineCore::Proxy& _proxy) { + size_t conidx = static_cast(_rconctx.relayId().index); + size_t idx = InvalidIndex(); { - const auto it = impl_->con_umap_.find(_uname.c_str()); + const auto it = impl_->con_umap_.find(_group_id); if (it != impl_->con_umap_.end()) { - nameidx = it->second; + idx = it->second; } } if (conidx == InvalidIndex()) { - if (nameidx == InvalidIndex()) { + if (idx == InvalidIndex()) { // do full registration - conidx = _proxy.createConnection(); - ConnectionStubBase& rcon = _proxy.connection(conidx); - rcon.name_ = std::move(_uname); - impl_->con_umap_[rcon.name_.c_str()] = conidx; + conidx = _proxy.createConnection(); + ConnectionStubBase& rcon = _proxy.connection(conidx); + rcon.group_id_ 
= _group_id; + rcon.replica_id_ = _replica_id; + impl_->con_umap_[rcon.group_id_] = conidx; } else { - if (_proxy.connection(nameidx).id_.isInvalid() || _proxy.connection(nameidx).id_ == _rconctx.connectionId()) { + if (_proxy.connection(idx).id_.isInvalid() || _proxy.connection(idx).id_ == _rconctx.connectionId()) { // use the connection already registered by name - conidx = nameidx; + conidx = idx; } else { // for now the most basic option - replace the existing connection with new one // TODO: add support for multiple chained connections, sharing the same name - impl_->con_umap_.erase(_proxy.connection(nameidx).name_.c_str()); - _proxy.connection(nameidx).name_.clear(); - conidx = _proxy.createConnection(); - ConnectionStubBase& rcon = _proxy.connection(conidx); - rcon.name_ = std::move(_uname); - impl_->con_umap_[rcon.name_.c_str()] = conidx; + impl_->con_umap_.erase(_proxy.connection(idx).group_id_); + _proxy.connection(idx).group_id_ = InvalidIndex(); + conidx = _proxy.createConnection(); + ConnectionStubBase& rcon = _proxy.connection(conidx); + rcon.group_id_ = _group_id; + rcon.replica_id_ = _replica_id; + impl_->con_umap_[rcon.group_id_] = conidx; } } - } else if (nameidx != InvalidIndex()) { + } else if (idx != InvalidIndex()) { // conflicting situation // - the connection was used for sending relayed messages - thus was registered without a name // - also the name was associated to another connection stub _proxy.stopConnection(conidx); - conidx = nameidx; + conidx = idx; } else { // simply register the name for existing connection - ConnectionStubBase& rcon = _proxy.connection(conidx); - rcon.name_ = std::move(_uname); - impl_->con_umap_[rcon.name_.c_str()] = conidx; + ConnectionStubBase& rcon = _proxy.connection(conidx); + rcon.group_id_ = _group_id; + rcon.replica_id_ = _replica_id; + impl_->con_umap_[rcon.group_id_] = conidx; } ConnectionStubBase& rcon = _proxy.connection(conidx); @@ -104,8 +106,6 @@ ErrorConditionT SingleNameEngine::registerConnection(const ConnectionContext& _r solid_check_log(_proxy.notifyConnection(_proxy.connection(conidx).id_, RelayEngineNotification::NewData), logger, "Connection should be alive"); }; - to_lower(_uname); - execute(lambda); return err; @@ -113,28 +113,29 @@ ErrorConditionT SingleNameEngine::registerConnection(const ConnectionContext& _r //----------------------------------------------------------------------------- void SingleNameEngine::unregisterConnectionName(Proxy& _proxy, size_t _conidx) /*override*/ { - impl_->con_umap_.erase(_proxy.connection(_conidx).name_.c_str()); + impl_->con_umap_.erase(_proxy.connection(_conidx).group_id_); } //----------------------------------------------------------------------------- -size_t SingleNameEngine::registerConnection(Proxy& _proxy, std::string&& _uname) /*override*/ +size_t SingleNameEngine::registerConnection(Proxy& _proxy, const uint32_t _group_id, const uint16_t _replica_id) /*override*/ { size_t conidx = InvalidIndex(); - const auto it = impl_->con_umap_.find(_uname.c_str()); + const auto it = impl_->con_umap_.find(_group_id); if (it != impl_->con_umap_.end()) { conidx = it->second; } else { - conidx = _proxy.createConnection(); - ConnectionStubBase& rcon = _proxy.connection(conidx); - rcon.name_ = std::move(_uname); - impl_->con_umap_[rcon.name_.c_str()] = conidx; + conidx = _proxy.createConnection(); + ConnectionStubBase& rcon = _proxy.connection(conidx); + rcon.group_id_ = _group_id; + rcon.replica_id_ = _replica_id; + impl_->con_umap_[rcon.group_id_] = conidx; } return conidx; } 
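Aside: the registration above replaces the former name-keyed map with one keyed by the numeric group id. Reduced to its core it is a find-or-insert over an unordered_map; the following is a minimal standalone sketch of that pattern (the Registry and ConnectionStub names here are illustrative stand-ins, not the library's actual types):

    #include <cstddef>
    #include <cstdint>
    #include <unordered_map>
    #include <vector>

    struct ConnectionStub {
        uint32_t group_id_   = 0;
        uint16_t replica_id_ = 0;
    };

    class Registry {
        std::vector<ConnectionStub>               connections_;
        std::unordered_map<uint32_t, std::size_t> index_; // group_id -> connection slot

    public:
        // Return the slot already registered for _group_id, creating a new
        // stub on first use - the same find-or-insert flow as
        // SingleNameEngine::registerConnection above.
        std::size_t registerConnection(const uint32_t _group_id, const uint16_t _replica_id)
        {
            if (const auto it = index_.find(_group_id); it != index_.end()) {
                return it->second;
            }
            const std::size_t idx = connections_.size();
            connections_.push_back(ConnectionStub{_group_id, _replica_id});
            index_[_group_id] = idx;
            return idx;
        }
    };
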
//----------------------------------------------------------------------------- std::ostream& SingleNameEngine::print(std::ostream& _ros, const ConnectionStubBase& _rcon) const /*override*/ { - return _ros << "con.id = " << _rcon.id_ << " con.name = " << _rcon.name_; + return _ros << "con.id = " << _rcon.id_ << " con = " << _rcon.group_id_ << ", " << _rcon.replica_id_; } //----------------------------------------------------------------------------- } // namespace relay diff --git a/solid/frame/mprpc/src/mprpcservice.cpp b/solid/frame/mprpc/src/mprpcservice.cpp index d8ba0d65..de2407a6 100644 --- a/solid/frame/mprpc/src/mprpcservice.cpp +++ b/solid/frame/mprpc/src/mprpcservice.cpp @@ -126,6 +126,18 @@ struct MessageStub : inner::Node { { } + MessageStub( + MessagePointerT<>&& _rmsgptr, + const size_t _msg_type_idx, + MessageCompleteFunctionT& _rcomplete_fnc, + ulong _msgflags, + const OptionalMessageRelayHeaderT& _relay) + : message_bundle_(std::move(_rmsgptr), _msg_type_idx, _msgflags, _rcomplete_fnc, _relay) + , unique_(0) + , flags_(0) + { + } + MessageStub( MessageStub&& _rmsg) noexcept : inner::Node(std::move(_rmsg)) @@ -290,11 +302,11 @@ struct ConnectionPoolStub : inner::Node& _rmsgptr, - const size_t _msg_type_idx, - MessageCompleteFunctionT& _rcomplete_fnc, - const MessageFlagsT& _flags, - OptionalMessageRelayHeaderT&& _relay) + MessagePointerT<>& _rmsgptr, + const size_t _msg_type_idx, + MessageCompleteFunctionT& _rcomplete_fnc, + const MessageFlagsT& _flags, + const OptionalMessageRelayHeaderT& _relay) { size_t idx; @@ -307,7 +319,7 @@ struct ConnectionPoolStub : inner::Node& _rmsgptr, - const size_t _msg_type_idx, - MessageCompleteFunctionT& _rcomplete_fnc, - const MessageFlagsT& _flags, - OptionalMessageRelayHeaderT&& _relay, - bool& _ris_first) + MessagePointerT<>& _rmsgptr, + const size_t _msg_type_idx, + MessageCompleteFunctionT& _rcomplete_fnc, + const MessageFlagsT& _flags, + const OptionalMessageRelayHeaderT& _relay, + bool& _ris_first) { - const MessageId msgid = insertMessage(_rmsgptr, _msg_type_idx, _rcomplete_fnc, _flags, std::move(_relay)); + const MessageId msgid = insertMessage(_rmsgptr, _msg_type_idx, _rcomplete_fnc, _flags, _relay); _ris_first = message_order_inner_list_.empty(); @@ -340,13 +352,13 @@ struct ConnectionPoolStub : inner::Node& _rmsgptr, - const size_t _msg_type_idx, - MessageCompleteFunctionT& _rcomplete_fnc, - const MessageFlagsT& _flags, - OptionalMessageRelayHeaderT&& _relay) + MessagePointerT<>& _rmsgptr, + const size_t _msg_type_idx, + MessageCompleteFunctionT& _rcomplete_fnc, + const MessageFlagsT& _flags, + const OptionalMessageRelayHeaderT& _relay) { - const MessageId msgid = insertMessage(_rmsgptr, _msg_type_idx, _rcomplete_fnc, _flags, std::move(_relay)); + const MessageId msgid = insertMessage(_rmsgptr, _msg_type_idx, _rcomplete_fnc, _flags, _relay); message_order_inner_list_.pushFront(msgid.index); @@ -624,8 +636,8 @@ struct Service::Data { NameMapT name_map_; ConnectionPoolDequeT pool_dq_; ConnectionPoolInnerListT pool_free_list_; - std::string tmp_str_; - ServiceStatistic statistic_; + // std::string tmp_str_; + ServiceStatistic statistic_; Data(Service& _rsvc, Configuration&& _config) : rmutex_(_rsvc.mutex()) @@ -661,13 +673,13 @@ struct Service::Data { } ErrorConditionT doSendMessageToConnection( - Service& _rsvc, - const RecipientId& _rrecipient_id_in, - MessagePointerT<>& _rmsgptr, - MessageCompleteFunctionT& _rcomplete_fnc, - MessageId* _pmsg_id_out, - MessageFlagsT _flags, - OptionalMessageRelayHeaderT&& _relay); + Service& 
_rsvc, + const RecipientId& _rrecipient_id_in, + MessagePointerT<>& _rmsgptr, + MessageCompleteFunctionT& _rcomplete_fnc, + MessageId* _pmsg_id_out, + MessageFlagsT _flags, + const OptionalMessageRelayHeaderT& _relay); bool doTryCreateNewConnectionForPool(Service& _rsvc, const size_t _pool_index, ErrorConditionT& _rerror); @@ -738,18 +750,18 @@ struct Service::Data { MessageBundle& _rmsgbundle, MessageId const& _rmsgid); ErrorConditionT doLockPool( - Service& _rsvc, const bool _check_uid, const char* _recipient_name, + Service& _rsvc, const bool _check_uid, const string_view& _url, ConnectionPoolId& _rpool_id, unique_lock& _rlock); bool doTryNotifyPoolWaitingConnection(Service& _rsvc, const size_t _pool_index); ErrorConditionT doSendMessageToPool( Service& _rsvc, const ConnectionPoolId& _rpool_id, MessagePointerT<>& _rmsgptr, - MessageCompleteFunctionT& _rcomplete_fnc, - const size_t _msg_type_idx, - OptionalMessageRelayHeaderT&& _relay, - RecipientId* _precipient_id_out, - MessageId* _pmsgid_out, - const MessageFlagsT& _flags); + MessageCompleteFunctionT& _rcomplete_fnc, + const size_t _msg_type_idx, + const OptionalMessageRelayHeaderT& _relay, + RecipientId* _precipient_id_out, + MessageId* _pmsgid_out, + const MessageFlagsT& _flags); }; //============================================================================= @@ -915,17 +927,18 @@ struct OnRelsolveF { //----------------------------------------------------------------------------- ErrorConditionT Service::doCreateConnectionPool( - const RecipientUrl& _recipient_url, + std::string_view _url, RecipientId& _rrecipient_id_out, PoolOnEventFunctionT& _event_fnc, const size_t _persistent_connection_count) { -#if 0 - static constexpr const char* empty_recipient_name = ":"; - std::string message_url; - shared_ptr locked_pimpl; - ConnectionPoolId pool_id; - const char* recipient_name = empty_recipient_name; + + static constexpr const string_view empty_recipient_name = ":"; + shared_ptr locked_pimpl; + ConnectionPoolId pool_id; + if (_url.empty()) { + _url = empty_recipient_name; + } { unique_lock lock; @@ -937,23 +950,14 @@ ErrorConditionT Service::doCreateConnectionPool( return error_service_stopping; } - recipient_name = configuration().extract_recipient_name_fnc(_recipient_url.data(), message_url, locked_pimpl->tmp_str_); - - if (recipient_name == nullptr) { - solid_log(logger, Error, this << " failed extracting recipient name"); - return error_service_invalid_url; - } else if (recipient_name[0] == '\0') { - recipient_name = empty_recipient_name; - } - - if (pimpl_->name_map_.find(recipient_name) == pimpl_->name_map_.end()) { + if (pimpl_->name_map_.find(_url) == pimpl_->name_map_.end()) { // pool does not exist if (!pimpl_->pool_free_list_.empty()) { const auto pool_index{locked_pimpl->pool_free_list_.popFront()}; ConnectionPoolStub& rpool(pimpl_->pool_dq_[pool_index]); pool_id = ConnectionPoolId{pool_index, rpool.unique_}; - rpool.name_ = recipient_name; + rpool.name_ = _url; locked_pimpl->name_map_[rpool.name_.c_str()] = pool_id; } else { return error_service_connection_pool_count; @@ -964,7 +968,7 @@ ErrorConditionT Service::doCreateConnectionPool( } unique_lock pool_lock; - auto error = locked_pimpl->doLockPool(*this, false, recipient_name, pool_id, pool_lock); + auto error = locked_pimpl->doLockPool(*this, false, _url, pool_id, pool_lock); if (!error) { } else { return error; @@ -981,13 +985,11 @@ ErrorConditionT Service::doCreateConnectionPool( _rrecipient_id_out.pool_id_ = pool_id; return error; -#endif - return {}; } 
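Aside: with the RecipientUrl wrapper dropped from this API, callers now pass the pool's recipient name directly as a std::string_view, and an empty view falls back to the ":" placeholder handled above. A minimal usage sketch, mirroring the test updates later in this patch (service setup and error handling elided; rpc_client is a configured frame::mprpc::Service):

    solid::frame::mprpc::RecipientId pool_recipient_id;

    const auto err = rpc_client.createConnectionPool(
        "localhost", // was {"localhost"} via RecipientUrl before this change
        pool_recipient_id,
        [](solid::frame::mprpc::ConnectionContext& _rctx, solid::EventBase&& _revent, const solid::ErrorConditionT& _rerr) {
            // pool lifecycle events (e.g. connection errors) are reported here
        },
        1 /* persistent_connection_count */);

    solid_check(!err, "failed to create connection pool: " << err.message());
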
//----------------------------------------------------------------------------- ErrorConditionT Service::Data::doLockPool( - Service& _rsvc, const bool _check_uid, const char* _recipient_name, + Service& _rsvc, const bool _check_uid, const string_view& _url, ConnectionPoolId& _rpool_id, unique_lock& _rlock) { while (true) { @@ -1008,7 +1010,7 @@ ErrorConditionT Service::Data::doLockPool( { lock_guard lock{rmutex_}; - NameMapT::const_iterator it = name_map_.find(_recipient_name); + NameMapT::const_iterator it = name_map_.find(_url); if (it != name_map_.end()) { _rpool_id = it->second; @@ -1018,7 +1020,7 @@ ErrorConditionT Service::Data::doLockPool( ConnectionPoolStub& rpool(pool_dq_[pool_index]); _rpool_id = ConnectionPoolId{pool_index, rpool.unique_}; - rpool.name_ = _recipient_name; + rpool.name_ = _url; name_map_[rpool.name_.c_str()] = _rpool_id; } else { return error_service_connection_pool_count; @@ -1029,9 +1031,8 @@ ErrorConditionT Service::Data::doLockPool( } } //----------------------------------------------------------------------------- -#if 0 -ErrorConditionT Service::doSendMessage( - ConnectionContext& _rctx, +ErrorConditionT Service::doSendMessageUsingConnectionContext( + const RecipientUrl& _recipient_url, MessagePointerT<>& _rmsgptr, MessageCompleteFunctionT& _rcomplete_fnc, RecipientId* _precipient_id_out, @@ -1040,10 +1041,10 @@ ErrorConditionT Service::doSendMessage( { solid_log(logger, Verbose, this); // first we'll try to directly deliver the message to connection's Writer. + auto& rctx = *_recipient_url.pctx_; + auto& rcon = rctx.connection(); - auto& rcon = _rctx.connection(); - - if (!rcon.isFull(_rctx.configuration())) { + if (!rcon.isFull(rctx.configuration())) { _flags |= MessageFlagsE::OneShotSend; @@ -1063,15 +1064,15 @@ ErrorConditionT Service::doSendMessage( solid_statistic_inc(pimpl_->statistic_.send_message_context_count_); return ErrorConditionT{}; } - return doSendMessage(nullptr, _rctx.recipientId(), msgbundle.message_ptr, msgbundle.complete_fnc, nullptr, nullptr, _flags); + return doSendMessage({rctx.recipientId(), _recipient_url.relay_}, msgbundle.message_ptr, msgbundle.complete_fnc, nullptr, nullptr, _flags); } - return doSendMessage(nullptr, _rctx.recipientId(), _rmsgptr, _rcomplete_fnc, nullptr, nullptr, _flags); + return doSendMessage({rctx.recipientId(), _recipient_url.relay_}, _rmsgptr, _rcomplete_fnc, nullptr, nullptr, _flags); } -#endif + //----------------------------------------------------------------------------- ErrorConditionT Service::doSendMessage( - const RecipientUri& _rrecipient_uri, + const RecipientUrl& _recipient_url, MessagePointerT<>& _rmsgptr, MessageCompleteFunctionT& _rcomplete_fnc, RecipientId* _precipient_id_out, @@ -1080,12 +1081,14 @@ ErrorConditionT Service::doSendMessage( { solid_log(logger, Verbose, this); solid_statistic_inc(pimpl_->statistic_.send_message_count_); -#if 0 - std::string message_url; shared_ptr locked_pimpl; - if (_rrecipient_id_in.isValidConnection()) { - if (_rrecipient_id_in.isValidPool()) { + if (_recipient_url.pctx_) { + return doSendMessageUsingConnectionContext(_recipient_url, _rmsgptr, _rcomplete_fnc, nullptr, nullptr, _flags); + } + + if (_recipient_url.pid_ && _recipient_url.pid_->isValidConnection()) { + if (_recipient_url.pid_->isValidPool()) { locked_pimpl = acquire(); if (locked_pimpl) { @@ -1095,22 +1098,26 @@ ErrorConditionT Service::doSendMessage( } return locked_pimpl->doSendMessageToConnection( *this, - _rrecipient_id_in, + *_recipient_url.pid_, _rmsgptr, _rcomplete_fnc, _pmsgid_out, - 
_flags, - std::move(message_url)); + _flags, _recipient_url.relay_); } else { solid_assert_log(false, logger); return error_service_unknown_connection; } } - static constexpr const char* empty_recipient_name = ":"; - const char* recipient_name = _recipient_url; - ConnectionPoolId pool_id; - bool check_uid = false; + if (!_recipient_url.pid_ && !_recipient_url.purl_) { + solid_log(logger, Error, this << " wrong url"); + return error_service_invalid_url; + } + + static constexpr const string_view empty_url = ":"; + const string_view url = (_recipient_url.purl_ && !_recipient_url.purl_->empty()) ? *_recipient_url.purl_ : empty_url; + ConnectionPoolId pool_id; + bool check_uid = false; { unique_lock lock; @@ -1122,18 +1129,9 @@ ErrorConditionT Service::doSendMessage( return error_service_stopping; } - if (_recipient_url != nullptr) { - - recipient_name = configuration().extract_recipient_name_fnc(_recipient_url, message_url, pimpl_->tmp_str_); + if (_recipient_url.purl_) { - if (recipient_name == nullptr) { - solid_log(logger, Error, this << " failed extracting recipient name"); - return error_service_invalid_url; - } else if (recipient_name[0] == '\0') { - recipient_name = empty_recipient_name; - } - - NameMapT::const_iterator it = locked_pimpl->name_map_.find(recipient_name); + NameMapT::const_iterator it = locked_pimpl->name_map_.find(url); if (it != locked_pimpl->name_map_.end()) { pool_id = it->second; @@ -1147,17 +1145,17 @@ ErrorConditionT Service::doSendMessage( ConnectionPoolStub& rpool(pimpl_->pool_dq_[pool_index]); pool_id = ConnectionPoolId{pool_index, rpool.unique_}; - rpool.name_ = recipient_name; + rpool.name_ = url; locked_pimpl->name_map_[rpool.name_.c_str()] = pool_id; } else { return error_service_connection_pool_count; } } } else if ( - static_cast(_rrecipient_id_in.pool_id_.index) < pimpl_->pool_dq_.size()) { + static_cast(_recipient_url.pid_->pool_id_.index) < pimpl_->pool_dq_.size()) { // we cannot check the uid right now because we need a lock on the pool's mutex check_uid = true; - pool_id = _rrecipient_id_in.pool_id_; + pool_id = _recipient_url.pid_->pool_id_; } else { solid_log(logger, Error, this << " recipient does not exist"); return error_service_unknown_recipient; @@ -1172,7 +1170,7 @@ ErrorConditionT Service::doSendMessage( } unique_lock pool_lock; - const auto error = locked_pimpl->doLockPool(*this, check_uid, recipient_name, pool_id, pool_lock); + const auto error = locked_pimpl->doLockPool(*this, check_uid, url, pool_id, pool_lock); if (!error) { } else { return error; @@ -1180,21 +1178,21 @@ ErrorConditionT Service::doSendMessage( solid_assert(pool_lock.owns_lock()); - return locked_pimpl->doSendMessageToPool(*this, pool_id, _rmsgptr, _rcomplete_fnc, msg_type_idx, std::move(message_url), _precipient_id_out, _pmsgid_out, _flags); -#endif + return locked_pimpl->doSendMessageToPool(*this, pool_id, _rmsgptr, _rcomplete_fnc, msg_type_idx, _recipient_url.relay_, _precipient_id_out, _pmsgid_out, _flags); + return {}; } //----------------------------------------------------------------------------- ErrorConditionT Service::Data::doSendMessageToConnection( - Service& _rsvc, - const RecipientId& _rrecipient_id_in, - MessagePointerT<>& _rmsgptr, - MessageCompleteFunctionT& _rcomplete_fnc, - MessageId* _pmsgid_out, - MessageFlagsT _flags, - OptionalMessageRelayHeaderT&& _relay) + Service& _rsvc, + const RecipientId& _rrecipient_id_in, + MessagePointerT<>& _rmsgptr, + MessageCompleteFunctionT& _rcomplete_fnc, + MessageId* _pmsgid_out, + MessageFlagsT _flags, + const 
OptionalMessageRelayHeaderT& _relay) { solid_log(logger, Verbose, &_rsvc); solid_statistic_inc(statistic_.send_message_to_connection_count_); @@ -1269,12 +1267,12 @@ ErrorConditionT Service::Data::doSendMessageToConnection( //----------------------------------------------------------------------------- ErrorConditionT Service::Data::doSendMessageToPool( Service& _rsvc, const ConnectionPoolId& _rpool_id, MessagePointerT<>& _rmsgptr, - MessageCompleteFunctionT& _rcomplete_fnc, - const size_t _msg_type_idx, - OptionalMessageRelayHeaderT&& _relay, - RecipientId* _precipient_id_out, - MessageId* _pmsgid_out, - const MessageFlagsT& _flags) + MessageCompleteFunctionT& _rcomplete_fnc, + const size_t _msg_type_idx, + const OptionalMessageRelayHeaderT& _relay, + RecipientId* _precipient_id_out, + MessageId* _pmsgid_out, + const MessageFlagsT& _flags) { solid_log(logger, Verbose, &_rsvc << " " << _rpool_id); solid_statistic_inc(statistic_.send_message_to_pool_count_); @@ -2781,7 +2779,7 @@ std::ostream& operator<<(std::ostream& _ros, const MessageRelayHeader& _header) std::ostream& operator<<(std::ostream& _ros, const OptionalMessageRelayHeaderT& _header) { if (_header.has_value()) { - _ros << _header; + _ros << _header.value(); } else { _ros << "null"; } diff --git a/solid/frame/mprpc/src/mprpcutility.hpp b/solid/frame/mprpc/src/mprpcutility.hpp index c335a798..67201629 100644 --- a/solid/frame/mprpc/src/mprpcutility.hpp +++ b/solid/frame/mprpc/src/mprpcutility.hpp @@ -187,6 +187,20 @@ struct MessageBundle { std::swap(complete_fnc, _complete_fnc); } + MessageBundle( + MessagePointerT<>&& _rmsgptr, + const size_t _msg_type_idx, + const MessageFlagsT& _flags, + MessageCompleteFunctionT& _complete_fnc, + const OptionalMessageRelayHeaderT& _relay) + : message_type_id(_msg_type_idx) + , message_flags(_flags) + , message_ptr(std::move(_rmsgptr)) + , message_relay_header_(_relay) + { + std::swap(complete_fnc, _complete_fnc); + } + MessageBundle( MessagePointerT<>&& _rmsgptr, const size_t _msg_type_idx, diff --git a/solid/frame/mprpc/test/test_pool_basic.cpp b/solid/frame/mprpc/test/test_pool_basic.cpp index 5fd29ccd..cc5a63fe 100644 --- a/solid/frame/mprpc/test/test_pool_basic.cpp +++ b/solid/frame/mprpc/test/test_pool_basic.cpp @@ -414,7 +414,7 @@ int test_pool_basic(int argc, char* argv[]) writecount = initarraysize * 10; // start_count;// err = mprpcclient.createConnectionPool( - {"localhost"}, client_id, + "localhost", client_id, [](frame::mprpc::ConnectionContext& _rctx, EventBase&& _revent, const ErrorConditionT& _rerr) { solid_dbg(generic_logger, Warning, "client pool event: " << _revent << " error: " << _rerr.message()); }, diff --git a/solid/frame/mprpc/test/test_relay_basic.cpp b/solid/frame/mprpc/test/test_relay_basic.cpp index 2bb920fb..7232d429 100644 --- a/solid/frame/mprpc/test/test_relay_basic.cpp +++ b/solid/frame/mprpc/test/test_relay_basic.cpp @@ -88,17 +88,18 @@ size_t real_size(size_t _sz) } struct Register : frame::mprpc::Message { - std::string str; - uint32_t err; + uint32_t err_ = 0; + uint32_t group_id_ = 0; + uint16_t replica_id_ = 0; - Register(const std::string& _rstr, uint32_t _err = 0) - : str(_rstr) - , err(_err) + Register(const uint32_t _group_id, uint32_t _err = 0) + : group_id_(_group_id) + , err_(_err) { solid_dbg(generic_logger, Info, "CREATE ---------------- " << this); } - Register(uint32_t _err = -1) - : err(_err) + Register() + : err_(-1) { solid_dbg(generic_logger, Info, "CREATE ---------------- " << this); } @@ -110,7 +111,8 @@ struct Register : 
frame::mprpc::Message { SOLID_REFLECT_V1(_rr, _rthis, _rctx) { - _rr.add(_rthis.err, _rctx, 0, "err").add(_rthis.str, _rctx, 1, "str"); + _rr.add(_rthis.err_, _rctx, 0, "err").add(_rthis.group_id_, _rctx, 1, "group_id"); + _rr.add(_rthis.replica_id_, _rctx, 2, "replica_id"); } }; @@ -248,7 +250,7 @@ void peerb_connection_start(frame::mprpc::ConnectionContext& _rctx) { solid_dbg(generic_logger, Info, _rctx.recipientId()); - auto msgptr = frame::mprpc::make_message("b"); + auto msgptr = frame::mprpc::make_message(0); ErrorConditionT err = _rctx.service().sendMessage(_rctx.recipientId(), std::move(msgptr), {frame::mprpc::MessageFlagsE::AwaitResponse}); solid_check(!err, "failed send Register"); } @@ -266,7 +268,7 @@ void peerb_complete_register( solid_dbg(generic_logger, Info, _rctx.recipientId()); solid_check(!_rerror); - if (_rrecv_msg_ptr && _rrecv_msg_ptr->err == 0) { + if (_rrecv_msg_ptr && _rrecv_msg_ptr->err_ == 0) { auto lambda = [](frame::mprpc::ConnectionContext&, ErrorConditionT const& _rerror) { solid_dbg(generic_logger, Info, "peerb --- enter active error: " << _rerror.message()); }; @@ -417,11 +419,10 @@ int test_relay_basic(int argc, char* argv[]) if (_rrecv_msg_ptr) { solid_check(!_rsent_msg_ptr); - solid_dbg(generic_logger, Info, "recv register request: " << _rrecv_msg_ptr->str); + solid_dbg(generic_logger, Info, "recv register request: " << _rrecv_msg_ptr->group_id_ << ", " << _rrecv_msg_ptr->replica_id_); - relay_engine.registerConnection(_rctx, std::move(_rrecv_msg_ptr->str)); + relay_engine.registerConnection(_rctx, _rrecv_msg_ptr->group_id_, _rrecv_msg_ptr->replica_id_); - _rrecv_msg_ptr->str.clear(); ErrorConditionT err = _rctx.service().sendResponse(_rctx.recipientId(), std::move(_rrecv_msg_ptr)); solid_check(!err, "Failed sending register response: " << err.message()); @@ -553,7 +554,7 @@ int test_relay_basic(int argc, char* argv[]) writecount = initarraysize * 2; // start_count;// // ensure we have provisioned connections on peerb - err = mprpcpeerb.createConnectionPool({"localhost"}); + err = mprpcpeerb.createConnectionPool("localhost"); solid_check(!err, "failed create connection from peerb: " << err.message()); for (; crtwriteidx < start_count;) { diff --git a/solid/frame/mprpc/test/test_relay_cancel_request.cpp b/solid/frame/mprpc/test/test_relay_cancel_request.cpp index 153adf44..b6b32eeb 100644 --- a/solid/frame/mprpc/test/test_relay_cancel_request.cpp +++ b/solid/frame/mprpc/test/test_relay_cancel_request.cpp @@ -110,28 +110,31 @@ size_t real_size(size_t _sz) } struct Register : frame::mprpc::Message { - std::string str; - uint32_t err; + uint32_t err_ = 0; + uint32_t group_id_ = 0; + uint16_t replica_id_ = 0; - Register(const std::string& _rstr, uint32_t _err = 0) - : str(_rstr) - , err(_err) + Register(const uint32_t _group_id, uint32_t _err = 0) + : group_id_(_group_id) + , err_(_err) { - solid_dbg(generic_logger, Info, "CREATE ---------------- " << (void*)this); + solid_dbg(generic_logger, Info, "CREATE ---------------- " << this); } - Register(uint32_t _err = -1) - : err(_err) + Register() + : err_(-1) { + solid_dbg(generic_logger, Info, "CREATE ---------------- " << this); } ~Register() { - solid_dbg(generic_logger, Info, "DELETE ---------------- " << (void*)this); + solid_dbg(generic_logger, Info, "DELETE ---------------- " << this); } SOLID_REFLECT_V1(_rr, _rthis, _rctx) { - _rr.add(_rthis.err, _rctx, 0, "err").add(_rthis.str, _rctx, 1, "str"); + _rr.add(_rthis.err_, _rctx, 0, "err").add(_rthis.group_id_, _rctx, 1, "group_id"); + 
_rr.add(_rthis.replica_id_, _rctx, 2, "replica_id"); } }; @@ -293,7 +296,7 @@ void peerb_connection_start(frame::mprpc::ConnectionContext& _rctx) { solid_dbg(generic_logger, Info, _rctx.recipientId()); - auto msgptr = frame::mprpc::make_message("b"); + auto msgptr = frame::mprpc::make_message(0); ErrorConditionT err = _rctx.service().sendMessage(_rctx.recipientId(), std::move(msgptr), {frame::mprpc::MessageFlagsE::AwaitResponse}); solid_check(!err, "failed send Register"); } @@ -311,7 +314,7 @@ void peerb_complete_register( solid_dbg(generic_logger, Info, _rctx.recipientId()); solid_check(!_rerror); - if (_rrecv_msg_ptr && _rrecv_msg_ptr->err == 0) { + if (_rrecv_msg_ptr && _rrecv_msg_ptr->err_ == 0) { auto lambda = [](frame::mprpc::ConnectionContext&, ErrorConditionT const& _rerror) { solid_dbg(generic_logger, Info, "peerb --- enter active error: " << _rerror.message()); }; @@ -465,11 +468,10 @@ int test_relay_cancel_request(int argc, char* argv[]) solid_check(!_rerror); if (_rrecv_msg_ptr) { solid_check(!_rsent_msg_ptr); - solid_dbg(generic_logger, Info, "recv register request: " << _rrecv_msg_ptr->str); + solid_dbg(generic_logger, Info, "recv register request: " << _rrecv_msg_ptr->group_id_ << ", " << _rrecv_msg_ptr->replica_id_); - relay_engine.registerConnection(_rctx, std::move(_rrecv_msg_ptr->str)); + relay_engine.registerConnection(_rctx, _rrecv_msg_ptr->group_id_, _rrecv_msg_ptr->replica_id_); - _rrecv_msg_ptr->str.clear(); ErrorConditionT err = _rctx.service().sendResponse(_rctx.recipientId(), std::move(_rrecv_msg_ptr)); solid_check(!err, "Failed sending register response: " << err.message()); @@ -602,7 +604,7 @@ int test_relay_cancel_request(int argc, char* argv[]) writecount = initarraysize * 2; // start_count;// // ensure we have provisioned connections on peerb - err = mprpcpeerb.createConnectionPool({"localhost"}); + err = mprpcpeerb.createConnectionPool("localhost"); solid_check(!err, "failed create connection from peerb: " << err.message()); for (; crtwriteidx < start_count;) { diff --git a/solid/frame/mprpc/test/test_relay_cancel_response.cpp b/solid/frame/mprpc/test/test_relay_cancel_response.cpp index 64432702..268e015b 100644 --- a/solid/frame/mprpc/test/test_relay_cancel_response.cpp +++ b/solid/frame/mprpc/test/test_relay_cancel_response.cpp @@ -111,28 +111,31 @@ size_t real_size(size_t _sz) } struct Register : frame::mprpc::Message { - std::string str; - uint32_t err; + uint32_t err_ = 0; + uint32_t group_id_ = 0; + uint16_t replica_id_ = 0; - Register(const std::string& _rstr, uint32_t _err = 0) - : str(_rstr) - , err(_err) + Register(const uint32_t _group_id, uint32_t _err = 0) + : group_id_(_group_id) + , err_(_err) { - solid_dbg(generic_logger, Info, "CREATE ---------------- " << (void*)this); + solid_dbg(generic_logger, Info, "CREATE ---------------- " << this); } - Register(uint32_t _err = -1) - : err(_err) + Register() + : err_(-1) { + solid_dbg(generic_logger, Info, "CREATE ---------------- " << this); } ~Register() { - solid_dbg(generic_logger, Info, "DELETE ---------------- " << (void*)this); + solid_dbg(generic_logger, Info, "DELETE ---------------- " << this); } SOLID_REFLECT_V1(_rr, _rthis, _rctx) { - _rr.add(_rthis.err, _rctx, 0, "err").add(_rthis.str, _rctx, 1, "str"); + _rr.add(_rthis.err_, _rctx, 0, "err").add(_rthis.group_id_, _rctx, 1, "group_id"); + _rr.add(_rthis.replica_id_, _rctx, 2, "replica_id"); } }; @@ -289,7 +292,7 @@ void peerb_connection_start(frame::mprpc::ConnectionContext& _rctx) { solid_dbg(generic_logger, Info, 
_rctx.recipientId()); - auto msgptr = frame::mprpc::make_message("b"); + auto msgptr = frame::mprpc::make_message(0); ErrorConditionT err = _rctx.service().sendMessage(_rctx.recipientId(), std::move(msgptr), {frame::mprpc::MessageFlagsE::AwaitResponse}); solid_check(!err, "failed send Register"); } @@ -307,7 +310,7 @@ void peerb_complete_register( solid_dbg(generic_logger, Info, _rctx.recipientId()); solid_check(!_rerror); - if (_rrecv_msg_ptr && _rrecv_msg_ptr->err == 0) { + if (_rrecv_msg_ptr && _rrecv_msg_ptr->err_ == 0) { auto lambda = [](frame::mprpc::ConnectionContext&, ErrorConditionT const& _rerror) { solid_dbg(generic_logger, Info, "peerb --- enter active error: " << _rerror.message()); }; @@ -458,11 +461,9 @@ int test_relay_cancel_response(int argc, char* argv[]) solid_check(!_rerror); if (_rrecv_msg_ptr) { solid_check(!_rsent_msg_ptr); - solid_dbg(generic_logger, Info, "recv register request: " << _rrecv_msg_ptr->str); - - relay_engine.registerConnection(_rctx, std::move(_rrecv_msg_ptr->str)); + solid_dbg(generic_logger, Info, "recv register request: " << _rrecv_msg_ptr->group_id_ << ", " << _rrecv_msg_ptr->replica_id_); - _rrecv_msg_ptr->str.clear(); + relay_engine.registerConnection(_rctx, _rrecv_msg_ptr->group_id_, _rrecv_msg_ptr->replica_id_); ErrorConditionT err = _rctx.service().sendResponse(_rctx.recipientId(), std::move(_rrecv_msg_ptr)); solid_check(!err, "Failed sending register response: " << err.message()); @@ -595,7 +596,7 @@ int test_relay_cancel_response(int argc, char* argv[]) writecount = 2 * initarraysize; // initarraysize * 2; //start_count;// // ensure we have provisioned connections on peerb - err = mprpcpeerb.createConnectionPool({"localhost"}); + err = mprpcpeerb.createConnectionPool("localhost"); solid_check(!err, "failed create connection from peerb: " << err.message()); for (; crtwriteidx < start_count;) { diff --git a/solid/frame/mprpc/test/test_relay_close_request.cpp b/solid/frame/mprpc/test/test_relay_close_request.cpp index 23ea7f3b..dfc6c941 100644 --- a/solid/frame/mprpc/test/test_relay_close_request.cpp +++ b/solid/frame/mprpc/test/test_relay_close_request.cpp @@ -94,28 +94,31 @@ size_t real_size(size_t _sz) } struct Register : frame::mprpc::Message { - std::string str; - uint32_t err; + uint32_t err_ = 0; + uint32_t group_id_ = 0; + uint16_t replica_id_ = 0; - Register(const std::string& _rstr, uint32_t _err = 0) - : str(_rstr) - , err(_err) + Register(const uint32_t _group_id, uint32_t _err = 0) + : group_id_(_group_id) + , err_(_err) { - solid_dbg(generic_logger, Info, "CREATE ---------------- " << (void*)this); + solid_dbg(generic_logger, Info, "CREATE ---------------- " << this); } - Register(uint32_t _err = -1) - : err(_err) + Register() + : err_(-1) { + solid_dbg(generic_logger, Info, "CREATE ---------------- " << this); } ~Register() { - solid_dbg(generic_logger, Info, "DELETE ---------------- " << (void*)this); + solid_dbg(generic_logger, Info, "DELETE ---------------- " << this); } SOLID_REFLECT_V1(_rr, _rthis, _rctx) { - _rr.add(_rthis.err, _rctx, 0, "err").add(_rthis.str, _rctx, 1, "str"); + _rr.add(_rthis.err_, _rctx, 0, "err").add(_rthis.group_id_, _rctx, 1, "group_id"); + _rr.add(_rthis.replica_id_, _rctx, 2, "replica_id"); } }; @@ -260,7 +263,7 @@ void peerb_connection_start(frame::mprpc::ConnectionContext& _rctx) { solid_dbg(generic_logger, Info, _rctx.recipientId()); - auto msgptr = frame::mprpc::make_message("b"); + auto msgptr = frame::mprpc::make_message(0); ErrorConditionT err = 
_rctx.service().sendMessage(_rctx.recipientId(), std::move(msgptr), {frame::mprpc::MessageFlagsE::AwaitResponse}); solid_check(!err, "failed send Register"); } @@ -278,7 +281,7 @@ void peerb_complete_register( solid_dbg(generic_logger, Info, _rctx.recipientId()); solid_check(!_rerror); - if (_rrecv_msg_ptr && _rrecv_msg_ptr->err == 0) { + if (_rrecv_msg_ptr && _rrecv_msg_ptr->err_ == 0) { auto lambda = [](frame::mprpc::ConnectionContext&, ErrorConditionT const& _rerror) { solid_dbg(generic_logger, Info, "peerb --- enter active error: " << _rerror.message()); }; @@ -417,11 +420,9 @@ int test_relay_close_request(int argc, char* argv[]) solid_check(!_rerror); if (_rrecv_msg_ptr) { solid_check(!_rsent_msg_ptr); - solid_dbg(generic_logger, Info, "recv register request: " << _rrecv_msg_ptr->str); - - relay_engine.registerConnection(_rctx, std::move(_rrecv_msg_ptr->str)); + solid_dbg(generic_logger, Info, "recv register request: " << _rrecv_msg_ptr->group_id_ << ", " << _rrecv_msg_ptr->replica_id_); - _rrecv_msg_ptr->str.clear(); + relay_engine.registerConnection(_rctx, _rrecv_msg_ptr->group_id_, _rrecv_msg_ptr->replica_id_); ErrorConditionT err = _rctx.service().sendResponse(_rctx.recipientId(), std::move(_rrecv_msg_ptr)); solid_check(!err, "Failed sending register response: " << err.message()); @@ -554,7 +555,7 @@ int test_relay_close_request(int argc, char* argv[]) writecount = initarraysize; // start_count;// // ensure we have provisioned connections on peerb - err = mprpcpeerb.createConnectionPool({"localhost"}); + err = mprpcpeerb.createConnectionPool("localhost"); solid_check(!err, "failed create connection from peerb: " << err.message()); for (; crtwriteidx < start_count;) { diff --git a/solid/frame/mprpc/test/test_relay_close_response.cpp b/solid/frame/mprpc/test/test_relay_close_response.cpp index 60882a15..0eb832c3 100644 --- a/solid/frame/mprpc/test/test_relay_close_response.cpp +++ b/solid/frame/mprpc/test/test_relay_close_response.cpp @@ -97,28 +97,31 @@ size_t real_size(size_t _sz) } struct Register : frame::mprpc::Message { - std::string str; - uint32_t err; + uint32_t err_ = 0; + uint32_t group_id_ = 0; + uint16_t replica_id_ = 0; - Register(const std::string& _rstr, uint32_t _err = 0) - : str(_rstr) - , err(_err) + Register(const uint32_t _group_id, uint32_t _err = 0) + : group_id_(_group_id) + , err_(_err) { - solid_dbg(generic_logger, Info, "CREATE ---------------- " << (void*)this); + solid_dbg(generic_logger, Info, "CREATE ---------------- " << this); } - Register(uint32_t _err = -1) - : err(_err) + Register() + : err_(-1) { + solid_dbg(generic_logger, Info, "CREATE ---------------- " << this); } ~Register() { - solid_dbg(generic_logger, Info, "DELETE ---------------- " << (void*)this); + solid_dbg(generic_logger, Info, "DELETE ---------------- " << this); } SOLID_REFLECT_V1(_rr, _rthis, _rctx) { - _rr.add(_rthis.err, _rctx, 0, "err").add(_rthis.str, _rctx, 1, "str"); + _rr.add(_rthis.err_, _rctx, 0, "err").add(_rthis.group_id_, _rctx, 1, "group_id"); + _rr.add(_rthis.replica_id_, _rctx, 2, "replica_id"); } }; @@ -261,7 +264,7 @@ void peerb_connection_start(frame::mprpc::ConnectionContext& _rctx) { solid_dbg(generic_logger, Info, _rctx.recipientId()); - auto msgptr = frame::mprpc::make_message("b"); + auto msgptr = frame::mprpc::make_message(0); ErrorConditionT err = _rctx.service().sendMessage(_rctx.recipientId(), std::move(msgptr), {frame::mprpc::MessageFlagsE::AwaitResponse}); solid_check(!err, "failed send Register"); } @@ -279,7 +282,7 @@ void peerb_complete_register( 
solid_dbg(generic_logger, Info, _rctx.recipientId()); solid_check(!_rerror); - if (_rrecv_msg_ptr && _rrecv_msg_ptr->err == 0) { + if (_rrecv_msg_ptr && _rrecv_msg_ptr->err_ == 0) { auto lambda = [](frame::mprpc::ConnectionContext&, ErrorConditionT const& _rerror) { solid_dbg(generic_logger, Info, "peerb --- enter active error: " << _rerror.message()); }; @@ -417,11 +420,9 @@ int test_relay_close_response(int argc, char* argv[]) solid_check(!_rerror); if (_rrecv_msg_ptr) { solid_check(!_rsent_msg_ptr); - solid_dbg(generic_logger, Info, "recv register request: " << _rrecv_msg_ptr->str); - - relay_engine.registerConnection(_rctx, std::move(_rrecv_msg_ptr->str)); + solid_dbg(generic_logger, Info, "recv register request: " << _rrecv_msg_ptr->group_id_ << ", " << _rrecv_msg_ptr->replica_id_); - _rrecv_msg_ptr->str.clear(); + relay_engine.registerConnection(_rctx, _rrecv_msg_ptr->group_id_, _rrecv_msg_ptr->replica_id_); ErrorConditionT err = _rctx.service().sendResponse(_rctx.recipientId(), std::move(_rrecv_msg_ptr)); solid_check(!err, "Failed sending register response: " << err.message()); @@ -560,7 +561,7 @@ int test_relay_close_response(int argc, char* argv[]) writecount = initarraysize; // initarraysize * 2; //start_count;// // ensure we have provisioned connections on peerb - err = mprpcpeerb.createConnectionPool({"localhost"}); + err = mprpcpeerb.createConnectionPool("localhost"); solid_check(!err, "failed create connection from peerb: " << err.message()); for (; crtwriteidx < start_count;) { diff --git a/solid/frame/mprpc/test/test_relay_detect_close.cpp b/solid/frame/mprpc/test/test_relay_detect_close.cpp index 4a30fe28..73b19471 100644 --- a/solid/frame/mprpc/test/test_relay_detect_close.cpp +++ b/solid/frame/mprpc/test/test_relay_detect_close.cpp @@ -50,28 +50,31 @@ void done() } struct Register : frame::mprpc::Message { - std::string str; - uint32_t err; + uint32_t err_ = 0; + uint32_t group_id_ = 0; + uint16_t replica_id_ = 0; - Register(const std::string& _rstr, uint32_t _err = 0) - : str(_rstr) - , err(_err) + Register(const uint32_t _group_id, uint32_t _err = 0) + : group_id_(_group_id) + , err_(_err) { - solid_dbg(generic_logger, Info, "CREATE ---------------- " << (void*)this); + solid_dbg(generic_logger, Info, "CREATE ---------------- " << this); } - Register(uint32_t _err = -1) - : err(_err) + Register() + : err_(-1) { + solid_dbg(generic_logger, Info, "CREATE ---------------- " << this); } ~Register() { - solid_dbg(generic_logger, Info, "DELETE ---------------- " << (void*)this); + solid_dbg(generic_logger, Info, "DELETE ---------------- " << this); } SOLID_REFLECT_V1(_rr, _rthis, _rctx) { - _rr.add(_rthis.err, _rctx, 0, "err").add(_rthis.str, _rctx, 1, "str"); + _rr.add(_rthis.err_, _rctx, 0, "err").add(_rthis.group_id_, _rctx, 1, "group_id"); + _rr.add(_rthis.replica_id_, _rctx, 2, "replica_id"); } }; @@ -157,7 +160,7 @@ void peerb_connection_start(frame::mprpc::ConnectionContext& _rctx) { solid_dbg(generic_logger, Info, _rctx.recipientId()); - auto msgptr = frame::mprpc::make_message("b"); + auto msgptr = frame::mprpc::make_message(0); ErrorConditionT err = _rctx.service().sendMessage(_rctx.recipientId(), std::move(msgptr), {frame::mprpc::MessageFlagsE::AwaitResponse}); solid_check(!err, "failed send Register"); } @@ -175,7 +178,7 @@ void peerb_complete_register( solid_dbg(generic_logger, Info, _rctx.recipientId()); solid_check(!_rerror); - if (_rrecv_msg_ptr && _rrecv_msg_ptr->err == 0) { + if (_rrecv_msg_ptr && _rrecv_msg_ptr->err_ == 0) { auto lambda = 
[](frame::mprpc::ConnectionContext&, ErrorConditionT const& _rerror) { solid_dbg(generic_logger, Info, "peerb --- enter active error: " << _rerror.message()); }; @@ -287,11 +290,9 @@ int test_relay_detect_close(int argc, char* argv[]) solid_check(!_rerror); if (_rrecv_msg_ptr) { solid_check(!_rsent_msg_ptr); - solid_dbg(generic_logger, Info, "recv register request: " << _rrecv_msg_ptr->str); + solid_dbg(generic_logger, Info, "recv register request: " << _rrecv_msg_ptr->group_id_ << ", " << _rrecv_msg_ptr->replica_id_); - relay_engine.registerConnection(_rctx, std::move(_rrecv_msg_ptr->str)); - - _rrecv_msg_ptr->str.clear(); + relay_engine.registerConnection(_rctx, _rrecv_msg_ptr->group_id_, _rrecv_msg_ptr->replica_id_); ErrorConditionT err = _rctx.service().sendResponse(_rctx.recipientId(), std::move(_rrecv_msg_ptr)); solid_check(!err, "Failed sending register response: " << err.message()); @@ -425,7 +426,7 @@ int test_relay_detect_close(int argc, char* argv[]) } // ensure we have provisioned connections on peerb - err = mprpcpeerb.createConnectionPool({"localhost"}); + err = mprpcpeerb.createConnectionPool("localhost"); solid_check(!err, "failed create connection from peerb: " << err.message()); mprpcpeera.sendMessage({"localhost", 0}, frame::mprpc::make_message(), {frame::mprpc::MessageFlagsE::AwaitResponse}); diff --git a/solid/frame/mprpc/test/test_relay_detect_close_while_response.cpp b/solid/frame/mprpc/test/test_relay_detect_close_while_response.cpp index 91240dc8..7b483745 100644 --- a/solid/frame/mprpc/test/test_relay_detect_close_while_response.cpp +++ b/solid/frame/mprpc/test/test_relay_detect_close_while_response.cpp @@ -78,28 +78,31 @@ string generate_big_data(const size_t _size) } struct Register : frame::mprpc::Message { - std::string str; - uint32_t err; + uint32_t err_ = 0; + uint32_t group_id_ = 0; + uint16_t replica_id_ = 0; - Register(const std::string& _rstr, uint32_t _err = 0) - : str(_rstr) - , err(_err) + Register(const uint32_t _group_id, uint32_t _err = 0) + : group_id_(_group_id) + , err_(_err) { - solid_dbg(generic_logger, Info, "CREATE ---------------- " << (void*)this); + solid_dbg(generic_logger, Info, "CREATE ---------------- " << this); } - Register(uint32_t _err = -1) - : err(_err) + Register() + : err_(-1) { + solid_dbg(generic_logger, Info, "CREATE ---------------- " << this); } ~Register() { - solid_dbg(generic_logger, Info, "DELETE ---------------- " << (void*)this); + solid_dbg(generic_logger, Info, "DELETE ---------------- " << this); } SOLID_REFLECT_V1(_rr, _rthis, _rctx) { - _rr.add(_rthis.err, _rctx, 0, "err").add(_rthis.str, _rctx, 1, "str"); + _rr.add(_rthis.err_, _rctx, 0, "err").add(_rthis.group_id_, _rctx, 1, "group_id"); + _rr.add(_rthis.replica_id_, _rctx, 2, "replica_id"); } }; @@ -200,7 +203,7 @@ void peerb_connection_start(frame::mprpc::ConnectionContext& _rctx) { solid_dbg(generic_logger, Info, _rctx.recipientId()); - auto msgptr = frame::mprpc::make_message("b"); + auto msgptr = frame::mprpc::make_message(0); ErrorConditionT err = _rctx.service().sendMessage(_rctx.recipientId(), std::move(msgptr), {frame::mprpc::MessageFlagsE::AwaitResponse}); solid_check(!err, "failed send Register"); } @@ -218,7 +221,7 @@ void peerb_complete_register( solid_dbg(generic_logger, Info, _rctx.recipientId()); solid_check(!_rerror); - if (_rrecv_msg_ptr && _rrecv_msg_ptr->err == 0) { + if (_rrecv_msg_ptr && _rrecv_msg_ptr->err_ == 0) { auto lambda = [](frame::mprpc::ConnectionContext&, ErrorConditionT const& _rerror) { solid_dbg(generic_logger, Info, 
"peerb --- enter active error: " << _rerror.message()); }; @@ -326,11 +329,9 @@ int test_relay_detect_close_while_response(int argc, char* argv[]) solid_check(!_rerror); if (_rrecv_msg_ptr) { solid_check(!_rsent_msg_ptr); - solid_dbg(generic_logger, Info, "recv register request: " << _rrecv_msg_ptr->str); + solid_dbg(generic_logger, Info, "recv register request: " << _rrecv_msg_ptr->group_id_ << ", " << _rrecv_msg_ptr->replica_id_); - relay_engine.registerConnection(_rctx, std::move(_rrecv_msg_ptr->str)); - - _rrecv_msg_ptr->str.clear(); + relay_engine.registerConnection(_rctx, _rrecv_msg_ptr->group_id_, _rrecv_msg_ptr->replica_id_); ErrorConditionT err = _rctx.service().sendResponse(_rctx.recipientId(), std::move(_rrecv_msg_ptr)); solid_check(!err, "Failed sending register response: " << err.message()); @@ -464,7 +465,7 @@ int test_relay_detect_close_while_response(int argc, char* argv[]) } // ensure we have provisioned connections on peerb - err = mprpcpeerb.createConnectionPool({"localhost"}); + err = mprpcpeerb.createConnectionPool("localhost"); solid_check(!err, "failed create connection from peerb: " << err.message()); pmprpcpeera = &mprpcpeera; diff --git a/solid/frame/mprpc/test/test_relay_disabled.cpp b/solid/frame/mprpc/test/test_relay_disabled.cpp index 82db0306..09270a30 100644 --- a/solid/frame/mprpc/test/test_relay_disabled.cpp +++ b/solid/frame/mprpc/test/test_relay_disabled.cpp @@ -87,28 +87,31 @@ size_t real_size(size_t _sz) } struct Register : frame::mprpc::Message { - std::string str; - uint32_t err; + uint32_t err_ = 0; + uint32_t group_id_ = 0; + uint16_t replica_id_ = 0; - Register(const std::string& _rstr, uint32_t _err = 0) - : str(_rstr) - , err(_err) + Register(const uint32_t _group_id, uint32_t _err = 0) + : group_id_(_group_id) + , err_(_err) { - solid_dbg(generic_logger, Info, "CREATE ---------------- " << (void*)this); + solid_dbg(generic_logger, Info, "CREATE ---------------- " << this); } - Register(uint32_t _err = -1) - : err(_err) + Register() + : err_(-1) { + solid_dbg(generic_logger, Info, "CREATE ---------------- " << this); } ~Register() { - solid_dbg(generic_logger, Info, "DELETE ---------------- " << (void*)this); + solid_dbg(generic_logger, Info, "DELETE ---------------- " << this); } SOLID_REFLECT_V1(_rr, _rthis, _rctx) { - _rr.add(_rthis.err, _rctx, 0, "err").add(_rthis.str, _rctx, 1, "str"); + _rr.add(_rthis.err_, _rctx, 0, "err").add(_rthis.group_id_, _rctx, 1, "group_id"); + _rr.add(_rthis.replica_id_, _rctx, 2, "replica_id"); } }; @@ -230,7 +233,7 @@ void peerb_connection_start(frame::mprpc::ConnectionContext& _rctx) { solid_dbg(generic_logger, Info, _rctx.recipientId()); - auto msgptr = frame::mprpc::make_message("b"); + auto msgptr = frame::mprpc::make_message(0); ErrorConditionT err = _rctx.service().sendMessage(_rctx.recipientId(), std::move(msgptr), {frame::mprpc::MessageFlagsE::AwaitResponse}); solid_check(!err, "failed send Register"); } @@ -248,7 +251,7 @@ void peerb_complete_register( solid_dbg(generic_logger, Info, _rctx.recipientId()); solid_check(!_rerror); - if (_rrecv_msg_ptr && _rrecv_msg_ptr->err == 0) { + if (_rrecv_msg_ptr && _rrecv_msg_ptr->err_ == 0) { auto lambda = [](frame::mprpc::ConnectionContext&, ErrorConditionT const& _rerror) { solid_dbg(generic_logger, Info, "peerb --- enter active error: " << _rerror.message()); }; diff --git a/solid/frame/mprpc/test/test_relay_split.cpp b/solid/frame/mprpc/test/test_relay_split.cpp index 337c9fbd..ba15aca7 100644 --- a/solid/frame/mprpc/test/test_relay_split.cpp +++ 
b/solid/frame/mprpc/test/test_relay_split.cpp @@ -87,28 +87,31 @@ size_t real_size(size_t _sz) } struct Register : frame::mprpc::Message { - std::string str; - uint32_t err; + uint32_t err_ = 0; + uint32_t group_id_ = 0; + uint16_t replica_id_ = 0; - Register(const std::string& _rstr, uint32_t _err = 0) - : str(_rstr) - , err(_err) + Register(const uint32_t _group_id, uint32_t _err = 0) + : group_id_(_group_id) + , err_(_err) { - solid_dbg(generic_logger, Info, "CREATE ---------------- " << (void*)this); + solid_dbg(generic_logger, Info, "CREATE ---------------- " << this); } - Register(uint32_t _err = -1) - : err(_err) + Register() + : err_(-1) { + solid_dbg(generic_logger, Info, "CREATE ---------------- " << this); } ~Register() { - solid_dbg(generic_logger, Info, "DELETE ---------------- " << (void*)this); + solid_dbg(generic_logger, Info, "DELETE ---------------- " << this); } SOLID_REFLECT_V1(_rr, _rthis, _rctx) { - _rr.add(_rthis.err, _rctx, 0, "err").add(_rthis.str, _rctx, 1, "str"); + _rr.add(_rthis.err_, _rctx, 0, "err").add(_rthis.group_id_, _rctx, 1, "group_id"); + _rr.add(_rthis.replica_id_, _rctx, 2, "replica_id"); } }; @@ -273,7 +276,7 @@ void peerb_connection_start(frame::mprpc::ConnectionContext& _rctx) { solid_dbg(generic_logger, Info, _rctx.recipientId()); - auto msgptr = frame::mprpc::make_message("b"); + auto msgptr = frame::mprpc::make_message(0); ErrorConditionT err = _rctx.service().sendMessage(_rctx.recipientId(), std::move(msgptr), {frame::mprpc::MessageFlagsE::AwaitResponse}); solid_check(!err, "failed send Register"); } @@ -291,7 +294,7 @@ void peerb_complete_register( solid_dbg(generic_logger, Info, _rctx.recipientId()); solid_check(!_rerror); - if (_rrecv_msg_ptr && _rrecv_msg_ptr->err == 0) { + if (_rrecv_msg_ptr && _rrecv_msg_ptr->err_ == 0) { auto lambda = [](frame::mprpc::ConnectionContext&, ErrorConditionT const& _rerror) { solid_dbg(generic_logger, Info, "peerb --- enter active error: " << _rerror.message()); }; @@ -441,11 +444,9 @@ int test_relay_split(int argc, char* argv[]) if (_rrecv_msg_ptr) { solid_check(!_rsent_msg_ptr); - solid_dbg(generic_logger, Info, "recv register request: " << _rrecv_msg_ptr->str); - - relay_engine.registerConnection(_rctx, std::move(_rrecv_msg_ptr->str)); + solid_dbg(generic_logger, Info, "recv register request: " << _rrecv_msg_ptr->group_id_ << ", " << _rrecv_msg_ptr->replica_id_); - _rrecv_msg_ptr->str.clear(); + relay_engine.registerConnection(_rctx, _rrecv_msg_ptr->group_id_, _rrecv_msg_ptr->replica_id_); ErrorConditionT err = _rctx.service().sendResponse(_rctx.recipientId(), std::move(_rrecv_msg_ptr)); solid_check(!err, "Failed sending register response: " << err.message()); @@ -578,7 +579,7 @@ int test_relay_split(int argc, char* argv[]) writecount = initarraysize; // initarraysize * 2; // // ensure we have provisioned connections on peerb - err = mprpcpeerb.createConnectionPool({"localhost"}); + err = mprpcpeerb.createConnectionPool("localhost"); solid_check(!err, "failed create connection from peerb: " << err.message()); for (; crtwriteidx < start_count;) { diff --git a/tutorials/mprpc_echo_relay/mprpc_echo_relay_client.cpp b/tutorials/mprpc_echo_relay/mprpc_echo_relay_client.cpp index 14b272c8..ccb06fe3 100644 --- a/tutorials/mprpc_echo_relay/mprpc_echo_relay_client.cpp +++ b/tutorials/mprpc_echo_relay/mprpc_echo_relay_client.cpp @@ -27,26 +27,26 @@ struct Parameters { , server_addr("127.0.0.1") { } - string name; - string server_port; - string server_addr; + uint32_t group_id; + string server_port; + string 
server_addr; }; struct Message : solid::frame::mprpc::Message { - std::string name; + uint32_t group_id_; std::string data; Message() {} - Message(const std::string& _name, std::string&& _ustr) - : name(_name) + Message(uint32_t _group_id, std::string&& _ustr) + : group_id_(_group_id) , data(std::move(_ustr)) { } SOLID_REFLECT_V1(_rr, _rthis, _rctx) { - _rr.add(_rthis.name, _rctx, 1, "name"); + _rr.add(_rthis.group_id_, _rctx, 1, "group_id"); _rr.add(_rthis.data, _rctx, 2, "data"); } }; @@ -84,7 +84,7 @@ int main(int argc, char* argv[]) ErrorConditionT const& _rerror) { solid_check(!_rerror); - if (_rrecv_msg_ptr && _rrecv_msg_ptr->name.empty()) { + if (_rrecv_msg_ptr) { auto lambda = [](frame::mprpc::ConnectionContext&, ErrorConditionT const& _rerror) { solid_log(generic_logger, Info, "peerb --- enter active error: " << _rerror.message()); }; @@ -98,11 +98,11 @@ int main(int argc, char* argv[]) frame::mprpc::MessagePointerT& _rrecv_msg_ptr, ErrorConditionT const& _rerror) { if (_rrecv_msg_ptr) { - cout << _rrecv_msg_ptr->name << ": " << _rrecv_msg_ptr->data << endl; + cout << _rrecv_msg_ptr->group_id_ << ": " << _rrecv_msg_ptr->data << endl; if (!_rsent_msg_ptr) { // we're on peer - echo back the response - _rrecv_msg_ptr->name = p.name; - ErrorConditionT err = _rctx.service().sendResponse(_rctx.recipientId(), std::move(_rrecv_msg_ptr)); + _rrecv_msg_ptr->group_id_ = p.group_id; + ErrorConditionT err = _rctx.service().sendResponse(_rctx.recipientId(), std::move(_rrecv_msg_ptr)); (void)err; } @@ -111,7 +111,7 @@ int main(int argc, char* argv[]) auto on_connection_start = [&p](frame::mprpc::ConnectionContext& _rctx) { solid_log(generic_logger, Info, _rctx.recipientId()); - auto msgptr = frame::mprpc::make_message(p.name); + auto msgptr = frame::mprpc::make_message(p.group_id); ErrorConditionT err = _rctx.service().sendMessage(_rctx.recipientId(), std::move(msgptr), {frame::mprpc::MessageFlagsE::AwaitResponse}); solid_check(!err, "failed send Register"); }; @@ -150,7 +150,7 @@ int main(int argc, char* argv[]) size_t offset = line.find(' '); if (offset != string::npos) { recipient = p.server_addr + '/' + line.substr(0, offset); - rpcservice.sendMessage({recipient}, frame::mprpc::make_message(p.name, line.substr(offset + 1)), {frame::mprpc::MessageFlagsE::AwaitResponse}); + rpcservice.sendMessage({recipient}, frame::mprpc::make_message(p.group_id, line.substr(offset + 1)), {frame::mprpc::MessageFlagsE::AwaitResponse}); } else { cout << "No recipient name specified. 
E.g:" << endl << "alpha Some text to send" << endl; @@ -166,13 +166,13 @@ int main(int argc, char* argv[]) bool parseArguments(Parameters& _par, int argc, char* argv[]) { if (argc == 2) { - _par.name = argv[1]; + _par.group_id = stoul(argv[1]); return true; } if (argc == 3) { size_t pos; - _par.name = argv[1]; + _par.group_id = stoul(argv[1]); _par.server_addr = argv[2]; @@ -188,6 +188,6 @@ bool parseArguments(Parameters& _par, int argc, char* argv[]) return true; } cout << "Usage: " << endl - << argv[0] << " my_name [server_addr:server_port]" << endl; + << argv[0] << " my_group_id [server_addr:server_port]" << endl; return false; } diff --git a/tutorials/mprpc_echo_relay/mprpc_echo_relay_register.hpp b/tutorials/mprpc_echo_relay/mprpc_echo_relay_register.hpp index b40faa29..be946756 100644 --- a/tutorials/mprpc_echo_relay/mprpc_echo_relay_register.hpp +++ b/tutorials/mprpc_echo_relay/mprpc_echo_relay_register.hpp @@ -6,17 +6,20 @@ #include "solid/frame/mprpc/mprpcprotocol_serialization_v3.hpp" struct Register : solid::frame::mprpc::Message { - std::string name; + uint32_t group_id_ = 0; + uint16_t replica_id_ = 0; Register() {} - Register(const std::string& _name) - : name(_name) + Register(uint32_t _group_id, uint16_t _replica_id = 0) + : group_id_(_group_id) + , replica_id_(_replica_id) { } SOLID_REFLECT_V1(_rr, _rthis, _rctx) { - _rr.add(_rthis.name, _rctx, 1, "name"); + _rr.add(_rthis.group_id_, _rctx, 1, "group_id"); + _rr.add(_rthis.replica_id_, _rctx, 2, "replica_id"); } }; diff --git a/tutorials/mprpc_echo_relay/mprpc_echo_relay_server.cpp b/tutorials/mprpc_echo_relay/mprpc_echo_relay_server.cpp index 72dd7047..fb89b2d1 100644 --- a/tutorials/mprpc_echo_relay/mprpc_echo_relay_server.cpp +++ b/tutorials/mprpc_echo_relay/mprpc_echo_relay_server.cpp @@ -64,9 +64,9 @@ int main(int argc, char* argv[]) solid_check(!_rerror); if (_rrecv_msg_ptr) { solid_check(!_rsent_msg_ptr); - solid_log(generic_logger, Info, "recv register request: " << _rrecv_msg_ptr->name); + solid_log(generic_logger, Info, "recv register request: " << _rrecv_msg_ptr->group_id_); - relay_engine.registerConnection(_rctx, std::move(_rrecv_msg_ptr->name)); + relay_engine.registerConnection(_rctx, _rrecv_msg_ptr->group_id_, _rrecv_msg_ptr->replica_id_); ErrorConditionT err = _rctx.service().sendResponse(_rctx.recipientId(), std::move(_rrecv_msg_ptr)); From 06019429100ce59ed6591792b9a943b3bc01f2c0 Mon Sep 17 00:00:00 2001 From: Valentin Palade Date: Mon, 11 Mar 2024 20:20:57 +0200 Subject: [PATCH 03/13] relay_multicast: more log information --- solid/frame/mprpc/src/mprpcmessagewriter.cpp | 6 ++++-- solid/frame/mprpc/test/test_relay_basic.cpp | 2 +- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/solid/frame/mprpc/src/mprpcmessagewriter.cpp b/solid/frame/mprpc/src/mprpcmessagewriter.cpp index 3224ea70..b465ba55 100644 --- a/solid/frame/mprpc/src/mprpcmessagewriter.cpp +++ b/solid/frame/mprpc/src/mprpcmessagewriter.cpp @@ -72,7 +72,7 @@ void MessageWriter::doWriteQueuePushBack(const size_t _msgidx, const int _line) } MessageStub& rmsgstub(message_vec_[_msgidx]); - solid_log(logger, Info, this << " code line = " << _line << " idx = " << _msgidx << " is_relay = " << rmsgstub.isRelay()); + solid_log(logger, Info, this << " code line = " << _line << " idx = " << _msgidx << " is_relay = " << rmsgstub.isRelay() << " msg = " << rmsgstub.msgbundle_.message_ptr.get()); if (!rmsgstub.isRelay()) { ++write_queue_direct_count_; } @@ -141,7 +141,7 @@ bool MessageWriter::enqueue( order_inner_list_.pushBack(idx); 
doWriteQueuePushBack(idx, __LINE__); - solid_log(logger, Verbose, "is_relayed = " << Message::is_relayed(rmsgstub.msgbundle_.message_ptr->flags()) << ' ' << MessageWriterPrintPairT(*this, PrintInnerListsE) << " relay " << rmsgstub.msgbundle_.message_relay_header_ << " " << _rmsgbundle.message_relay_header_); + solid_log(logger, Verbose, "messageptr = " << rmsgstub.msgbundle_.message_ptr.get() << " is_relayed = " << Message::is_relayed(rmsgstub.msgbundle_.message_ptr->flags()) << ' ' << MessageWriterPrintPairT(*this, PrintInnerListsE) << " relay " << rmsgstub.msgbundle_.message_relay_header_ << " " << _rmsgbundle.message_relay_header_); return true; } @@ -676,9 +676,11 @@ char* MessageWriter::doWriteMessageHead( _rsender.context().request_id.unique = rmsgstub.unique_; _rsender.context().message_flags = rmsgstub.msgbundle_.message_flags; if (rmsgstub.msgbundle_.message_relay_header_.has_value()) { + // solid_assert_log(_rsender.context().message_flags.isSet(MessageFlagsE::Relayed), logger, ""); diff --git a/solid/frame/mprpc/test/test_relay_basic.cpp b/solid/frame/mprpc/test/test_relay_basic.cpp --- a/solid/frame/mprpc/test/test_relay_basic.cpp +++ b/solid/frame/mprpc/test/test_relay_basic.cpp - solid_dbg(generic_logger, Info, _rctx.recipientId() << " received message with id on sender " << _rrecv_msg_ptr->senderRequestId() << " datasz = " << _rrecv_msg_ptr->str.size()); + solid_dbg(generic_logger, Info, _rctx.recipientId() << ' ' << _rrecv_msg_ptr.get() << " received message with id on sender " << _rrecv_msg_ptr->senderRequestId() << " datasz = " << _rrecv_msg_ptr->str.size() << " isRelay = " << _rrecv_msg_ptr->isRelayed()); if (!_rrecv_msg_ptr->check()) { solid_assert(false); From c22a9e553ccc16088ad4ec3248f3ce32de6fe73f Mon Sep 17 00:00:00 2001 From: Valentin Palade Date: Fri, 22 Mar 2024 18:52:05 +0200 Subject: [PATCH 04/13] threadpool: experimenting --- solid/utility/common.hpp | 2 +- solid/utility/src/utility.cpp | 3 + solid/utility/test/CMakeLists.txt | 1 + solid/utility/threadpool.hpp | 98 +++++++++++++++++++++---------- 4 files changed, 72 insertions(+), 32 deletions(-) diff --git a/solid/utility/common.hpp b/solid/utility/common.hpp index fbadf69e..b1235621 100644 --- a/solid/utility/common.hpp +++ b/solid/utility/common.hpp @@ -162,7 +162,7 @@ inline uint64_t bit_revert(const uint64_t _v) struct InvalidIndex { template <typename SizeT> - operator SizeT() const + constexpr operator SizeT() const { return (std::numeric_limits<SizeT>::max)(); } diff --git a/solid/utility/src/utility.cpp b/solid/utility/src/utility.cpp index 59f2179b..b55661b7 100644 --- a/solid/utility/src/utility.cpp +++ b/solid/utility/src/utility.cpp @@ -200,6 +200,9 @@ std::ostream& ThreadPoolStatistic::print(std::ostream& _ros) const _ros << " pop_one_wait_popping_count = " << pop_one_wait_popping_count_.load(std::memory_order_relaxed); _ros << " push_all_wait_lock_count = " << push_all_wait_lock_count_.load(std::memory_order_relaxed); _ros << " push_all_wait_pushing_count = " << push_all_wait_pushing_count_.load(std::memory_order_relaxed); + _ros << " push_one_latency_max_us = " << push_one_latency_max_us_.load(std::memory_order_relaxed); + _ros << " push_one_latency_min_us = " << push_one_latency_min_us_.load(std::memory_order_relaxed); + _ros << " push_one_latency_avg_us = " << push_one_latency_sum_us_.load(std::memory_order_relaxed)/(push_one_count_[0].load(std::memory_order_relaxed) + push_one_count_[1].load(std::memory_order_relaxed)); return _ros; } void ThreadPoolStatistic::clear() {} diff --git a/solid/utility/test/CMakeLists.txt b/solid/utility/test/CMakeLists.txt index afb951c9..835317c1 100644 --- a/solid/utility/test/CMakeLists.txt +++ b/solid/utility/test/CMakeLists.txt @@ -31,6 +31,7 @@ set( ThreadPoolTestSuite test_threadpool_basic.cpp test_threadpool_chain.cpp test_threadpool_pattern.cpp + test_threadpool_batch.cpp
#test_threadpool_try.cpp ) diff --git a/solid/utility/threadpool.hpp b/solid/utility/threadpool.hpp index 789fb1cb..bbd22ada 100644 --- a/solid/utility/threadpool.hpp +++ b/solid/utility/threadpool.hpp @@ -50,6 +50,9 @@ struct ThreadPoolStatistic : solid::Statistic { std::atomic_uint_fast64_t pop_one_wait_popping_count_ = {0}; std::atomic_uint_fast64_t push_all_wait_lock_count_ = {0}; std::atomic_uint_fast64_t push_all_wait_pushing_count_ = {0}; + std::atomic_uint_fast64_t push_one_latency_min_us_ = {0}; + std::atomic_uint_fast64_t push_one_latency_max_us_ = {0}; + std::atomic_uint_fast64_t push_one_latency_sum_us_ = {0}; ThreadPoolStatistic(); @@ -98,9 +101,13 @@ struct ThreadPoolStatistic : solid::Statistic { solid_statistic_max(max_consume_all_count_, _count); } - void pushOne(const bool _with_context) + void pushOne(const bool _with_context, const uint64_t _duration_us) { ++push_one_count_[_with_context]; + + solid_statistic_min(push_one_latency_min_us_, _duration_us); + solid_statistic_max(push_one_latency_max_us_, _duration_us); + push_one_latency_sum_us_ += _duration_us; } void pushAll(const bool _should_wake) { @@ -239,7 +246,7 @@ class TaskData { std::aligned_storage_t data_; public: - Task& task() + Task& task() noexcept { return *std::launder(reinterpret_cast(&data_)); } @@ -372,7 +379,8 @@ class ThreadPool : NonCopyable { }; private: - enum struct LockE : uint8_t { + using AtomicLockT = std::atomic_size_t; + enum struct LockE : AtomicLockT::value_type { Empty = 0, Pushing, Filled, @@ -383,7 +391,7 @@ class ThreadPool : NonCopyable { Stop, Wake, }; - struct OneStub : TaskData { + struct OneStub { #if defined(__cpp_lib_atomic_wait) std::atomic_flag pushing_ = ATOMIC_FLAG_INIT; std::atomic_flag popping_ = ATOMIC_FLAG_INIT; @@ -391,11 +399,27 @@ class ThreadPool : NonCopyable { std::atomic_bool pushing_ = {false}; std::atomic_bool popping_ = {false}; #endif - std::atomic_uint8_t lock_ = {to_underlying(LockE::Empty)}; - std::uint8_t event_ = {to_underlying(EventE::Fill)}; - ContextStub* pcontext_ = nullptr; - uint64_t all_id_ = 0; - uint64_t context_produce_id_ = 0; + AtomicLockT lock_ = {to_underlying(LockE::Empty)}; + std::uint8_t event_ = {to_underlying(EventE::Fill)}; + ContextStub* pcontext_ = nullptr; + uint64_t all_id_ = 0; + uint64_t context_produce_id_ = 0; + std::unique_ptr> data_ptr_ = std::make_unique>(); + + auto& task() noexcept + { + return data_ptr_->task(); + } + template + void task(T&& _rt) + { + data_ptr_->task(std::forward(_rt)); + } + + void destroy() + { + data_ptr_->destroy(); + } void clear() noexcept { @@ -415,9 +439,10 @@ class ThreadPool : NonCopyable { #endif if (!already_pushing) { // wait for lock to be 0. 
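// Lock protocol used by these stubs: Empty -> Pushing -> Filled -> Popping
// -> Empty. A producer CASes Empty -> Pushing, fills the task data, then
// stores Filled and notifies; a consumer CASes Filled -> Popping before
// consuming. Every atomic_wait sits inside a CAS retry loop, so a spurious
// or missed wakeup only costs another iteration, not a lost task.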
- uint8_t value = to_underlying(LockE::Empty); + std::underlying_type_t value = to_underlying(LockE::Empty); if (!lock_.compare_exchange_weak(value, to_underlying(LockE::Pushing))) { + throw false; do { std::atomic_wait(&lock_, value); value = to_underlying(LockE::Empty); @@ -436,11 +461,13 @@ class ThreadPool : NonCopyable { } } - void notifyWhilePushOne() noexcept + void notifyWhilePushOne(std::chrono::time_point const& _start, uint64_t& _rduration) noexcept { + using namespace std::chrono; event_ = to_underlying(EventE::Fill); lock_.store(to_underlying(LockE::Filled)); std::atomic_notify_one(&lock_); + _rduration = duration_cast(steady_clock::now() - _start).count(); #if defined(__cpp_lib_atomic_wait) pushing_.clear(std::memory_order_release); pushing_.notify_one(); @@ -503,15 +530,14 @@ class ThreadPool : NonCopyable { #endif if (!already_popping) { // wait for lock to be 1 or 2. - uint8_t value = to_underlying(LockE::Filled); + std::underlying_type_t value; - if (!lock_.compare_exchange_weak(value, to_underlying(LockE::Popping))) { + if (!lock_.compare_exchange_weak(value= to_underlying(LockE::Filled), to_underlying(LockE::Popping))) { do { if (!_try_consume_an_all_fnc(&lock_, _all_fnc, std::forward(_args)...)) { std::atomic_wait(&lock_, value); } - value = to_underlying(LockE::Filled); - } while (!lock_.compare_exchange_weak(value, to_underlying(LockE::Popping))); + } while (!lock_.compare_exchange_weak(value= to_underlying(LockE::Filled), to_underlying(LockE::Popping))); _rstats.popOneWaitLock(); } return static_cast(event_); @@ -546,7 +572,7 @@ class ThreadPool : NonCopyable { #else std::atomic_bool pushing_ = {false}; #endif - std::atomic_uint8_t lock_ = {to_underlying(LockE::Empty)}; + AtomicLockT lock_ = {to_underlying(LockE::Empty)}; std::atomic_uint32_t use_count_ = {0}; std::atomic_uint64_t id_ = {0}; @@ -560,7 +586,7 @@ class ThreadPool : NonCopyable { const bool already_pushing = !pushing_.compare_exchange_strong(expected, true, std::memory_order_acquire); #endif if (!already_pushing) { - uint8_t value = to_underlying(LockE::Empty); + std::underlying_type_t value = to_underlying(LockE::Empty); if (!lock_.compare_exchange_weak(value, to_underlying(LockE::Pushing))) { do { @@ -623,8 +649,9 @@ class ThreadPool : NonCopyable { std::unique_ptr tasks_; } all_; /* alignas(hardware_constructive_interference_size) */ struct { - size_t capacity_{0}; - std::unique_ptr tasks_; + size_t capacity_{0}; + //std::unique_ptr tasks_; + std::vector> tasks_; } one_; Stats statistic_; alignas(hardware_destructive_interference_size) std::atomic_size_t push_one_index_{0}; @@ -727,7 +754,7 @@ class ThreadPool : NonCopyable { template < class AllFnc, typename... Args> - bool tryConsumeAnAllTask(std::atomic_uint8_t* _plock, LocalContext& _rlocal_context, AllFnc& _all_fnc, Args&&... _args); + bool tryConsumeAnAllTask(AtomicLockT* _plock, LocalContext& _rlocal_context, AllFnc& _all_fnc, Args&&... _args); template < class AllFnc, typename... Args> @@ -1019,7 +1046,14 @@ void ThreadPool::doStart( const auto thread_count = _thread_count ? _thread_count : std::thread::hardware_concurrency(); one_.capacity_ = _one_capacity >= thread_count ? _one_capacity : std::max(static_cast(1024), thread_count); +#if 0 one_.tasks_.reset(new OneStubT[one_.capacity_]); +#else + one_.tasks_.resize(one_.capacity_); + for (auto& task : one_.tasks_) { + task = std::make_unique(); + } +#endif all_.capacity_ = _all_capacity ? 
_all_capacity : 1; all_.tasks_.reset(new AllStubT[all_.capacity_]); @@ -1046,7 +1080,7 @@ void ThreadPool::doStop() } for (size_t i = 0; i < threads_.size(); ++i) { - auto& rstub = one_.tasks_[pushOneIndex()]; + auto& rstub = *one_.tasks_[pushOneIndex()]; rstub.waitWhileStop(statistic_); rstub.notifyWhileStop(); @@ -1072,13 +1106,13 @@ void ThreadPool::doRun( while (true) { const size_t index = popOneIndex(); - auto& rstub = one_.tasks_[index]; + auto& rstub = *one_.tasks_[index]; uint64_t local_one_context_count = 0; const auto event = rstub.waitWhilePop( statistic_, [this, &local_context]( - std::atomic_uint8_t* _plock, - AllFnc& _all_fnc, + AtomicLockT* _plock, + AllFnc& _all_fnc, Args&&... _args) { // we need to make sure that, after processing an all_task, no new one_task can have // the all_id less than the all task that we have just processed. @@ -1174,7 +1208,7 @@ template template < class AllFnc, typename... Args> -bool ThreadPool::tryConsumeAnAllTask(std::atomic_uint8_t* _plock, LocalContext& _rlocal_context, AllFnc& _all_fnc, Args&&... _args) +bool ThreadPool::tryConsumeAnAllTask(AtomicLockT* _plock, LocalContext& _rlocal_context, AllFnc& _all_fnc, Args&&... _args) { auto& rstub = all_.tasks_[_rlocal_context.next_all_id_ % all_.capacity_]; if (rstub.isFilled(_rlocal_context.next_all_id_)) { @@ -1235,8 +1269,10 @@ template template void ThreadPool::doPushOne(Tsk&& _task, ContextStub* _pctx) { + using namespace std::chrono; + const auto start = steady_clock::now(); const auto index = pushOneIndex(); - auto& rstub = one_.tasks_[index]; + auto& rstub = *one_.tasks_[index]; rstub.waitWhilePushOne(statistic_); @@ -1248,10 +1284,10 @@ void ThreadPool::doPushOne(Tsk&& _task, ContextStub* _p _pctx->acquire(); rstub.context_produce_id_ = _pctx->produce_id_.fetch_add(1); } - - rstub.notifyWhilePushOne(); - - statistic_.pushOne(_pctx != nullptr); + uint64_t duration; + rstub.notifyWhilePushOne(start, duration); + // const uint64_t duration = duration_cast(steady_clock::now() - start).count(); + statistic_.pushOne(_pctx != nullptr, duration); } //----------------------------------------------------------------------------- // NOTE: @@ -1277,7 +1313,7 @@ void ThreadPool::doPushAll(Tsk&& _task) if (should_wake_threads) { for (size_t i = 0; i < threads_.size(); ++i) { - auto& rstub = one_.tasks_[pushOneIndex()]; + auto& rstub = *one_.tasks_[pushOneIndex()]; rstub.waitWhilePushAll(statistic_); From e6459684fe7732c211922d685f96afa763e88964 Mon Sep 17 00:00:00 2001 From: Valentin Palade Date: Sat, 23 Mar 2024 10:13:00 +0200 Subject: [PATCH 05/13] threadpool: keep experimenting --- solid/utility/test/test_threadpool_batch.cpp | 300 +++++++++++++++++++ 1 file changed, 300 insertions(+) create mode 100644 solid/utility/test/test_threadpool_batch.cpp diff --git a/solid/utility/test/test_threadpool_batch.cpp b/solid/utility/test/test_threadpool_batch.cpp new file mode 100644 index 00000000..17e9a55d --- /dev/null +++ b/solid/utility/test/test_threadpool_batch.cpp @@ -0,0 +1,300 @@ +#include "solid/system/exception.hpp" +#include "solid/system/log.hpp" +#include "solid/system/statistic.hpp" +#include "solid/utility/function.hpp" +#include "solid/utility/threadpool.hpp" +#include +#include +#include +#include + +using namespace solid; +using namespace std; + +namespace { +const LoggerT logger("test"); +struct Context { + atomic min_{InvalidSize{}}; + atomic max_{0}; + atomic sum_{0}; + atomic count_{0}; + + ostream& print(ostream& _ros) const + { + const auto avg = sum_ / count_; + _ros << "min " << min_ 
<< " max " << max_ << " avg " << avg; + return _ros; + } +}; + +ostream& operator<<(ostream& _ros, const Context& _rctx) +{ + return _rctx.print(_ros); +} + +constexpr size_t one_task_size = 64; + +using CallPoolT = ThreadPool, Function>; +struct Entry { + CallPoolT::SynchronizationContextT ctx_; +}; + +constexpr size_t thread_count = 10; + +#ifdef SOLID_ON_LINUX +vector isolcpus = {3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19}; + +void set_current_thread_affinity() +{ + if (std::thread::hardware_concurrency() < (thread_count + isolcpus[0])) { + return; + } + static std::atomic crtCore(0); + + const int isolCore = isolcpus[crtCore.fetch_add(1) % isolcpus.size()]; + cpu_set_t cpuset; + CPU_ZERO(&cpuset); + CPU_SET(isolCore, &cpuset); + int rc = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset); + // solid_check(rc == 0); + (void)rc; +} +#else +void set_current_thread_affinity() +{ +} +#endif + +} // namespace + +int test_threadpool_batch(int argc, char* argv[]) +{ + solid::log_start(std::cerr, {".*:EWXS", "test:VIEWS"}); + int wait_seconds = 500; + size_t entry_count = 300; +#if 0 + auto lambda = [&]() { + Context ctx; + { + solid_log(logger, Verbose, "start"); + CallPoolT wp{ + thread_count, 12000, 100, [](const size_t, Context&) {}, [](const size_t, Context& _rctx) {}, + std::ref(ctx)}; + solid_log(logger, Verbose, "create contexts"); + vector entries; + for (size_t i = 0; i < entry_count; ++i) { + entries.emplace_back(wp.createSynchronizationContext()); + } + + solid_log(logger, Verbose, "wp started"); + uint64_t tmin{InvalidSize{}}; + uint64_t tmax{0}; + uint64_t tsum{0}; + uint64_t tcnt{0}; + for (size_t i = 0; i < 40; ++i) { + auto start = chrono::steady_clock::now(); + + for (size_t j = 0; j < entries.size(); ++j) { + auto& entry{entries[j]}; + auto lambda = [&entry, start, j](Context& _rctx) mutable { + const auto now = chrono::steady_clock::now(); + const uint64_t duration = chrono::duration_cast(now - start).count(); + store_min(_rctx.min_, duration); + store_max(_rctx.max_, duration); + _rctx.sum_ += duration; + ++_rctx.count_; + //this_thread::sleep_for(chrono::microseconds(1)); + }; + static_assert(sizeof(lambda) <= one_task_size); + entry.ctx_.push(std::move(lambda)); + //wp.pushOne(std::move(lambda)); + } + { + const uint64_t duration = chrono::duration_cast(chrono::steady_clock::now() - start).count(); + store_min(tmin, duration); + store_max(tmax, duration); + tsum += duration; + ++tcnt; + } + //this_thread::sleep_for(chrono::milliseconds(100)); + } + solid_log(logger, Verbose, "min " << tmin << " max " << tmax << " avg " << tsum / tcnt << " " << ctx); + solid_log(logger, Statistic, "ThreadPool statistic: " << wp.statistic()); + } + solid_log(logger, Verbose, "after loop"); + }; +#elif 0 + static constexpr size_t stopping = InvalidIndex{}; + static constexpr size_t popping = 1; + static constexpr size_t pushing = 2; + static constexpr size_t empty = 3; + static constexpr size_t filled = 4; + struct /* alignas(std::hardware_destructive_interference_size) */ ThreadContext{ + thread thr_; + atomic_size_t lock_ = {empty}; + size_t value_ = 0; + + void push(){ + size_t value; + while(!lock_.compare_exchange_weak(value = empty, pushing)){ + std::atomic_wait(&lock_, value); + } + + ++value_; + + lock_.store(filled); + std::atomic_notify_one(&lock_); + } + + void stop(){ + size_t value; + while(!lock_.compare_exchange_weak(value = empty, pushing)){ + std::atomic_wait(&lock_, value); + } + + value_ = stopping; + + lock_.store(filled); + 
std::atomic_notify_one(&lock_); + } + + bool pop(size_t &_expected_value){ + size_t value; + while(!lock_.compare_exchange_weak(value = filled, popping)){ + std::atomic_wait(&lock_, value); + } + + if(value_ == stopping) return false; + + solid_check(value_ == _expected_value); + ++_expected_value; + + lock_.store(empty); + std::atomic_notify_one(&lock_); + + return true; + } + }; + auto lambda = [&]() { + set_current_thread_affinity(); + unique_ptr ctxs{new ThreadContext[thread_count]}; + for(size_t i= 0; i < thread_count;++i){ + auto &ctx = ctxs[i]; + ctx.thr_ = thread( + [](ThreadContext &_rctx){ + set_current_thread_affinity(); + size_t expected_val = 1; + while(_rctx.pop(expected_val)); + }, ref(ctx) + ); + } + uint64_t tmin{InvalidSize{}}; + uint64_t tmax{0}; + uint64_t tsum{0}; + uint64_t tcnt{0}; + for(size_t i = 0; i < 40; ++i){ + const auto start = chrono::steady_clock::now(); + for(size_t j = 0; j < entry_count; ++j){ + auto &rctx = ctxs[j % thread_count]; + rctx.push(); + } + { + const uint64_t duration = chrono::duration_cast(chrono::steady_clock::now() - start).count(); + store_min(tmin, duration); + store_max(tmax, duration); + tsum += duration; + ++tcnt; + } + } + for(size_t i= 0; i < thread_count;++i){ + auto &ctx = ctxs[i]; + ctx.stop(); + ctx.thr_.join(); + } + solid_log(logger, Verbose, "min " << tmin << " max " << tmax << " avg " << tsum / tcnt); + }; +#else + static constexpr size_t stopping = InvalidIndex{}; + static constexpr size_t popping = 1; + static constexpr size_t pushing = 2; + static constexpr size_t empty = 3; + static constexpr size_t filled = 4; + struct alignas(std::hardware_destructive_interference_size) ThreadContext{ + thread thr_; + binary_semaphore push_sem_{1}; + binary_semaphore pop_sem_{0}; + + size_t value_ = 0; + + void push(){ + push_sem_.acquire(); + + ++value_; + + pop_sem_.release(); + } + + void stop(){ + push_sem_.acquire(); + value_ = stopping; + pop_sem_.release(); + } + + bool pop(size_t &_expected_value){ + pop_sem_.acquire(); + if(value_ == stopping) return false; + + solid_check(value_ == _expected_value); + ++_expected_value; + push_sem_.release(); + return true; + } + }; + auto lambda = [&]() { + set_current_thread_affinity(); + unique_ptr ctxs{new ThreadContext[thread_count]}; + for(size_t i= 0; i < thread_count;++i){ + auto &ctx = ctxs[i]; + ctx.thr_ = thread( + [](ThreadContext &_rctx){ + set_current_thread_affinity(); + size_t expected_val = 1; + while(_rctx.pop(expected_val)); + }, ref(ctx) + ); + } + uint64_t tmin{InvalidSize{}}; + uint64_t tmax{0}; + uint64_t tsum{0}; + uint64_t tcnt{0}; + for(size_t i = 0; i < 40; ++i){ + const auto start = chrono::steady_clock::now(); + for(size_t j = 0; j < entry_count; ++j){ + auto &rctx = ctxs[j % thread_count]; + rctx.push(); + } + { + const uint64_t duration = chrono::duration_cast(chrono::steady_clock::now() - start).count(); + store_min(tmin, duration); + store_max(tmax, duration); + tsum += duration; + ++tcnt; + } + } + for(size_t i= 0; i < thread_count;++i){ + auto &ctx = ctxs[i]; + ctx.stop(); + ctx.thr_.join(); + } + solid_log(logger, Verbose, "min " << tmin << " max " << tmax << " avg " << tsum / tcnt); + }; +#endif + auto fut = async(launch::async, lambda); + if (fut.wait_for(chrono::seconds(wait_seconds)) != future_status::ready) { + solid_throw(" Test is taking too long - waited " << wait_seconds << " secs"); + } + fut.get(); + solid_log(logger, Verbose, "after async wait"); + + return 0; +} \ No newline at end of file From 4f72c7756fcf157126d6e9d6c58d975aab315280 Mon Sep 
17 00:00:00 2001 From: Palade Valentin Date: Thu, 11 Apr 2024 19:38:57 +0530 Subject: [PATCH 06/13] Update threadpool.hpp - using spin while wait --- solid/utility/threadpool.hpp | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/solid/utility/threadpool.hpp b/solid/utility/threadpool.hpp index bbd22ada..cb76ffb4 100644 --- a/solid/utility/threadpool.hpp +++ b/solid/utility/threadpool.hpp @@ -349,6 +349,7 @@ struct LocalContext { template class ThreadPool : NonCopyable { public: + static constexpr size_t spin_count = 10000; struct ContextStub { using TaskQueueT = TaskList; std::atomic_size_t use_count_{1}; @@ -442,9 +443,12 @@ class ThreadPool : NonCopyable { std::underlying_type_t value = to_underlying(LockE::Empty); if (!lock_.compare_exchange_weak(value, to_underlying(LockE::Pushing))) { - throw false; + auto spin = spin_count; do { - std::atomic_wait(&lock_, value); + if(!spin--){ + spin = 0; + std::atomic_wait(&lock_, value); + } value = to_underlying(LockE::Empty); } while (!lock_.compare_exchange_weak(value, to_underlying(LockE::Pushing))); _rstats.pushOneWaitLock(); @@ -533,8 +537,10 @@ class ThreadPool : NonCopyable { std::underlying_type_t value; if (!lock_.compare_exchange_weak(value= to_underlying(LockE::Filled), to_underlying(LockE::Popping))) { + auto spin = spin_count; do { - if (!_try_consume_an_all_fnc(&lock_, _all_fnc, std::forward(_args)...)) { + if (!_try_consume_an_all_fnc(&lock_, _all_fnc, std::forward(_args)...) && !spin--) { + spin = 0; std::atomic_wait(&lock_, value); } } while (!lock_.compare_exchange_weak(value= to_underlying(LockE::Filled), to_underlying(LockE::Popping))); @@ -589,8 +595,12 @@ class ThreadPool : NonCopyable { std::underlying_type_t value = to_underlying(LockE::Empty); if (!lock_.compare_exchange_weak(value, to_underlying(LockE::Pushing))) { + auto spin = spin_count; do { - std::atomic_wait(&lock_, value); + if(!spin--){ + spin = 0; + std::atomic_wait(&lock_, value); + } value = to_underlying(LockE::Empty); } while (!lock_.compare_exchange_weak(value, to_underlying(LockE::Pushing))); _rstats.pushAllWaitLock(); @@ -1335,4 +1345,4 @@ typename ThreadPool::ContextStub* ThreadPool Date: Tue, 4 Jun 2024 20:18:30 +0300 Subject: [PATCH 07/13] threadpool: simplified and fixed thread notify wake mechanism --- .../frame/mprpc_echo/example_mprpc_echo.cpp | 2 +- solid/frame/aio/test/test_echo_tcp_stress.cpp | 6 +- solid/frame/mprpc/src/mprpclistener.cpp | 3 +- .../mprpc/test/test_clientserver_topic.cpp | 4 +- solid/frame/src/manager.cpp | 12 +- solid/system/src/stacktrace_windows.cpp | 3 +- solid/utility/src/utility.cpp | 3 +- solid/utility/test/test_collapse.cpp | 4 +- solid/utility/test/test_threadpool_batch.cpp | 337 +++++++++++++---- solid/utility/threadpool.hpp | 344 ++++++++---------- tutorials/aio_echo/aio_echo_server.cpp | 6 +- 11 files changed, 428 insertions(+), 296 deletions(-) diff --git a/examples/frame/mprpc_echo/example_mprpc_echo.cpp b/examples/frame/mprpc_echo/example_mprpc_echo.cpp index e98c33b1..c26bcafb 100644 --- a/examples/frame/mprpc_echo/example_mprpc_echo.cpp +++ b/examples/frame/mprpc_echo/example_mprpc_echo.cpp @@ -211,7 +211,7 @@ bool restart( reflection::v1::metadata::factory, [&](auto& _rmap) { _rmap.template registerMessage(1, "FirstMessage", - []( + []( frame::mprpc::ConnectionContext& _rctx, frame::mprpc::MessagePointerT& _rsend_msg, frame::mprpc::MessagePointerT& _rrecv_msg, diff --git a/solid/frame/aio/test/test_echo_tcp_stress.cpp b/solid/frame/aio/test/test_echo_tcp_stress.cpp index 
5e00b430..5734534a 100644 --- a/solid/frame/aio/test/test_echo_tcp_stress.cpp +++ b/solid/frame/aio/test/test_echo_tcp_stress.cpp @@ -695,8 +695,7 @@ void Listener::onAccept(frame::aio::ReactorContext& _rctx, SocketDevice& _rsd) break; } --repeatcnt; - } while (repeatcnt != 0u && sock.accept( - _rctx, [this](frame::aio::ReactorContext& _rctx, SocketDevice& _rsd) { onAccept(_rctx, _rsd); }, _rsd)); + } while (repeatcnt != 0u && sock.accept(_rctx, [this](frame::aio::ReactorContext& _rctx, SocketDevice& _rsd) { onAccept(_rctx, _rsd); }, _rsd)); if (repeatcnt == 0u) { sock.postAccept( @@ -1004,8 +1003,7 @@ void Listener::onAccept(frame::aio::ReactorContext& _rctx, SocketDevice& _rsd) break; } --repeatcnt; - } while (repeatcnt != 0u && sock.accept( - _rctx, [this](frame::aio::ReactorContext& _rctx, SocketDevice& _rsd) { onAccept(_rctx, _rsd); }, _rsd)); + } while (repeatcnt != 0u && sock.accept(_rctx, [this](frame::aio::ReactorContext& _rctx, SocketDevice& _rsd) { onAccept(_rctx, _rsd); }, _rsd)); if (repeatcnt == 0u) { sock.postAccept( diff --git a/solid/frame/mprpc/src/mprpclistener.cpp b/solid/frame/mprpc/src/mprpclistener.cpp index b5b56545..0186b184 100644 --- a/solid/frame/mprpc/src/mprpclistener.cpp +++ b/solid/frame/mprpc/src/mprpclistener.cpp @@ -71,8 +71,7 @@ void Listener::onAccept(frame::aio::ReactorContext& _rctx, SocketDevice& _rsd) } --repeatcnt; } while ( - repeatcnt != 0u && sock.accept( - _rctx, [this](frame::aio::ReactorContext& _rctx, SocketDevice& _rsd) { onAccept(_rctx, _rsd); }, _rsd)); + repeatcnt != 0u && sock.accept(_rctx, [this](frame::aio::ReactorContext& _rctx, SocketDevice& _rsd) { onAccept(_rctx, _rsd); }, _rsd)); if (repeatcnt == 0u) { sock.postAccept( diff --git a/solid/frame/mprpc/test/test_clientserver_topic.cpp b/solid/frame/mprpc/test/test_clientserver_topic.cpp index 3874ed25..856939e2 100644 --- a/solid/frame/mprpc/test/test_clientserver_topic.cpp +++ b/solid/frame/mprpc/test/test_clientserver_topic.cpp @@ -3,9 +3,9 @@ #include #include #include -#ifdef __cpp_lib_ranges +// #ifdef __cpp_lib_ranges #include -#endif +// #endif #include "solid/frame/mprpc/mprpcsocketstub_openssl.hpp" diff --git a/solid/frame/src/manager.cpp b/solid/frame/src/manager.cpp index 52c9d6cf..94c7cac6 100644 --- a/solid/frame/src/manager.cpp +++ b/solid/frame/src/manager.cpp @@ -515,12 +515,12 @@ Manager::Manager( const size_t _actor_mutex_count, const size_t _chunk_mutex_count) : pimpl_( - _service_capacity, - _actor_capacity, - _actor_bucket_size == 0 ? (memory_page_size() - sizeof(ActorChunk) + sizeof(ActorStub)) / sizeof(ActorStub) : _actor_bucket_size, - _service_mutex_count == 0 ? _service_capacity : _service_mutex_count, - _actor_mutex_count == 0 ? 1024 : _actor_mutex_count, - _chunk_mutex_count == 0 ? 1024 : _chunk_mutex_count) + _service_capacity, + _actor_capacity, + _actor_bucket_size == 0 ? (memory_page_size() - sizeof(ActorChunk) + sizeof(ActorStub)) / sizeof(ActorStub) : _actor_bucket_size, + _service_mutex_count == 0 ? _service_capacity : _service_mutex_count, + _actor_mutex_count == 0 ? 1024 : _actor_mutex_count, + _chunk_mutex_count == 0 ? 
1024 : _chunk_mutex_count) { solid_log(frame_logger, Verbose, "" << this); } diff --git a/solid/system/src/stacktrace_windows.cpp b/solid/system/src/stacktrace_windows.cpp index 9fa19dbd..1ad678a4 100644 --- a/solid/system/src/stacktrace_windows.cpp +++ b/solid/system/src/stacktrace_windows.cpp @@ -30,8 +30,7 @@ #define g3_MAP_PAIR_STRINGIFY(x) \ { \ - x, #x \ - } + x, #x} namespace { thread_local size_t g_thread_local_recursive_crash_check = 0; diff --git a/solid/utility/src/utility.cpp b/solid/utility/src/utility.cpp index b55661b7..1fd43b44 100644 --- a/solid/utility/src/utility.cpp +++ b/solid/utility/src/utility.cpp @@ -202,7 +202,8 @@ std::ostream& ThreadPoolStatistic::print(std::ostream& _ros) const _ros << " push_all_wait_pushing_count = " << push_all_wait_pushing_count_.load(std::memory_order_relaxed); _ros << " push_one_latency_max_us = " << push_one_latency_max_us_.load(std::memory_order_relaxed); _ros << " push_one_latency_min_us = " << push_one_latency_min_us_.load(std::memory_order_relaxed); - _ros << " push_one_latency_avg_us = " << push_one_latency_sum_us_.load(std::memory_order_relaxed)/(push_one_count_[0].load(std::memory_order_relaxed) + push_one_count_[1].load(std::memory_order_relaxed)); + const auto sum_ones = push_one_count_[0].load(std::memory_order_relaxed) + push_one_count_[1].load(std::memory_order_relaxed); + _ros << " push_one_latency_avg_us = " << (sum_ones ? push_one_latency_sum_us_.load(std::memory_order_relaxed) / sum_ones : 0); return _ros; } void ThreadPoolStatistic::clear() {} diff --git a/solid/utility/test/test_collapse.cpp b/solid/utility/test/test_collapse.cpp index 8ea93fcd..9c6c7eb2 100644 --- a/solid/utility/test/test_collapse.cpp +++ b/solid/utility/test/test_collapse.cpp @@ -54,7 +54,7 @@ using SharedMessageT = IntrusivePtr; int test_collapse(int argc, char* argv[]) { - solid::log_start(std::cerr, {".*:VIEWXS"}); + solid::log_start(std::cerr, {".*:EWXS"}); char choice = 'B'; // B = basic, p = speed shared_ptr, b = speed SharedBuffer size_t repeat_count = 100; @@ -138,7 +138,7 @@ int test_collapse(int argc, char* argv[]) p.set_value(std::move(tmp_sm)); } { - if (f.wait_for(chrono::seconds(5)) != future_status::ready) { + if (f.wait_for(chrono::seconds(5000)) != future_status::ready) { solid_throw("Waited for too long"); } sm = f.get(); diff --git a/solid/utility/test/test_threadpool_batch.cpp b/solid/utility/test/test_threadpool_batch.cpp index 17e9a55d..df4fc8a6 100644 --- a/solid/utility/test/test_threadpool_batch.cpp +++ b/solid/utility/test/test_threadpool_batch.cpp @@ -21,7 +21,7 @@ struct Context { ostream& print(ostream& _ros) const { - const auto avg = sum_ / count_; + const auto avg = count_ ?
sum_ / count_ : 0; _ros << "min " << min_ << " max " << max_ << " avg " << avg; return _ros; } @@ -42,7 +42,7 @@ struct Entry { constexpr size_t thread_count = 10; #ifdef SOLID_ON_LINUX -vector isolcpus = {3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19}; +vector isolcpus = {/*3, 4, 5, 6,*/ 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19}; void set_current_thread_affinity() { @@ -65,6 +65,25 @@ void set_current_thread_affinity() } #endif +template +void busy_for(const Dur& _dur) +{ + const auto stop = chrono::steady_clock::now() + _dur; + while (chrono::steady_clock::now() < stop) + ; +} +using AtomicCounterT = std::atomic; +using AtomicCounterValueT = AtomicCounterT::value_type; +atomic_size_t push_one_index{0}; +size_t capacity = 1024; + +std::tuple pushOneIndex() noexcept +{ + // return push_one_index_.fetch_add(1) % one_.capacity_; + const auto index = push_one_index.fetch_add(1); + return {index % capacity, (index / capacity) & std::numeric_limits::max()}; +} + } // namespace int test_threadpool_batch(int argc, char* argv[]) @@ -72,8 +91,30 @@ int test_threadpool_batch(int argc, char* argv[]) solid::log_start(std::cerr, {".*:EWXS", "test:VIEWS"}); int wait_seconds = 500; size_t entry_count = 300; -#if 0 - auto lambda = [&]() { + size_t repeat_count = 1000000; + { + vector cnt_vec(capacity, 0); + for (size_t i = 0; i < (capacity * std::numeric_limits::max() + capacity); ++i) { + const auto [index, counter] = pushOneIndex(); + solid_check(cnt_vec[index] == counter, "" << (int)cnt_vec[index] << " != " << (int)counter << " index = " << index << " i = " << i); + ++cnt_vec[index]; + } + + push_one_index = std::numeric_limits::max() - capacity + 1; + + for (auto& rv : cnt_vec) { + rv = 238; + } + + for (size_t i = 0; i < (capacity * std::numeric_limits::max() + capacity); ++i) { + const auto [index, counter] = pushOneIndex(); + solid_check(cnt_vec[index] == counter, "" << (int)cnt_vec[index] << " != " << (int)counter << " index = " << index << " i = " << i); + ++cnt_vec[index]; + } + } + +#if 1 + auto lambda = [&]() { Context ctx; { solid_log(logger, Verbose, "start"); @@ -103,11 +144,11 @@ int test_threadpool_batch(int argc, char* argv[]) store_max(_rctx.max_, duration); _rctx.sum_ += duration; ++_rctx.count_; - //this_thread::sleep_for(chrono::microseconds(1)); + // this_thread::sleep_for(chrono::microseconds(1)); }; static_assert(sizeof(lambda) <= one_task_size); entry.ctx_.push(std::move(lambda)); - //wp.pushOne(std::move(lambda)); + // wp.pushOne(std::move(lambda)); } { const uint64_t duration = chrono::duration_cast(chrono::steady_clock::now() - start).count(); @@ -116,7 +157,7 @@ int test_threadpool_batch(int argc, char* argv[]) tsum += duration; ++tcnt; } - //this_thread::sleep_for(chrono::milliseconds(100)); + // this_thread::sleep_for(chrono::milliseconds(100)); } solid_log(logger, Verbose, "min " << tmin << " max " << tmax << " avg " << tsum / tcnt << " " << ctx); solid_log(logger, Statistic, "ThreadPool statistic: " << wp.statistic()); @@ -124,48 +165,198 @@ int test_threadpool_batch(int argc, char* argv[]) solid_log(logger, Verbose, "after loop"); }; #elif 0 - static constexpr size_t stopping = InvalidIndex{}; - static constexpr size_t popping = 1; - static constexpr size_t pushing = 2; - static constexpr size_t empty = 3; - static constexpr size_t filled = 4; - struct /* alignas(std::hardware_destructive_interference_size) */ ThreadContext{ - thread thr_; - atomic_size_t lock_ = {empty}; - size_t value_ = 0; - - void push(){ + // clang-format off + vector 
busy_cons_vec{ + 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, + 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, + 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, + 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, + 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, + 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, + 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, + 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, + 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, + 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, + 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, + 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, + 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, + 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, + 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, + 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, + 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, + 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, + 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, + 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, + 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, + 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, + 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, + 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, + 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, + 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, + 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, + 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, + 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, + 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, + 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, + 30000, 30000, 30000, 30000, 30000, 30000, 30000, 30000, 30000, 30000, + 30000, 30000, 30000, 30000, 30000, 30000, 30000, 30000, 30000, 30000, + 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, + 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, + 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, + 30000, 30000, 30000, 30000, 30000, 30000, 30000, 30000, 30000, 30000, + 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, + 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, + 30000, 30000, 30000, 30000, 30000, 30000, 30000, 30000, 30000, 30000, + 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, + 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, + 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, + 30000, 30000, 30000, 30000, 30000, 30000, 30000, 30000, 30000, 30000, + 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, + 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, + 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, + 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, + 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 
10, 10, 10, 10, 10, 10, 10, 10, 10, 10, + 30000, 30000, 30000, 30000, 30000, 30000, 30000, 30000, 30000, 30000, + 30000, 30000, 30000, 30000, 30000, 30000, 30000, 30000, 30000, 30000, + 30000, 30000, 30000, 30000, 30000, 30000, 30000, 30000, 30000, 30000, + 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, + 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, + 30000, 30000, 30000, 30000, 30000, 30000, 30000, 30000, 30000, 30000, + 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, + 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, + 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, + 30000, 30000, 30000, 30000, 30000, 30000, 30000, 30000, 30000, 30000, + 30000, 30000, 30000, 30000, 30000, 30000, 30000, 30000, 30000, 30000, + 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, + 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, + 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, + }; + vector busy_prod_vec{ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, + 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, + 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, + 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, + 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, + }; + // clang-format on + auto lambda = [&]() { + Context ctx; + { + solid_log(logger, Verbose, "start"); + CallPoolT wp{ + thread_count, 12000, 100, [](const size_t, Context&) {}, [](const size_t, Context& _rctx) {}, + std::ref(ctx)}; + solid_log(logger, Verbose, "create contexts"); + vector entries; + for (size_t i = 0; i < entry_count; ++i) { + entries.emplace_back(wp.createSynchronizationContext()); + } + + solid_log(logger, Verbose, "wp started"); + uint64_t tmin{InvalidSize{}}; + uint64_t tmax{0}; + uint64_t tsum{0}; + uint64_t tcnt{0}; + uint64_t prod_dur_ns = 0; + uint64_t cons_dur_ns = 0; + for (size_t i = 0; i < repeat_count; ++i) { + const auto prod_ns = busy_prod_vec[i % busy_prod_vec.size()]; + busy_for(std::chrono::nanoseconds(prod_ns)); + prod_dur_ns += prod_ns; + + auto start = chrono::steady_clock::now(); + auto& entry{entries[i % entries.size()]}; + const auto busy_ns = std::chrono::nanoseconds(busy_cons_vec[i % busy_cons_vec.size()]); + cons_dur_ns += busy_ns.count(); + auto lambda = [&entry, start, busy_ns](Context& _rctx) mutable { + const auto now = chrono::steady_clock::now(); + const uint64_t duration = chrono::duration_cast(now - start).count(); + store_min(_rctx.min_, duration); + store_max(_rctx.max_, duration); + _rctx.sum_ += duration; + ++_rctx.count_; + // 
this_thread::sleep_for(chrono::microseconds(1)); + busy_for(busy_ns); + }; + entry.ctx_.push(std::move(lambda)); + { + const uint64_t duration = chrono::duration_cast(chrono::steady_clock::now() - start).count(); + store_min(tmin, duration); + store_max(tmax, duration); + tsum += duration; + ++tcnt; + } + // this_thread::sleep_for(chrono::milliseconds(100)); + } + + // this_thread::sleep_for(chrono::seconds(100)); + chrono::microseconds prod_dur = chrono::duration_cast(chrono::nanoseconds(prod_dur_ns)); + chrono::microseconds cons_dur = chrono::duration_cast(chrono::nanoseconds(cons_dur_ns)); + solid_log(logger, Verbose, "min " << tmin << " max " << tmax << " avg " << tsum / tcnt << " " << ctx << " prod_dur(us): " << prod_dur.count() << " cons_dur(us): " << cons_dur.count()); + solid_log(logger, Statistic, "ThreadPool statistic: " << wp.statistic()); + } + solid_log(logger, Verbose, "after loop"); + }; +#elif 0 + using LockT = atomic; + static constexpr LockT::value_type stopping = InvalidIndex{}; + static constexpr LockT::value_type popping = 1; + static constexpr LockT::value_type pushing = 2; + static constexpr LockT::value_type empty = 3; + static constexpr LockT::value_type filled = 4; + struct /* alignas(std::hardware_destructive_interference_size) */ ThreadContext { + thread thr_; + atomic_size_t lock_ = {empty}; + size_t value_ = 0; + + void push() + { size_t value; - while(!lock_.compare_exchange_weak(value = empty, pushing)){ + while (!lock_.compare_exchange_weak(value = empty, pushing)) { std::atomic_wait(&lock_, value); } - + ++value_; lock_.store(filled); std::atomic_notify_one(&lock_); } - void stop(){ + void stop() + { size_t value; - while(!lock_.compare_exchange_weak(value = empty, pushing)){ + while (!lock_.compare_exchange_weak(value = empty, pushing)) { std::atomic_wait(&lock_, value); } - + value_ = stopping; lock_.store(filled); std::atomic_notify_one(&lock_); } - bool pop(size_t &_expected_value){ + bool pop(size_t& _expected_value) + { size_t value; - while(!lock_.compare_exchange_weak(value = filled, popping)){ + while (!lock_.compare_exchange_weak(value = filled, popping)) { std::atomic_wait(&lock_, value); } - - if(value_ == stopping) return false; - + + if (value_ == stopping) + return false; + solid_check(value_ == _expected_value); ++_expected_value; @@ -175,27 +366,28 @@ int test_threadpool_batch(int argc, char* argv[]) return true; } }; - auto lambda = [&]() { + auto lambda = [&]() { set_current_thread_affinity(); unique_ptr ctxs{new ThreadContext[thread_count]}; - for(size_t i= 0; i < thread_count;++i){ - auto &ctx = ctxs[i]; - ctx.thr_ = thread( - [](ThreadContext &_rctx){ + for (size_t i = 0; i < thread_count; ++i) { + auto& ctx = ctxs[i]; + ctx.thr_ = thread( + [](ThreadContext& _rctx) { set_current_thread_affinity(); size_t expected_val = 1; - while(_rctx.pop(expected_val)); - }, ref(ctx) - ); + while (_rctx.pop(expected_val)) + ; + }, + ref(ctx)); } uint64_t tmin{InvalidSize{}}; uint64_t tmax{0}; uint64_t tsum{0}; uint64_t tcnt{0}; - for(size_t i = 0; i < 40; ++i){ - const auto start = chrono::steady_clock::now(); - for(size_t j = 0; j < entry_count; ++j){ - auto &rctx = ctxs[j % thread_count]; + for (size_t i = 0; i < 40; ++i) { + const auto start = chrono::steady_clock::now(); + for (size_t j = 0; j < entry_count; ++j) { + auto& rctx = ctxs[j % thread_count]; rctx.push(); } { @@ -206,8 +398,8 @@ int test_threadpool_batch(int argc, char* argv[]) ++tcnt; } } - for(size_t i= 0; i < thread_count;++i){ - auto &ctx = ctxs[i]; + for (size_t i = 0; i < 
thread_count; ++i) { + auto& ctx = ctxs[i]; ctx.stop(); ctx.thr_.join(); } @@ -215,62 +407,67 @@ int test_threadpool_batch(int argc, char* argv[]) }; #else static constexpr size_t stopping = InvalidIndex{}; - static constexpr size_t popping = 1; - static constexpr size_t pushing = 2; - static constexpr size_t empty = 3; - static constexpr size_t filled = 4; - struct alignas(std::hardware_destructive_interference_size) ThreadContext{ - thread thr_; + static constexpr size_t popping = 1; + static constexpr size_t pushing = 2; + static constexpr size_t empty = 3; + static constexpr size_t filled = 4; + struct alignas(std::hardware_destructive_interference_size) ThreadContext { + thread thr_; binary_semaphore push_sem_{1}; binary_semaphore pop_sem_{0}; size_t value_ = 0; - - void push(){ + + void push() + { push_sem_.acquire(); - + ++value_; pop_sem_.release(); } - void stop(){ + void stop() + { push_sem_.acquire(); value_ = stopping; pop_sem_.release(); } - bool pop(size_t &_expected_value){ + bool pop(size_t& _expected_value) + { pop_sem_.acquire(); - if(value_ == stopping) return false; - + if (value_ == stopping) + return false; + solid_check(value_ == _expected_value); ++_expected_value; push_sem_.release(); return true; } }; - auto lambda = [&]() { + auto lambda = [&]() { set_current_thread_affinity(); unique_ptr ctxs{new ThreadContext[thread_count]}; - for(size_t i= 0; i < thread_count;++i){ - auto &ctx = ctxs[i]; - ctx.thr_ = thread( - [](ThreadContext &_rctx){ + for (size_t i = 0; i < thread_count; ++i) { + auto& ctx = ctxs[i]; + ctx.thr_ = thread( + [](ThreadContext& _rctx) { set_current_thread_affinity(); size_t expected_val = 1; - while(_rctx.pop(expected_val)); - }, ref(ctx) - ); + while (_rctx.pop(expected_val)) + ; + }, + ref(ctx)); } uint64_t tmin{InvalidSize{}}; uint64_t tmax{0}; uint64_t tsum{0}; uint64_t tcnt{0}; - for(size_t i = 0; i < 40; ++i){ - const auto start = chrono::steady_clock::now(); - for(size_t j = 0; j < entry_count; ++j){ - auto &rctx = ctxs[j % thread_count]; + for (size_t i = 0; i < 40; ++i) { + const auto start = chrono::steady_clock::now(); + for (size_t j = 0; j < entry_count; ++j) { + auto& rctx = ctxs[j % thread_count]; rctx.push(); } { @@ -281,8 +478,8 @@ int test_threadpool_batch(int argc, char* argv[]) ++tcnt; } } - for(size_t i= 0; i < thread_count;++i){ - auto &ctx = ctxs[i]; + for (size_t i = 0; i < thread_count; ++i) { + auto& ctx = ctxs[i]; ctx.stop(); ctx.thr_.join(); } @@ -297,4 +494,4 @@ int test_threadpool_batch(int argc, char* argv[]) solid_log(logger, Verbose, "after async wait"); return 0; -} \ No newline at end of file +} diff --git a/solid/utility/threadpool.hpp b/solid/utility/threadpool.hpp index cb76ffb4..e7ba9441 100644 --- a/solid/utility/threadpool.hpp +++ b/solid/utility/threadpool.hpp @@ -242,7 +242,7 @@ class SynchronizationContext { namespace tpimpl { template -class TaskData { +class alignas(hardware_destructive_interference_size) TaskData { std::aligned_storage_t data_; public: @@ -349,7 +349,7 @@ struct LocalContext { template class ThreadPool : NonCopyable { public: - static constexpr size_t spin_count = 10000; + static constexpr size_t spin_count = 10; struct ContextStub { using TaskQueueT = TaskList; std::atomic_size_t use_count_{1}; @@ -380,7 +380,8 @@ class ThreadPool : NonCopyable { }; private: - using AtomicLockT = std::atomic_size_t; + using AtomicLockT = std::atomic; + enum struct LockE : AtomicLockT::value_type { Empty = 0, Pushing, @@ -392,20 +393,23 @@ class ThreadPool : NonCopyable { Stop, Wake, }; + + using 
AtomicCounterT = std::atomic; + using AtomicCounterValueT = AtomicCounterT::value_type; + template + inline constexpr static auto computeCounter(const IndexT _index, const size_t _capacity) noexcept + { + return (_index / _capacity) & std::numeric_limits::max(); + } + struct OneStub { -#if defined(__cpp_lib_atomic_wait) - std::atomic_flag pushing_ = ATOMIC_FLAG_INIT; - std::atomic_flag popping_ = ATOMIC_FLAG_INIT; -#else - std::atomic_bool pushing_ = {false}; - std::atomic_bool popping_ = {false}; -#endif - AtomicLockT lock_ = {to_underlying(LockE::Empty)}; - std::uint8_t event_ = {to_underlying(EventE::Fill)}; - ContextStub* pcontext_ = nullptr; - uint64_t all_id_ = 0; - uint64_t context_produce_id_ = 0; - std::unique_ptr> data_ptr_ = std::make_unique>(); + AtomicCounterT produce_count_{0}; + AtomicCounterT consume_count_{static_cast(-1)}; + std::uint8_t event_ = {to_underlying(EventE::Fill)}; + TaskData* data_ptr_ = nullptr; + ContextStub* pcontext_ = nullptr; + uint64_t all_id_ = 0; + uint64_t context_produce_id_ = 0; auto& task() noexcept { @@ -429,38 +433,15 @@ class ThreadPool : NonCopyable { context_produce_id_ = 0; } - void waitWhilePushOne(Stats& _rstats) noexcept + void waitWhilePushOne(Stats& _rstats, const AtomicCounterValueT _count) noexcept { while (true) { -#if defined(__cpp_lib_atomic_wait) - const bool already_pushing = pushing_.test_and_set(std::memory_order_acquire); -#else - bool expected = false; - const bool already_pushing = !pushing_.compare_exchange_strong(expected, true, std::memory_order_acquire); -#endif - if (!already_pushing) { - // wait for lock to be 0. - std::underlying_type_t value = to_underlying(LockE::Empty); - - if (!lock_.compare_exchange_weak(value, to_underlying(LockE::Pushing))) { - auto spin = spin_count; - do { - if(!spin--){ - spin = 0; - std::atomic_wait(&lock_, value); - } - value = to_underlying(LockE::Empty); - } while (!lock_.compare_exchange_weak(value, to_underlying(LockE::Pushing))); - _rstats.pushOneWaitLock(); - } - return; + const auto cnt = produce_count_.load(); + if (cnt == _count) { + break; } else { -#if defined(__cpp_lib_atomic_wait) - pushing_.wait(true); -#else - std::atomic_wait(&pushing_, true); -#endif - _rstats.pushOneWaitPushing(); + _rstats.pushOneWaitLock(); + std::atomic_wait_explicit(&produce_count_, cnt, std::memory_order_relaxed); } } } @@ -469,90 +450,49 @@ class ThreadPool : NonCopyable { { using namespace std::chrono; event_ = to_underlying(EventE::Fill); - lock_.store(to_underlying(LockE::Filled)); - std::atomic_notify_one(&lock_); + ++consume_count_; + std::atomic_notify_one(&consume_count_); _rduration = duration_cast(steady_clock::now() - _start).count(); -#if defined(__cpp_lib_atomic_wait) - pushing_.clear(std::memory_order_release); - pushing_.notify_one(); -#else - pushing_.store(false, std::memory_order_release); - std::atomic_notify_one(&pushing_); -#endif } - void waitWhileStop(Stats& _rstats) noexcept + void waitWhileStop(Stats& _rstats, const AtomicCounterValueT _count) noexcept { - waitWhilePushOne(_rstats); + waitWhilePushOne(_rstats, _count); } - void waitWhilePushAll(Stats& _rstats) noexcept + void waitWhilePushAll(Stats& _rstats, const AtomicCounterValueT _count) noexcept { - waitWhilePushOne(_rstats); + waitWhilePushOne(_rstats, _count); } void notifyWhileStop() noexcept { event_ = to_underlying(EventE::Stop); - lock_.store(to_underlying(LockE::Filled)); - std::atomic_notify_one(&lock_); -#if defined(__cpp_lib_atomic_wait) - pushing_.clear(std::memory_order_release); - pushing_.notify_one(); 
-#else - pushing_.store(false, std::memory_order_release); - std::atomic_notify_one(&pushing_); -#endif + ++consume_count_; + std::atomic_notify_one(&consume_count_); } void notifyWhilePushAll() noexcept { event_ = to_underlying(EventE::Wake); - lock_.store(to_underlying(LockE::Filled)); - std::atomic_notify_one(&lock_); -#if defined(__cpp_lib_atomic_wait) - pushing_.clear(std::memory_order_release); - pushing_.notify_one(); -#else - pushing_.store(false, std::memory_order_release); - std::atomic_notify_one(&pushing_); -#endif + ++consume_count_; + std::atomic_notify_one(&consume_count_); } template < class Fnc, class AllFnc, typename... Args> - EventE waitWhilePop(Stats& _rstats, const Fnc& _try_consume_an_all_fnc, AllFnc& _all_fnc, Args&&... _args) noexcept + EventE waitWhilePop(Stats& _rstats, const AtomicCounterValueT _count, const Fnc& _try_consume_an_all_fnc, AllFnc& _all_fnc, Args&&... _args) noexcept { while (true) { -#if defined(__cpp_lib_atomic_wait) - const bool already_popping = popping_.test_and_set(std::memory_order_acquire); -#else - bool expected = false; - const bool already_popping = !popping_.compare_exchange_strong(expected, true, std::memory_order_acquire); -#endif - if (!already_popping) { - // wait for lock to be 1 or 2. - std::underlying_type_t value; - - if (!lock_.compare_exchange_weak(value= to_underlying(LockE::Filled), to_underlying(LockE::Popping))) { - auto spin = spin_count; - do { - if (!_try_consume_an_all_fnc(&lock_, _all_fnc, std::forward(_args)...) && !spin--) { - spin = 0; - std::atomic_wait(&lock_, value); - } - } while (!lock_.compare_exchange_weak(value= to_underlying(LockE::Filled), to_underlying(LockE::Popping))); - _rstats.popOneWaitLock(); - } + const auto cnt = consume_count_.load(); + if (cnt == _count) { return static_cast(event_); - } else { -#if defined(__cpp_lib_atomic_wait) - popping_.wait(true); -#else - std::atomic_wait(&popping_, true); -#endif + } else if (!_try_consume_an_all_fnc(&consume_count_, _count, _all_fnc, std::forward(_args)...)) { + + std::atomic_wait_explicit(&consume_count_, cnt, std::memory_order_relaxed); + _rstats.popOneWaitPopping(); } } @@ -560,59 +500,42 @@ class ThreadPool : NonCopyable { void notifyWhilePop() noexcept { - lock_.store(to_underlying(LockE::Empty)); - std::atomic_notify_one(&lock_); -#if defined(__cpp_lib_atomic_wait) - popping_.clear(std::memory_order_release); - popping_.notify_one(); -#else - popping_.store(false, std::memory_order_release); - std::atomic_notify_one(&popping_); -#endif + ++produce_count_; + std::atomic_notify_one(&produce_count_); } }; - struct AllStub : TaskData { -#if defined(__cpp_lib_atomic_wait) - std::atomic_flag pushing_ = ATOMIC_FLAG_INIT; -#else - std::atomic_bool pushing_ = {false}; -#endif - AtomicLockT lock_ = {to_underlying(LockE::Empty)}; + struct AllStub { + AtomicCounterT produce_count_{0}; + AtomicCounterT consume_count_{static_cast(-1)}; std::atomic_uint32_t use_count_ = {0}; std::atomic_uint64_t id_ = {0}; + TaskData* data_ptr_ = nullptr; + + auto& task() noexcept + { + return data_ptr_->task(); + } + template + void task(T&& _rt) + { + data_ptr_->task(std::forward(_rt)); + } - void waitWhilePushAll(Stats& _rstats) noexcept + void destroy() + { + data_ptr_->destroy(); + } + + void waitWhilePushAll(Stats& _rstats, const AtomicCounterValueT _count) noexcept { while (true) { -#if defined(__cpp_lib_atomic_wait) - const bool already_pushing = pushing_.test_and_set(std::memory_order_acquire); -#else - bool expected = false; - const bool already_pushing = 
!pushing_.compare_exchange_strong(expected, true, std::memory_order_acquire); -#endif - if (!already_pushing) { - std::underlying_type_t value = to_underlying(LockE::Empty); - - if (!lock_.compare_exchange_weak(value, to_underlying(LockE::Pushing))) { - auto spin = spin_count; - do { - if(!spin--){ - spin = 0; - std::atomic_wait(&lock_, value); - } - value = to_underlying(LockE::Empty); - } while (!lock_.compare_exchange_weak(value, to_underlying(LockE::Pushing))); - _rstats.pushAllWaitLock(); - } - return; + const auto cnt = produce_count_.load(); + if (cnt == _count) { + break; } else { -#if defined(__cpp_lib_atomic_wait) - pushing_.wait(true); -#else - std::atomic_wait(&pushing_, true); -#endif - _rstats.pushAllWaitPushing(); + _rstats.pushOneWaitLock(); + std::atomic_wait_explicit(&produce_count_, cnt, std::memory_order_relaxed); } } } @@ -621,30 +544,25 @@ class ThreadPool : NonCopyable { { use_count_.store(_thread_count); id_.store(_id); - lock_.store(to_underlying(LockE::Filled)); -#if defined(__cpp_lib_atomic_wait) - pushing_.clear(std::memory_order_release); - pushing_.notify_one(); -#else - pushing_.store(false, std::memory_order_release); - std::atomic_notify_one(&pushing_); -#endif + ++consume_count_; } bool notifyWhilePop() noexcept { if (use_count_.fetch_sub(1) == 1) { - TaskData::destroy(); - lock_.store(to_underlying(LockE::Empty)); - std::atomic_notify_one(&lock_); + destroy(); + ++produce_count_; + std::atomic_notify_one(&produce_count_); return true; } return false; } - bool isFilled(const uint64_t _id) const + bool isFilled(const uint64_t _id, const size_t _capacity) const { - return lock_.load() == to_underlying(LockE::Filled) && id_.load() == _id; + const auto count = consume_count_.load(std::memory_order_relaxed); + const AtomicCounterValueT expected_count = computeCounter(_id, _capacity); + return count == expected_count && id_.load() == _id; } }; using AllStubT = AllStub; @@ -652,30 +570,36 @@ class ThreadPool : NonCopyable { using ThreadVectorT = std::vector; /* alignas(hardware_constructive_interference_size) */ struct { - size_t capacity_{0}; - std::atomic_size_t pending_count_{0}; - std::atomic_uint_fast64_t push_index_{1}; - std::atomic_uint_fast64_t commited_index_{0}; - std::unique_ptr tasks_; - } all_; - /* alignas(hardware_constructive_interference_size) */ struct { - size_t capacity_{0}; - //std::unique_ptr tasks_; - std::vector> tasks_; + size_t capacity_{0}; + std::unique_ptr tasks_; + std::unique_ptr[]> datas_; } one_; + + /* alignas(hardware_constructive_interference_size) */ struct { + size_t capacity_{0}; + std::atomic_size_t pending_count_{0}; + std::atomic_uint_fast64_t push_index_{1}; + std::atomic_uint_fast64_t commited_index_{0}; + std::unique_ptr tasks_; + std::unique_ptr[]> datas_; + } all_; Stats statistic_; alignas(hardware_destructive_interference_size) std::atomic_size_t push_one_index_{0}; alignas(hardware_destructive_interference_size) std::atomic_size_t pop_one_index_{0}; ThreadVectorT threads_; std::atomic running_{false}; - size_t pushOneIndex() noexcept + std::tuple pushOneIndex() noexcept { - return push_one_index_.fetch_add(1) % one_.capacity_; + // return push_one_index_.fetch_add(1) % one_.capacity_; + const auto index = push_one_index_.fetch_add(1); + return {index % one_.capacity_, computeCounter(index, one_.capacity_)}; } - size_t popOneIndex() noexcept + std::tuple popOneIndex() noexcept { - return pop_one_index_.fetch_add(1) % one_.capacity_; + // return pop_one_index_.fetch_add(1) % one_.capacity_; + const auto index = 
pop_one_index_.fetch_add(1); + return {index % one_.capacity_, computeCounter(index, one_.capacity_)}; } auto pushAllId() noexcept @@ -764,7 +688,8 @@ class ThreadPool : NonCopyable { template < class AllFnc, typename... Args> - bool tryConsumeAnAllTask(AtomicLockT* _plock, LocalContext& _rlocal_context, AllFnc& _all_fnc, Args&&... _args); + bool tryConsumeAnAllTask(AtomicCounterT* _pcounter, + const AtomicCounterValueT _count, LocalContext& _rlocal_context, AllFnc& _all_fnc, Args&&... _args); template < class AllFnc, typename... Args> @@ -1056,16 +981,22 @@ void ThreadPool::doStart( const auto thread_count = _thread_count ? _thread_count : std::thread::hardware_concurrency(); one_.capacity_ = _one_capacity >= thread_count ? _one_capacity : std::max(static_cast(1024), thread_count); -#if 0 one_.tasks_.reset(new OneStubT[one_.capacity_]); -#else - one_.tasks_.resize(one_.capacity_); - for (auto& task : one_.tasks_) { - task = std::make_unique(); + one_.datas_.reset(new TaskData[one_.capacity_]); + + for (size_t i = 0; i < one_.capacity_; ++i) { + one_.tasks_[i].data_ptr_ = &one_.datas_[i]; } -#endif + all_.capacity_ = _all_capacity ? _all_capacity : 1; all_.tasks_.reset(new AllStubT[all_.capacity_]); + all_.datas_.reset(new TaskData[all_.capacity_]); + + for (size_t i = 0; i < all_.capacity_; ++i) { + all_.tasks_[i].data_ptr_ = &all_.datas_[i]; + } + all_.tasks_[0].produce_count_ = 1; //+ + all_.tasks_[0].consume_count_ = 0; // first entry is skipped on the first iteration for (size_t i = 0; i < thread_count; ++i) { threads_.emplace_back( @@ -1090,9 +1021,10 @@ void ThreadPool::doStop() } for (size_t i = 0; i < threads_.size(); ++i) { - auto& rstub = *one_.tasks_[pushOneIndex()]; + const auto [index, count] = pushOneIndex(); + auto& rstub = one_.tasks_[index]; - rstub.waitWhileStop(statistic_); + rstub.waitWhileStop(statistic_, count); rstub.notifyWhileStop(); } @@ -1115,18 +1047,21 @@ void ThreadPool::doRun( LocalContext local_context; while (true) { - const size_t index = popOneIndex(); - auto& rstub = *one_.tasks_[index]; - uint64_t local_one_context_count = 0; - const auto event = rstub.waitWhilePop( + const auto [index, count] = popOneIndex(); + auto& rstub = one_.tasks_[index]; + uint64_t local_one_context_count = 0; + + const auto event = rstub.waitWhilePop( statistic_, + count, [this, &local_context]( - AtomicLockT* _plock, - AllFnc& _all_fnc, + AtomicCounterT* _pcounter, + const AtomicCounterValueT _count, + AllFnc& _all_fnc, Args&&... _args) { // we need to make sure that, after processing an all_task, no new one_task can have // the all_id less than the all task that we have just processed. - return tryConsumeAnAllTask(_plock, local_context, _all_fnc, std::forward(_args)...); + return tryConsumeAnAllTask(_pcounter, _count, local_context, _all_fnc, std::forward(_args)...); }, _all_fnc, std::forward(_args)...); @@ -1204,6 +1139,9 @@ void ThreadPool::doRun( statistic_.runOneContextCount(local_one_context_count, local_context.one_context_count_); } else if (event == EventE::Wake) { + const auto all_id = rstub.all_id_; + consumeAll(local_context, all_id, _all_fnc, std::forward(_args)...); + ++local_context.wake_count_; statistic_.runWakeCount(local_context.wake_count_); rstub.notifyWhilePop(); @@ -1218,10 +1156,11 @@ template template < class AllFnc, typename... Args> -bool ThreadPool::tryConsumeAnAllTask(AtomicLockT* _plock, LocalContext& _rlocal_context, AllFnc& _all_fnc, Args&&... 
_args) +bool ThreadPool::tryConsumeAnAllTask(AtomicCounterT* _pcounter, + const AtomicCounterValueT _count, LocalContext& _rlocal_context, AllFnc& _all_fnc, Args&&... _args) { auto& rstub = all_.tasks_[_rlocal_context.next_all_id_ % all_.capacity_]; - if (rstub.isFilled(_rlocal_context.next_all_id_)) { + if (rstub.isFilled(_rlocal_context.next_all_id_, all_.capacity_)) { // NOTE: first we fetch the commited_all_index then we check if the // current stub is reserved (some thread is starting to push something) // - this is to ensure that we are not processing an all task prior to being @@ -1230,11 +1169,11 @@ bool ThreadPool::tryConsumeAnAllTask(AtomicLockT* _ploc // we're atomicaly marking the one stub as Pushing. const auto commited_all_index = all_.commited_index_.load(); - if (_plock && *_plock != to_underlying(LockE::Empty)) { + if (_pcounter && _pcounter->load(/* std::memory_order_relaxed */) == _count) { // NOTE: this is to ensure that pushOnes and pushAlls from // the same producer are processed in the same order they // were produced. - return false; // will wait on lock + return true; } if (overflow_safe_less(commited_all_index, _rlocal_context.next_all_id_)) { @@ -1269,7 +1208,7 @@ void ThreadPool::consumeAll(LocalContext& _rlocal_conte { size_t repeat_count = 0; while (overflow_safe_less(_rlocal_context.next_all_id_, _all_id) || _rlocal_context.next_all_id_ == _all_id) { - tryConsumeAnAllTask(nullptr, _rlocal_context, _all_fnc, std::forward(_args)...); + tryConsumeAnAllTask(nullptr, 0, _rlocal_context, _all_fnc, std::forward(_args)...); ++repeat_count; } statistic_.consumeAll(repeat_count); @@ -1280,11 +1219,11 @@ template void ThreadPool::doPushOne(Tsk&& _task, ContextStub* _pctx) { using namespace std::chrono; - const auto start = steady_clock::now(); - const auto index = pushOneIndex(); - auto& rstub = *one_.tasks_[index]; + const auto start = steady_clock::now(); + const auto [index, count] = pushOneIndex(); + auto& rstub = one_.tasks_[index]; - rstub.waitWhilePushOne(statistic_); + rstub.waitWhilePushOne(statistic_, count); rstub.task(std::forward(_task)); rstub.pcontext_ = _pctx; @@ -1311,7 +1250,7 @@ void ThreadPool::doPushAll(Tsk&& _task) const auto id = pushAllId(); auto& rstub = all_.tasks_[id % all_.capacity_]; - rstub.waitWhilePushAll(statistic_); + rstub.waitWhilePushAll(statistic_, computeCounter(id, all_.capacity_)); rstub.task(std::forward(_task)); @@ -1323,9 +1262,10 @@ void ThreadPool::doPushAll(Tsk&& _task) if (should_wake_threads) { for (size_t i = 0; i < threads_.size(); ++i) { - auto& rstub = *one_.tasks_[pushOneIndex()]; + const auto [index, count] = pushOneIndex(); // TODO: + auto& rstub = one_.tasks_[index]; - rstub.waitWhilePushAll(statistic_); + rstub.waitWhilePushAll(statistic_, count); rstub.all_id_ = id; diff --git a/tutorials/aio_echo/aio_echo_server.cpp b/tutorials/aio_echo/aio_echo_server.cpp index 03590133..8c85bc81 100644 --- a/tutorials/aio_echo/aio_echo_server.cpp +++ b/tutorials/aio_echo/aio_echo_server.cpp @@ -233,8 +233,7 @@ void Listener::onAccept(frame::aio::ReactorContext& _rctx, SocketDevice& _rsd) break; } --repeatcnt; - } while (repeatcnt && sock.accept( - _rctx, [this](frame::aio::ReactorContext& _rctx, SocketDevice& _rsd) { return onAccept(_rctx, _rsd); }, _rsd)); + } while (repeatcnt && sock.accept(_rctx, [this](frame::aio::ReactorContext& _rctx, SocketDevice& _rsd) { return onAccept(_rctx, _rsd); }, _rsd)); if (!repeatcnt) { sock.postAccept( @@ -328,8 +327,7 @@ void Talker::onRecv(frame::aio::ReactorContext& _rctx, SocketAddress& 
_raddr, si } --repeatcnt; } while ( - repeatcnt && sock.recvFrom( - _rctx, buf, BufferCapacity, [this](frame::aio::ReactorContext& _rctx, SocketAddress& _raddr, size_t _sz) { onRecv(_rctx, _raddr, _sz); }, _raddr, _sz)); + repeatcnt && sock.recvFrom(_rctx, buf, BufferCapacity, [this](frame::aio::ReactorContext& _rctx, SocketAddress& _raddr, size_t _sz) { onRecv(_rctx, _raddr, _sz); }, _raddr, _sz)); if (repeatcnt == 0) { sock.postRecvFrom( From a82c83750fea6aa20cbd247b8ca290c88f1a36a0 Mon Sep 17 00:00:00 2001 From: Valentin Palade Date: Thu, 20 Jun 2024 20:46:17 +0300 Subject: [PATCH 08/13] threadpool: introducing ThreadPoolConfiguration --- .../aio_echo/example_echo_auto_client.cpp | 2 +- .../aio_echo/example_secure_echo_client.cpp | 2 +- .../frame/mprpc_echo/example_mprpc_echo.cpp | 2 +- .../relay_server/example_relay_server.cpp | 2 +- .../relay_server/example_relay_server_bi.cpp | 2 +- .../example_relay_server_bi_cp.cpp | 2 +- .../example_relay_server_bi_ex.cpp | 2 +- .../example_relay_server_bi_sh.cpp | 2 +- .../threadpool/example_file_open_pool.cpp | 2 +- .../utility/threadpool/example_threadpool.cpp | 2 +- solid/frame/aio/test/test_echo_tcp_stress.cpp | 2 +- solid/frame/aio/test/test_event_stress_wp.cpp | 6 +- .../test/test_perf_threadpool_lockfree.cpp | 2 +- .../test_perf_threadpool_synch_context.cpp | 2 +- .../test/test_clientfrontback_download.cpp | 2 +- .../test/test_clientfrontback_upload.cpp | 2 +- .../mprpc/test/test_clientserver_basic.cpp | 2 +- .../test/test_clientserver_cancel_client.cpp | 2 +- .../test/test_clientserver_cancel_server.cpp | 2 +- .../mprpc/test/test_clientserver_delayed.cpp | 2 +- .../mprpc/test/test_clientserver_download.cpp | 2 +- .../test/test_clientserver_idempotent.cpp | 2 +- .../mprpc/test/test_clientserver_noserver.cpp | 2 +- .../mprpc/test/test_clientserver_oneshot.cpp | 2 +- .../test/test_clientserver_sendrequest.cpp | 2 +- .../mprpc/test/test_clientserver_split.cpp | 2 +- .../test/test_clientserver_timeout_secure.cpp | 2 +- .../mprpc/test/test_clientserver_topic.cpp | 4 +- .../mprpc/test/test_clientserver_upload.cpp | 2 +- .../test/test_clientserver_upload_single.cpp | 2 +- .../test/test_clientserver_versioning.cpp | 2 +- .../mprpc/test/test_connection_close.cpp | 2 +- .../frame/mprpc/test/test_keepalive_fail.cpp | 2 +- .../mprpc/test/test_keepalive_success.cpp | 2 +- .../mprpc/test/test_multiprotocol_basic.cpp | 2 +- solid/frame/mprpc/test/test_pool_basic.cpp | 2 +- .../mprpc/test/test_pool_delay_close.cpp | 2 +- .../mprpc/test/test_pool_force_close.cpp | 2 +- solid/frame/mprpc/test/test_raw_basic.cpp | 2 +- solid/frame/mprpc/test/test_raw_proxy.cpp | 2 +- solid/frame/mprpc/test/test_relay_basic.cpp | 2 +- .../mprpc/test/test_relay_cancel_request.cpp | 2 +- .../mprpc/test/test_relay_cancel_response.cpp | 2 +- .../mprpc/test/test_relay_close_request.cpp | 2 +- .../mprpc/test/test_relay_close_response.cpp | 2 +- .../mprpc/test/test_relay_detect_close.cpp | 2 +- ...test_relay_detect_close_while_response.cpp | 2 +- .../frame/mprpc/test/test_relay_disabled.cpp | 2 +- solid/frame/mprpc/test/test_relay_split.cpp | 2 +- solid/utility/common.hpp | 15 ++ .../test/test_callpool_multicast_basic.cpp | 2 +- .../test/test_callpool_multicast_pattern.cpp | 2 +- solid/utility/test/test_collapse.cpp | 4 +- solid/utility/test/test_threadpool.cpp | 2 +- solid/utility/test/test_threadpool_basic.cpp | 2 +- solid/utility/test/test_threadpool_batch.cpp | 27 ++- solid/utility/test/test_threadpool_chain.cpp | 4 +- .../utility/test/test_threadpool_context.cpp | 2 +- 
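[Annotation] This commit groups the pools' three leading positional arguments (thread count, one-task queue capacity, all-task queue capacity) into a single ThreadPoolConfiguration aggregate, which is why every call site in the diffs that follow changes from "1, 100, 0, ..." to "{1, 100, 0}, ...". A minimal sketch of the two equivalent spellings, assuming ThreadPoolConfiguration lives in namespace solid as the rest of threadpool.hpp does, and using CallPoolT to stand for whichever pool alias a given test defines:

// Brace-initialized configuration, as the converted call sites spell it:
CallPoolT cwp{{1, 100, 0}, [](const size_t) {}, [](const size_t) {}};

// Equivalent, using the configuration's named setters; spin count and any
// field not set keeps its ThreadPoolConfiguration default:
CallPoolT cwp2{
    solid::ThreadPoolConfiguration{}.threadCount(1).oneCapacity(100).allCapacity(0),
    [](const size_t) {}, [](const size_t) {}};

The named-setter form is what the new struct's fluent threadCount()/oneCapacity()/allCapacity()/spinCount() members (introduced in the threadpool.hpp hunk near the end of this patch) are for; the tests mostly keep the terse positional braces.
[End annotation]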
.../test/test_threadpool_multicast_basic.cpp | 2 +- .../test/test_threadpool_multicast_sleep.cpp | 2 +- ...ulticast_synchronization_context_basic.cpp | 2 +- .../utility/test/test_threadpool_pattern.cpp | 2 +- .../test/test_threadpool_thread_context.cpp | 2 +- solid/utility/threadpool.hpp | 206 +++++++++++------- tutorials/mprpc_echo/mprpc_echo_client.cpp | 2 +- .../mprpc_echo/mprpc_echo_client_pool.cpp | 2 +- .../mprpc_echo_relay_client.cpp | 2 +- tutorials/mprpc_file/mprpc_file_client.cpp | 2 +- .../mprpc_request/mprpc_request_client.cpp | 2 +- .../mprpc_request_client.cpp | 2 +- 70 files changed, 227 insertions(+), 165 deletions(-) diff --git a/examples/frame/aio_echo/example_echo_auto_client.cpp b/examples/frame/aio_echo/example_echo_auto_client.cpp index 52476d7d..28cf2bfa 100644 --- a/examples/frame/aio_echo/example_echo_auto_client.cpp +++ b/examples/frame/aio_echo/example_echo_auto_client.cpp @@ -196,7 +196,7 @@ int main(int argc, char* argv[]) 1024 * 1024 * 64); } - CallPoolT cwp{1, 100, 0, [](const size_t) {}, [](const size_t) {}}; + CallPoolT cwp{{1, 100, 0}, [](const size_t) {}, [](const size_t) {}}; frame::aio::Resolver resolver([&cwp](std::function&& _fnc) { cwp.pushOne(std::move(_fnc)); }); async_resolver(&resolver); diff --git a/examples/frame/aio_echo/example_secure_echo_client.cpp b/examples/frame/aio_echo/example_secure_echo_client.cpp index 4090f782..7103a95e 100644 --- a/examples/frame/aio_echo/example_secure_echo_client.cpp +++ b/examples/frame/aio_echo/example_secure_echo_client.cpp @@ -174,7 +174,7 @@ int main(int argc, char* argv[]) frame::ServiceT service(manager); frame::ActorIdT actuid; - CallPoolT cwp{1, 100, 0, [](const size_t) {}, [](const size_t) {}}; + CallPoolT cwp{{1, 100, 0}, [](const size_t) {}, [](const size_t) {}}; frame::aio::Resolver resolver([&cwp](std::function&& _fnc) { cwp.pushOne(std::move(_fnc)); }); ErrorConditionT err; diff --git a/examples/frame/mprpc_echo/example_mprpc_echo.cpp b/examples/frame/mprpc_echo/example_mprpc_echo.cpp index c26bcafb..3b303636 100644 --- a/examples/frame/mprpc_echo/example_mprpc_echo.cpp +++ b/examples/frame/mprpc_echo/example_mprpc_echo.cpp @@ -173,7 +173,7 @@ int main(int argc, char* argv[]) frame::Manager m; frame::mprpc::ServiceT ipcsvc(m); ErrorConditionT err; - CallPoolT cwp{1, 100, 0, [](const size_t) {}, [](const size_t) {}}; + CallPoolT cwp{{1, 100, 0}, [](const size_t) {}, [](const size_t) {}}; frame::aio::Resolver resolver([&cwp](std::function&& _fnc) { cwp.pushOne(std::move(_fnc)); }); if (!restart(ipcsvc, resolver, sch)) { diff --git a/examples/frame/relay_server/example_relay_server.cpp b/examples/frame/relay_server/example_relay_server.cpp index 7f2a1a83..29bcfef1 100644 --- a/examples/frame/relay_server/example_relay_server.cpp +++ b/examples/frame/relay_server/example_relay_server.cpp @@ -217,7 +217,7 @@ int main(int argc, char* argv[]) 3, 1024 * 1024 * 64); } - CallPoolT cwp{1, 100, 0, [](const size_t) {}, [](const size_t) {}}; + CallPoolT cwp{{1, 100, 0}, [](const size_t) {}, [](const size_t) {}}; frame::aio::Resolver resolver([&cwp](std::function&& _fnc) { cwp.pushOne(std::move(_fnc)); }); async_resolver(&resolver); diff --git a/examples/frame/relay_server/example_relay_server_bi.cpp b/examples/frame/relay_server/example_relay_server_bi.cpp index 1af1d03a..268af281 100644 --- a/examples/frame/relay_server/example_relay_server_bi.cpp +++ b/examples/frame/relay_server/example_relay_server_bi.cpp @@ -180,7 +180,7 @@ int main(int argc, char* argv[]) 1024 * 1024 * 64); } - CallPoolT cwp{1, 100, 0, 
[](const size_t) {}, [](const size_t) {}}; + CallPoolT cwp{{1, 100, 0}, [](const size_t) {}, [](const size_t) {}}; frame::aio::Resolver resolver([&cwp](std::function&& _fnc) { cwp.pushOne(std::move(_fnc)); }); async_resolver(&resolver); diff --git a/examples/frame/relay_server/example_relay_server_bi_cp.cpp b/examples/frame/relay_server/example_relay_server_bi_cp.cpp index 22716b60..b638177d 100644 --- a/examples/frame/relay_server/example_relay_server_bi_cp.cpp +++ b/examples/frame/relay_server/example_relay_server_bi_cp.cpp @@ -181,7 +181,7 @@ int main(int argc, char* argv[]) 3, 1024 * 1024 * 64); } - CallPoolT cwp{1, 100, 0, [](const size_t) {}, [](const size_t) {}}; + CallPoolT cwp{{1, 100, 0}, [](const size_t) {}, [](const size_t) {}}; frame::aio::Resolver resolver([&cwp](std::function&& _fnc) { cwp.pushOne(std::move(_fnc)); }); async_resolver(&resolver); diff --git a/examples/frame/relay_server/example_relay_server_bi_ex.cpp b/examples/frame/relay_server/example_relay_server_bi_ex.cpp index 09856ce4..2cc7493e 100644 --- a/examples/frame/relay_server/example_relay_server_bi_ex.cpp +++ b/examples/frame/relay_server/example_relay_server_bi_ex.cpp @@ -382,7 +382,7 @@ int main(int argc, char* argv[]) cout << "sizeof(Connection) = " << sizeof(Connection) << endl; - CallPoolT cwp{1, 100, 0, [](const size_t) {}, [](const size_t) {}}; + CallPoolT cwp{{1, 100, 0}, [](const size_t) {}, [](const size_t) {}}; frame::aio::Resolver resolver([&cwp](std::function&& _fnc) { cwp.pushOne(std::move(_fnc)); }); async_resolver(&resolver); diff --git a/examples/frame/relay_server/example_relay_server_bi_sh.cpp b/examples/frame/relay_server/example_relay_server_bi_sh.cpp index d31f6dbb..9cb95f66 100644 --- a/examples/frame/relay_server/example_relay_server_bi_sh.cpp +++ b/examples/frame/relay_server/example_relay_server_bi_sh.cpp @@ -311,7 +311,7 @@ int main(int argc, char* argv[]) cout << "sizeof(Connection) = " << sizeof(Connection) << endl; - CallPoolT cwp{1, 100, 0, [](const size_t) {}, [](const size_t) {}}; + CallPoolT cwp{{1, 100, 0}, [](const size_t) {}, [](const size_t) {}}; frame::aio::Resolver resolver([&cwp](std::function&& _fnc) { cwp.pushOne(std::move(_fnc)); }); async_resolver(&resolver); diff --git a/examples/utility/threadpool/example_file_open_pool.cpp b/examples/utility/threadpool/example_file_open_pool.cpp index 65b1a927..4595616a 100644 --- a/examples/utility/threadpool/example_file_open_pool.cpp +++ b/examples/utility/threadpool/example_file_open_pool.cpp @@ -108,7 +108,7 @@ int main(int argc, char* argv[]) using ThreadPoolT = ThreadPool; Context context; ThreadPoolT wp{ - 1, 100, 0, [](const size_t, Context&) {}, [](const size_t, Context&) {}, + {1, 100, 0}, [](const size_t, Context&) {}, [](const size_t, Context&) {}, [](FileDevice* _pfile, Context& _rctx) { int64_t sz = _pfile->size(); int toread; diff --git a/examples/utility/threadpool/example_threadpool.cpp b/examples/utility/threadpool/example_threadpool.cpp index 830086f5..d0d5f62c 100644 --- a/examples/utility/threadpool/example_threadpool.cpp +++ b/examples/utility/threadpool/example_threadpool.cpp @@ -20,7 +20,7 @@ int main(int argc, char* argv[]) solid::log_start(std::cerr, {".*:VIEW"}); ThreadPool wp{ - 1, 100, 0, [](const size_t) {}, [](const size_t) {}, + {1, 100, 0}, [](const size_t) {}, [](const size_t) {}, [](int _v) { solid_log(generic_logger, Info, "v = " << _v); std::this_thread::sleep_for(std::chrono::milliseconds(_v * 10)); diff --git a/solid/frame/aio/test/test_echo_tcp_stress.cpp 
b/solid/frame/aio/test/test_echo_tcp_stress.cpp index 5734534a..78de3829 100644 --- a/solid/frame/aio/test/test_echo_tcp_stress.cpp +++ b/solid/frame/aio/test/test_echo_tcp_stress.cpp @@ -487,7 +487,7 @@ int test_echo_tcp_stress(int argc, char* argv[]) frame::Manager srv_mgr; SecureContextT srv_secure_ctx{SecureContextT::create()}; frame::ServiceT srv_svc{srv_mgr}; - CallPoolT cwp{1, 100, 0, [](const size_t) {}, [](const size_t) {}}; + CallPoolT cwp{{1, 100, 0}, [](const size_t) {}, [](const size_t) {}}; frame::aio::Resolver resolver([&cwp](std::function&& _fnc) { cwp.pushOne(std::move(_fnc)); }); async_resolver(&resolver); diff --git a/solid/frame/aio/test/test_event_stress_wp.cpp b/solid/frame/aio/test/test_event_stress_wp.cpp index 818f9404..c68a12c4 100644 --- a/solid/frame/aio/test/test_event_stress_wp.cpp +++ b/solid/frame/aio/test/test_event_stress_wp.cpp @@ -224,11 +224,11 @@ int test_event_stress_wp(int argc, char* argv[]) gctx.stopping_ = false; account_cp.start( - thread_count, account_count, 0, [](const size_t, AccountContext&) {}, [](const size_t, AccountContext&) {}, std::ref(acc_ctx)); + {thread_count, account_count, 0}, [](const size_t, AccountContext&) {}, [](const size_t, AccountContext&) {}, std::ref(acc_ctx)); connection_cp.start( - thread_count, account_count * account_connection_count, 0, [](const size_t, ConnectionContext&) {}, [](const size_t, ConnectionContext&) {}, std::ref(conn_ctx)); + {thread_count, account_count * account_connection_count, 0}, [](const size_t, ConnectionContext&) {}, [](const size_t, ConnectionContext&) {}, std::ref(conn_ctx)); device_cp.start( - thread_count, account_count * account_connection_count, 0, [](const size_t, DeviceContext&) {}, [](const size_t, DeviceContext&) {}, std::ref(dev_ctx)); + {thread_count, account_count * account_connection_count, 0}, [](const size_t, DeviceContext&) {}, [](const size_t, DeviceContext&) {}, std::ref(dev_ctx)); conn_ctx.conn_cnt_ = (account_connection_count * account_count); auto produce_lambda = [&]() { diff --git a/solid/frame/aio/test/test_perf_threadpool_lockfree.cpp b/solid/frame/aio/test/test_perf_threadpool_lockfree.cpp index afb976e9..7d02254f 100644 --- a/solid/frame/aio/test/test_perf_threadpool_lockfree.cpp +++ b/solid/frame/aio/test/test_perf_threadpool_lockfree.cpp @@ -48,7 +48,7 @@ int test_perf_threadpool_lockfree(int argc, char* argv[]) (void)context_count; auto lambda = [&]() { ThreadPoolT wp{ - thread_count, 10000, 0, [](const size_t) {}, [](const size_t) {}, + {thread_count, 10000, 0}, [](const size_t) {}, [](const size_t) {}, [&](EventBase& _event) { if (_event == generic_event) { ++received_events; diff --git a/solid/frame/aio/test/test_perf_threadpool_synch_context.cpp b/solid/frame/aio/test/test_perf_threadpool_synch_context.cpp index 7d85e46c..c7bb692f 100644 --- a/solid/frame/aio/test/test_perf_threadpool_synch_context.cpp +++ b/solid/frame/aio/test/test_perf_threadpool_synch_context.cpp @@ -51,7 +51,7 @@ int test_perf_threadpool_synch_context(int argc, char* argv[]) auto lambda = [&]() { auto start = std::chrono::steady_clock::now(); ThreadPoolT wp{ - thread_count, 10000, 0, [](const size_t) {}, [](const size_t) {}, + {thread_count, 10000, 0}, [](const size_t) {}, [](const size_t) {}, [&](EventBase& _event) { if (_event == generic_event) { ++received_events; diff --git a/solid/frame/mprpc/test/test_clientfrontback_download.cpp b/solid/frame/mprpc/test/test_clientfrontback_download.cpp index 6a49af0a..d4b53eb7 100644 --- a/solid/frame/mprpc/test/test_clientfrontback_download.cpp 
+++ b/solid/frame/mprpc/test/test_clientfrontback_download.cpp @@ -354,7 +354,7 @@ int test_clientfrontback_download(int argc, char* argv[]) frame::mprpc::ServiceT mprpc_back_client(m); frame::mprpc::ServiceT mprpc_back_server(m); ErrorConditionT err; - CallPoolT cwp{1, 100, 0, [](const size_t) {}, [](const size_t) {}}; + CallPoolT cwp{{1, 100, 0}, [](const size_t) {}, [](const size_t) {}}; frame::aio::Resolver resolver([&cwp](std::function&& _fnc) { cwp.pushOne(std::move(_fnc)); }); sch_client.start(1); diff --git a/solid/frame/mprpc/test/test_clientfrontback_upload.cpp b/solid/frame/mprpc/test/test_clientfrontback_upload.cpp index 93bf69e0..112876ea 100644 --- a/solid/frame/mprpc/test/test_clientfrontback_upload.cpp +++ b/solid/frame/mprpc/test/test_clientfrontback_upload.cpp @@ -346,7 +346,7 @@ int test_clientfrontback_upload(int argc, char* argv[]) frame::mprpc::ServiceT mprpc_back_client(m); frame::mprpc::ServiceT mprpc_back_server(m); ErrorConditionT err; - CallPoolT cwp{1, 100, 0, [](const size_t) {}, [](const size_t) {}}; + CallPoolT cwp{{1, 100, 0}, [](const size_t) {}, [](const size_t) {}}; frame::aio::Resolver resolver([&cwp](std::function&& _fnc) { cwp.pushOne(std::move(_fnc)); }); sch_client.start(1); diff --git a/solid/frame/mprpc/test/test_clientserver_basic.cpp b/solid/frame/mprpc/test/test_clientserver_basic.cpp index 2d2fe3df..962f9292 100644 --- a/solid/frame/mprpc/test/test_clientserver_basic.cpp +++ b/solid/frame/mprpc/test/test_clientserver_basic.cpp @@ -326,7 +326,7 @@ int test_clientserver_basic(int argc, char* argv[]) frame::mprpc::ServiceT mprpcserver(m); frame::mprpc::ServiceT mprpcclient(m); ErrorConditionT err; - CallPoolT cwp{1, 100, 0, [](const size_t) {}, [](const size_t) {}}; + CallPoolT cwp{{1, 100, 0}, [](const size_t) {}, [](const size_t) {}}; frame::aio::Resolver resolver([&cwp](std::function&& _fnc) { cwp.pushOne(std::move(_fnc)); }); sch_client.start(1); diff --git a/solid/frame/mprpc/test/test_clientserver_cancel_client.cpp b/solid/frame/mprpc/test/test_clientserver_cancel_client.cpp index d842e647..4c15651a 100644 --- a/solid/frame/mprpc/test/test_clientserver_cancel_client.cpp +++ b/solid/frame/mprpc/test/test_clientserver_cancel_client.cpp @@ -309,7 +309,7 @@ int test_clientserver_cancel_client(int argc, char* argv[]) frame::mprpc::ServiceT mprpcserver(m); frame::mprpc::ServiceT mprpcclient(m); ErrorConditionT err; - CallPoolT cwp{1, 100, 0, [](const size_t) {}, [](const size_t) {}}; + CallPoolT cwp{{1, 100, 0}, [](const size_t) {}, [](const size_t) {}}; frame::aio::Resolver resolver([&cwp](std::function&& _fnc) { cwp.pushOne(std::move(_fnc)); }); sch_client.start(1); diff --git a/solid/frame/mprpc/test/test_clientserver_cancel_server.cpp b/solid/frame/mprpc/test/test_clientserver_cancel_server.cpp index 42a90a1b..b2dcc782 100644 --- a/solid/frame/mprpc/test/test_clientserver_cancel_server.cpp +++ b/solid/frame/mprpc/test/test_clientserver_cancel_server.cpp @@ -321,7 +321,7 @@ int test_clientserver_cancel_server(int argc, char* argv[]) frame::mprpc::ServiceT mprpcserver(m); frame::mprpc::ServiceT mprpcclient(m); ErrorConditionT err; - CallPoolT cwp{1, 100, 0, [](const size_t) {}, [](const size_t) {}}; + CallPoolT cwp{{1, 100, 0}, [](const size_t) {}, [](const size_t) {}}; frame::aio::Resolver resolver([&cwp](std::function&& _fnc) { cwp.pushOne(std::move(_fnc)); }); sch_client.start(1); diff --git a/solid/frame/mprpc/test/test_clientserver_delayed.cpp b/solid/frame/mprpc/test/test_clientserver_delayed.cpp index 81cb69f9..1c82a97e 100644 --- 
a/solid/frame/mprpc/test/test_clientserver_delayed.cpp +++ b/solid/frame/mprpc/test/test_clientserver_delayed.cpp @@ -307,7 +307,7 @@ int test_clientserver_delayed(int argc, char* argv[]) frame::mprpc::ServiceT mprpcserver(m); frame::mprpc::ServiceT mprpcclient(m); ErrorConditionT err; - CallPoolT cwp{1, 100, 0, [](const size_t) {}, [](const size_t) {}}; + CallPoolT cwp{{1, 100, 0}, [](const size_t) {}, [](const size_t) {}}; frame::aio::Resolver resolver([&cwp](std::function&& _fnc) { cwp.pushOne(std::move(_fnc)); }); sch_client.start(1); diff --git a/solid/frame/mprpc/test/test_clientserver_download.cpp b/solid/frame/mprpc/test/test_clientserver_download.cpp index 60ed60b2..b9fc7688 100644 --- a/solid/frame/mprpc/test/test_clientserver_download.cpp +++ b/solid/frame/mprpc/test/test_clientserver_download.cpp @@ -233,7 +233,7 @@ int test_clientserver_download(int argc, char* argv[]) frame::mprpc::ServiceT mprpc_client(m); frame::mprpc::ServiceT mprpc_server(m); ErrorConditionT err; - CallPoolT cwp{1, 100, 0, [](const size_t) {}, [](const size_t) {}}; + CallPoolT cwp{{1, 100, 0}, [](const size_t) {}, [](const size_t) {}}; frame::aio::Resolver resolver([&cwp](std::function&& _fnc) { cwp.pushOne(std::move(_fnc)); }); sch_client.start(1); diff --git a/solid/frame/mprpc/test/test_clientserver_idempotent.cpp b/solid/frame/mprpc/test/test_clientserver_idempotent.cpp index 51776734..84acbcfa 100644 --- a/solid/frame/mprpc/test/test_clientserver_idempotent.cpp +++ b/solid/frame/mprpc/test/test_clientserver_idempotent.cpp @@ -324,7 +324,7 @@ int test_clientserver_idempotent(int argc, char* argv[]) frame::mprpc::ServiceT mprpcserver(m); frame::mprpc::ServiceT mprpcclient(m); ErrorConditionT err; - CallPoolT cwp{1, 100, 0, [](const size_t) {}, [](const size_t) {}}; + CallPoolT cwp{{1, 100, 0}, [](const size_t) {}, [](const size_t) {}}; frame::aio::Resolver resolver([&cwp](std::function&& _fnc) { cwp.pushOne(std::move(_fnc)); }); sch_client.start(1); diff --git a/solid/frame/mprpc/test/test_clientserver_noserver.cpp b/solid/frame/mprpc/test/test_clientserver_noserver.cpp index 8d2ca81f..30424232 100644 --- a/solid/frame/mprpc/test/test_clientserver_noserver.cpp +++ b/solid/frame/mprpc/test/test_clientserver_noserver.cpp @@ -220,7 +220,7 @@ int test_clientserver_noserver(int argc, char* argv[]) frame::Manager m; frame::mprpc::ServiceT mprpcclient(m); ErrorConditionT err; - CallPoolT cwp{1, 100, 0, [](const size_t) {}, [](const size_t) {}}; + CallPoolT cwp{{1, 100, 0}, [](const size_t) {}, [](const size_t) {}}; frame::aio::Resolver resolver([&cwp](std::function&& _fnc) { cwp.pushOne(std::move(_fnc)); }); sch_client.start(1); diff --git a/solid/frame/mprpc/test/test_clientserver_oneshot.cpp b/solid/frame/mprpc/test/test_clientserver_oneshot.cpp index 6b592f31..c16569fb 100644 --- a/solid/frame/mprpc/test/test_clientserver_oneshot.cpp +++ b/solid/frame/mprpc/test/test_clientserver_oneshot.cpp @@ -221,7 +221,7 @@ int test_clientserver_oneshot(int argc, char* argv[]) frame::Manager m; frame::mprpc::ServiceT mprpcclient(m); ErrorConditionT err; - CallPoolT cwp{1, 100, 0, [](const size_t) {}, [](const size_t) {}}; + CallPoolT cwp{{1, 100, 0}, [](const size_t) {}, [](const size_t) {}}; frame::aio::Resolver resolver([&cwp](std::function&& _fnc) { cwp.pushOne(std::move(_fnc)); }); sch_client.start(1); diff --git a/solid/frame/mprpc/test/test_clientserver_sendrequest.cpp b/solid/frame/mprpc/test/test_clientserver_sendrequest.cpp index a5283f4b..387a74f5 100644 --- 
a/solid/frame/mprpc/test/test_clientserver_sendrequest.cpp +++ b/solid/frame/mprpc/test/test_clientserver_sendrequest.cpp @@ -388,7 +388,7 @@ int test_clientserver_sendrequest(int argc, char* argv[]) frame::mprpc::ServiceT mprpcserver(m); frame::mprpc::ServiceT mprpcclient(m); ErrorConditionT err; - CallPoolT cwp{1, 100, 0, [](const size_t) {}, [](const size_t) {}}; + CallPoolT cwp{{1, 100, 0}, [](const size_t) {}, [](const size_t) {}}; frame::aio::Resolver resolver([&cwp](std::function&& _fnc) { cwp.pushOne(std::move(_fnc)); }); sch_client.start(1); diff --git a/solid/frame/mprpc/test/test_clientserver_split.cpp b/solid/frame/mprpc/test/test_clientserver_split.cpp index 53d38dbf..15085570 100644 --- a/solid/frame/mprpc/test/test_clientserver_split.cpp +++ b/solid/frame/mprpc/test/test_clientserver_split.cpp @@ -353,7 +353,7 @@ int test_clientserver_split(int argc, char* argv[]) frame::mprpc::ServiceT mprpcserver(m); frame::mprpc::ServiceT mprpcclient(m); ErrorConditionT err; - CallPoolT cwp{1, 100, 0, [](const size_t) {}, [](const size_t) {}}; + CallPoolT cwp{{1, 100, 0}, [](const size_t) {}, [](const size_t) {}}; frame::aio::Resolver resolver([&cwp](std::function&& _fnc) { cwp.pushOne(std::move(_fnc)); }); sch_client.start(1); diff --git a/solid/frame/mprpc/test/test_clientserver_timeout_secure.cpp b/solid/frame/mprpc/test/test_clientserver_timeout_secure.cpp index 7dabe3ba..274102f8 100644 --- a/solid/frame/mprpc/test/test_clientserver_timeout_secure.cpp +++ b/solid/frame/mprpc/test/test_clientserver_timeout_secure.cpp @@ -212,7 +212,7 @@ int test_clientserver_timeout_secure(int argc, char* argv[]) frame::Manager m; frame::mprpc::ServiceT mprpcserver(m); ErrorConditionT err; - CallPoolT cwp{1, 100, 0, [](const size_t) {}, [](const size_t) {}}; + CallPoolT cwp{{1, 100, 0}, [](const size_t) {}, [](const size_t) {}}; frame::aio::Resolver resolver([&cwp](std::function&& _fnc) { cwp.pushOne(std::move(_fnc)); }); frame::ServiceT svc_client{m}; async_resolver(&resolver); diff --git a/solid/frame/mprpc/test/test_clientserver_topic.cpp b/solid/frame/mprpc/test/test_clientserver_topic.cpp index 856939e2..69d90f7e 100644 --- a/solid/frame/mprpc/test/test_clientserver_topic.cpp +++ b/solid/frame/mprpc/test/test_clientserver_topic.cpp @@ -333,7 +333,7 @@ int test_clientserver_topic(int argc, char* argv[]) frame::aio::Resolver resolver([&resolve_pool](std::function&& _fnc) { resolve_pool.pushOne(std::move(_fnc)); }); worker_pool.start( - thread_count, 10000, 100, + {thread_count, 10000, 100}, [](const size_t) { set_current_thread_affinity(); local_thread_pool_context_ptr = std::make_unique(); @@ -341,7 +341,7 @@ int test_clientserver_topic(int argc, char* argv[]) [](const size_t) {}); resolve_pool.start( - 1, 100, 0, [](const size_t) {}, [](const size_t) {}); + {1, 100, 0}, [](const size_t) {}, [](const size_t) {}); sch_client.start([]() {set_current_thread_affinity();return true; }, []() {}, 1); sch_server.start([]() { set_current_thread_affinity(); diff --git a/solid/frame/mprpc/test/test_clientserver_upload.cpp b/solid/frame/mprpc/test/test_clientserver_upload.cpp index e9c94686..b1be9b1d 100644 --- a/solid/frame/mprpc/test/test_clientserver_upload.cpp +++ b/solid/frame/mprpc/test/test_clientserver_upload.cpp @@ -223,7 +223,7 @@ int test_clientserver_upload(int argc, char* argv[]) frame::mprpc::ServiceT mprpc_client(m); frame::mprpc::ServiceT mprpc_server(m); ErrorConditionT err; - CallPoolT cwp{1, 100, 0, [](const size_t) {}, [](const size_t) {}}; + CallPoolT cwp{{1, 100, 0}, [](const size_t) {}, 
[](const size_t) {}}; frame::aio::Resolver resolver([&cwp](std::function&& _fnc) { cwp.pushOne(std::move(_fnc)); }); sch_client.start(1); diff --git a/solid/frame/mprpc/test/test_clientserver_upload_single.cpp b/solid/frame/mprpc/test/test_clientserver_upload_single.cpp index 6fb5e5e0..e7ae6864 100644 --- a/solid/frame/mprpc/test/test_clientserver_upload_single.cpp +++ b/solid/frame/mprpc/test/test_clientserver_upload_single.cpp @@ -227,7 +227,7 @@ int test_clientserver_upload_single(int argc, char* argv[]) frame::mprpc::ServiceT mprpc_client(m); frame::mprpc::ServiceT mprpc_server(m); ErrorConditionT err; - CallPoolT cwp{1, 100, 0, [](const size_t) {}, [](const size_t) {}}; + CallPoolT cwp{{1, 100, 0}, [](const size_t) {}, [](const size_t) {}}; frame::aio::Resolver resolver([&cwp](std::function&& _fnc) { cwp.pushOne(std::move(_fnc)); }); sch_client.start(1); diff --git a/solid/frame/mprpc/test/test_clientserver_versioning.cpp b/solid/frame/mprpc/test/test_clientserver_versioning.cpp index 4fc09652..1f1372c9 100644 --- a/solid/frame/mprpc/test/test_clientserver_versioning.cpp +++ b/solid/frame/mprpc/test/test_clientserver_versioning.cpp @@ -77,7 +77,7 @@ int test_clientserver_versioning(int argc, char* argv[]) AioSchedulerT scheduler; frame::Manager manager; - CallPoolT cwp{1, 100, 0, [](const size_t) {}, [](const size_t) {}}; + CallPoolT cwp{{1, 100, 0}, [](const size_t) {}, [](const size_t) {}}; frame::aio::Resolver resolver([&cwp](std::function&& _fnc) { cwp.pushOne(std::move(_fnc)); }); frame::mprpc::ServiceT service(manager); frame::mprpc::ServiceT service_v1(manager); diff --git a/solid/frame/mprpc/test/test_connection_close.cpp b/solid/frame/mprpc/test/test_connection_close.cpp index c9eba4b4..c2b2e8f7 100644 --- a/solid/frame/mprpc/test/test_connection_close.cpp +++ b/solid/frame/mprpc/test/test_connection_close.cpp @@ -319,7 +319,7 @@ int test_connection_close(int argc, char* argv[]) frame::Manager m; - CallPoolT cwp{1, 100, 0, [](const size_t) {}, [](const size_t) {}}; + CallPoolT cwp{{1, 100, 0}, [](const size_t) {}, [](const size_t) {}}; frame::aio::Resolver resolver([&cwp](std::function&& _fnc) { cwp.pushOne(std::move(_fnc)); }); frame::mprpc::ServiceT mprpcserver(m); frame::mprpc::ServiceT mprpcclient(m); diff --git a/solid/frame/mprpc/test/test_keepalive_fail.cpp b/solid/frame/mprpc/test/test_keepalive_fail.cpp index 4cbfc0ea..f34d6bb1 100644 --- a/solid/frame/mprpc/test/test_keepalive_fail.cpp +++ b/solid/frame/mprpc/test/test_keepalive_fail.cpp @@ -307,7 +307,7 @@ int test_keepalive_fail(int argc, char* argv[]) frame::mprpc::ServiceT mprpcserver(m); frame::mprpc::ServiceT mprpcclient(m); ErrorConditionT err; - CallPoolT cwp{1, 100, 0, [](const size_t) {}, [](const size_t) {}}; + CallPoolT cwp{{1, 100, 0}, [](const size_t) {}, [](const size_t) {}}; frame::aio::Resolver resolver([&cwp](std::function&& _fnc) { cwp.pushOne(std::move(_fnc)); }); sch_client.start(1); diff --git a/solid/frame/mprpc/test/test_keepalive_success.cpp b/solid/frame/mprpc/test/test_keepalive_success.cpp index c2f585f0..791ffd16 100644 --- a/solid/frame/mprpc/test/test_keepalive_success.cpp +++ b/solid/frame/mprpc/test/test_keepalive_success.cpp @@ -288,7 +288,7 @@ int test_keepalive_success(int argc, char* argv[]) frame::mprpc::ServiceT mprpcserver(m); frame::mprpc::ServiceT mprpcclient(m); ErrorConditionT err; - CallPoolT cwp{1, 100, 0, [](const size_t) {}, [](const size_t) {}}; + CallPoolT cwp{{1, 100, 0}, [](const size_t) {}, [](const size_t) {}}; frame::aio::Resolver resolver([&cwp](std::function&& 
_fnc) { cwp.pushOne(std::move(_fnc)); }); sch_client.start(1); diff --git a/solid/frame/mprpc/test/test_multiprotocol_basic.cpp b/solid/frame/mprpc/test/test_multiprotocol_basic.cpp index de9d248a..3209c490 100644 --- a/solid/frame/mprpc/test/test_multiprotocol_basic.cpp +++ b/solid/frame/mprpc/test/test_multiprotocol_basic.cpp @@ -86,7 +86,7 @@ int test_multiprotocol_basic(int argc, char* argv[]) frame::Manager m; frame::mprpc::ServiceT mprpcserver(m); ErrorConditionT err; - CallPoolT cwp{1, 100, 0, [](const size_t) {}, [](const size_t) {}}; + CallPoolT cwp{{1, 100, 0}, [](const size_t) {}, [](const size_t) {}}; frame::aio::Resolver resolver([&cwp](std::function&& _fnc) { cwp.pushOne(std::move(_fnc)); }); sch_client.start(1); diff --git a/solid/frame/mprpc/test/test_pool_basic.cpp b/solid/frame/mprpc/test/test_pool_basic.cpp index cc5a63fe..63c0d3a0 100644 --- a/solid/frame/mprpc/test/test_pool_basic.cpp +++ b/solid/frame/mprpc/test/test_pool_basic.cpp @@ -317,7 +317,7 @@ int test_pool_basic(int argc, char* argv[]) frame::mprpc::ServiceT mprpcserver(m); frame::mprpc::ServiceT mprpcclient(m); ErrorConditionT err; - CallPoolT cwp{1, 100, 0, [](const size_t) {}, [](const size_t) {}}; + CallPoolT cwp{{1, 100, 0}, [](const size_t) {}, [](const size_t) {}}; frame::aio::Resolver resolver([&cwp](std::function&& _fnc) { cwp.pushOne(std::move(_fnc)); }); sch_client.start(1); diff --git a/solid/frame/mprpc/test/test_pool_delay_close.cpp b/solid/frame/mprpc/test/test_pool_delay_close.cpp index 02fa4044..70853c55 100644 --- a/solid/frame/mprpc/test/test_pool_delay_close.cpp +++ b/solid/frame/mprpc/test/test_pool_delay_close.cpp @@ -288,7 +288,7 @@ int test_pool_delay_close(int argc, char* argv[]) frame::mprpc::ServiceT mprpcserver(m); frame::mprpc::ServiceT mprpcclient(m); ErrorConditionT err; - CallPoolT cwp{1, 100, 0, [](const size_t) {}, [](const size_t) {}}; + CallPoolT cwp{{1, 100, 0}, [](const size_t) {}, [](const size_t) {}}; frame::aio::Resolver resolver([&cwp](std::function&& _fnc) { cwp.pushOne(std::move(_fnc)); }); sch_client.start(1); diff --git a/solid/frame/mprpc/test/test_pool_force_close.cpp b/solid/frame/mprpc/test/test_pool_force_close.cpp index 176e21d8..f189d77f 100644 --- a/solid/frame/mprpc/test/test_pool_force_close.cpp +++ b/solid/frame/mprpc/test/test_pool_force_close.cpp @@ -257,7 +257,7 @@ int test_pool_force_close(int argc, char* argv[]) frame::mprpc::ServiceT mprpcserver(m); frame::mprpc::ServiceT mprpcclient(m); ErrorConditionT err; - CallPoolT cwp{1, 100, 0, [](const size_t) {}, [](const size_t) {}}; + CallPoolT cwp{{1, 100, 0}, [](const size_t) {}, [](const size_t) {}}; frame::aio::Resolver resolver([&cwp](std::function&& _fnc) { cwp.pushOne(std::move(_fnc)); }); sch_client.start(1); diff --git a/solid/frame/mprpc/test/test_raw_basic.cpp b/solid/frame/mprpc/test/test_raw_basic.cpp index b0b324fe..b3b2270b 100644 --- a/solid/frame/mprpc/test/test_raw_basic.cpp +++ b/solid/frame/mprpc/test/test_raw_basic.cpp @@ -359,7 +359,7 @@ int test_raw_basic(int argc, char* argv[]) frame::mprpc::ServiceT mprpcserver(m); frame::mprpc::ServiceT mprpcclient(m); ErrorConditionT err; - CallPoolT cwp{1, 100, 0, [](const size_t) {}, [](const size_t) {}}; + CallPoolT cwp{{1, 100, 0}, [](const size_t) {}, [](const size_t) {}}; frame::aio::Resolver resolver([&cwp](std::function&& _fnc) { cwp.pushOne(std::move(_fnc)); }); sch_client.start(1); diff --git a/solid/frame/mprpc/test/test_raw_proxy.cpp b/solid/frame/mprpc/test/test_raw_proxy.cpp index 638f61c0..f9229ef7 100644 --- 
a/solid/frame/mprpc/test/test_raw_proxy.cpp +++ b/solid/frame/mprpc/test/test_raw_proxy.cpp @@ -358,7 +358,7 @@ int test_raw_proxy(int argc, char* argv[]) frame::mprpc::ServiceT mprpcserver(m); frame::mprpc::ServiceT mprpcclient(m); ErrorConditionT err; - CallPoolT cwp{1, 100, 0, [](const size_t) {}, [](const size_t) {}}; + CallPoolT cwp{{1, 100, 0}, [](const size_t) {}, [](const size_t) {}}; frame::aio::Resolver resolver([&cwp](std::function&& _fnc) { cwp.pushOne(std::move(_fnc)); }); sch_client.start(1); diff --git a/solid/frame/mprpc/test/test_relay_basic.cpp b/solid/frame/mprpc/test/test_relay_basic.cpp index 69da737f..8582aa53 100644 --- a/solid/frame/mprpc/test/test_relay_basic.cpp +++ b/solid/frame/mprpc/test/test_relay_basic.cpp @@ -388,7 +388,7 @@ int test_relay_basic(int argc, char* argv[]) frame::mprpc::ServiceT mprpcpeera(m); frame::mprpc::ServiceT mprpcpeerb(m); ErrorConditionT err; - CallPoolT cwp{1, 100, 0, [](const size_t) {}, [](const size_t) {}}; + CallPoolT cwp{{1, 100, 0}, [](const size_t) {}, [](const size_t) {}}; frame::aio::Resolver resolver([&cwp](std::function&& _fnc) { cwp.pushOne(std::move(_fnc)); }); sch_peera.start(1); diff --git a/solid/frame/mprpc/test/test_relay_cancel_request.cpp b/solid/frame/mprpc/test/test_relay_cancel_request.cpp index b6b32eeb..030f197c 100644 --- a/solid/frame/mprpc/test/test_relay_cancel_request.cpp +++ b/solid/frame/mprpc/test/test_relay_cancel_request.cpp @@ -437,7 +437,7 @@ int test_relay_cancel_request(int argc, char* argv[]) frame::mprpc::ServiceT mprpcpeera(m); frame::mprpc::ServiceT mprpcpeerb(m); ErrorConditionT err; - CallPoolT cwp{1, 100, 0, [](const size_t) {}, [](const size_t) {}}; + CallPoolT cwp{{1, 100, 0}, [](const size_t) {}, [](const size_t) {}}; frame::aio::Resolver resolver([&cwp](std::function&& _fnc) { cwp.pushOne(std::move(_fnc)); }); sch_peera.start(1); diff --git a/solid/frame/mprpc/test/test_relay_cancel_response.cpp b/solid/frame/mprpc/test/test_relay_cancel_response.cpp index 268e015b..1667c344 100644 --- a/solid/frame/mprpc/test/test_relay_cancel_response.cpp +++ b/solid/frame/mprpc/test/test_relay_cancel_response.cpp @@ -434,7 +434,7 @@ int test_relay_cancel_response(int argc, char* argv[]) frame::mprpc::ServiceT mprpcpeera(m); frame::mprpc::ServiceT mprpcpeerb(m); ErrorConditionT err; - CallPoolT cwp{1, 100, 0, [](const size_t) {}, [](const size_t) {}}; + CallPoolT cwp{{1, 100, 0}, [](const size_t) {}, [](const size_t) {}}; frame::aio::Resolver resolver([&cwp](std::function&& _fnc) { cwp.pushOne(std::move(_fnc)); }); sch_peera.start(1); diff --git a/solid/frame/mprpc/test/test_relay_close_request.cpp b/solid/frame/mprpc/test/test_relay_close_request.cpp index dfc6c941..8f21c6e3 100644 --- a/solid/frame/mprpc/test/test_relay_close_request.cpp +++ b/solid/frame/mprpc/test/test_relay_close_request.cpp @@ -394,7 +394,7 @@ int test_relay_close_request(int argc, char* argv[]) frame::mprpc::ServiceT mprpcpeera(m); frame::mprpc::ServiceT mprpcpeerb(m); ErrorConditionT err; - CallPoolT cwp{1, 100, 0, [](const size_t) {}, [](const size_t) {}}; + CallPoolT cwp{{1, 100, 0}, [](const size_t) {}, [](const size_t) {}}; frame::aio::Resolver resolver([&cwp](std::function&& _fnc) { cwp.pushOne(std::move(_fnc)); }); sch_peera.start(1); diff --git a/solid/frame/mprpc/test/test_relay_close_response.cpp b/solid/frame/mprpc/test/test_relay_close_response.cpp index 0eb832c3..a7df6bec 100644 --- a/solid/frame/mprpc/test/test_relay_close_response.cpp +++ b/solid/frame/mprpc/test/test_relay_close_response.cpp @@ -394,7 +394,7 @@ 
int test_relay_close_response(int argc, char* argv[]) frame::mprpc::ServiceT mprpcpeera(m); frame::mprpc::ServiceT mprpcpeerb(m); ErrorConditionT err; - CallPoolT cwp{1, 100, 0, [](const size_t) {}, [](const size_t) {}}; + CallPoolT cwp{{1, 100, 0}, [](const size_t) {}, [](const size_t) {}}; frame::aio::Resolver resolver([&cwp](std::function&& _fnc) { cwp.pushOne(std::move(_fnc)); }); sch_peera.start(1); diff --git a/solid/frame/mprpc/test/test_relay_detect_close.cpp b/solid/frame/mprpc/test/test_relay_detect_close.cpp index 73b19471..1c2bc749 100644 --- a/solid/frame/mprpc/test/test_relay_detect_close.cpp +++ b/solid/frame/mprpc/test/test_relay_detect_close.cpp @@ -267,7 +267,7 @@ int test_relay_detect_close(int argc, char* argv[]) frame::mprpc::ServiceT mprpcpeera(m); frame::mprpc::ServiceT mprpcpeerb(m); ErrorConditionT err; - CallPoolT cwp{1, 100, 0, [](const size_t) {}, [](const size_t) {}}; + CallPoolT cwp{{1, 100, 0}, [](const size_t) {}, [](const size_t) {}}; frame::aio::Resolver resolver([&cwp](std::function&& _fnc) { cwp.pushOne(std::move(_fnc)); }); sch_peera.start(1); diff --git a/solid/frame/mprpc/test/test_relay_detect_close_while_response.cpp b/solid/frame/mprpc/test/test_relay_detect_close_while_response.cpp index 7b483745..048b53ab 100644 --- a/solid/frame/mprpc/test/test_relay_detect_close_while_response.cpp +++ b/solid/frame/mprpc/test/test_relay_detect_close_while_response.cpp @@ -306,7 +306,7 @@ int test_relay_detect_close_while_response(int argc, char* argv[]) frame::mprpc::ServiceT mprpcpeera(m); frame::mprpc::ServiceT mprpcpeerb(m); ErrorConditionT err; - CallPoolT cwp{1, 100, 0, [](const size_t) {}, [](const size_t) {}}; + CallPoolT cwp{{1, 100, 0}, [](const size_t) {}, [](const size_t) {}}; frame::aio::Resolver resolver([&cwp](std::function&& _fnc) { cwp.pushOne(std::move(_fnc)); }); sch_peera.start(1); diff --git a/solid/frame/mprpc/test/test_relay_disabled.cpp b/solid/frame/mprpc/test/test_relay_disabled.cpp index 09270a30..fba6eb06 100644 --- a/solid/frame/mprpc/test/test_relay_disabled.cpp +++ b/solid/frame/mprpc/test/test_relay_disabled.cpp @@ -360,7 +360,7 @@ int test_relay_disabled(int argc, char* argv[]) frame::mprpc::ServiceT mprpcpeera(m); frame::mprpc::ServiceT mprpcpeerb(m); ErrorConditionT err; - CallPoolT cwp{1, 100, 0, [](const size_t) {}, [](const size_t) {}}; + CallPoolT cwp{{1, 100, 0}, [](const size_t) {}, [](const size_t) {}}; frame::aio::Resolver resolver([&cwp](std::function&& _fnc) { cwp.pushOne(std::move(_fnc)); }); sch_peera.start(1); diff --git a/solid/frame/mprpc/test/test_relay_split.cpp b/solid/frame/mprpc/test/test_relay_split.cpp index ba15aca7..a5fbd7eb 100644 --- a/solid/frame/mprpc/test/test_relay_split.cpp +++ b/solid/frame/mprpc/test/test_relay_split.cpp @@ -413,7 +413,7 @@ int test_relay_split(int argc, char* argv[]) frame::mprpc::ServiceT mprpcpeera(m); frame::mprpc::ServiceT mprpcpeerb(m); ErrorConditionT err; - CallPoolT cwp{1, 100, 0, [](const size_t) {}, [](const size_t) {}}; + CallPoolT cwp{{1, 100, 0}, [](const size_t) {}, [](const size_t) {}}; frame::aio::Resolver resolver([&cwp](std::function&& _fnc) { cwp.pushOne(std::move(_fnc)); }); sch_peera.start(1); diff --git a/solid/utility/common.hpp b/solid/utility/common.hpp index b1235621..deeb38b3 100644 --- a/solid/utility/common.hpp +++ b/solid/utility/common.hpp @@ -12,6 +12,9 @@ #include "solid/system/common.hpp" #include +#ifdef __cpp_lib_bitops +#include +#endif namespace solid { @@ -72,11 +75,23 @@ inline constexpr size_t fast_padded_size(const size_t _sz, const 
size_t _bitpad) return _sz + pad; } +#ifdef __cpp_lib_bitops + +template +size_t bit_count(const T _v) +{ + return static_cast(std::popcount(_v)); +} + +#else + size_t bit_count(const uint8_t _v); size_t bit_count(const uint16_t _v); size_t bit_count(const uint32_t _v); size_t bit_count(const uint64_t _v); +#endif + inline size_t leading_zero_count(uint64_t x) { x = x | (x >> 1); diff --git a/solid/utility/test/test_callpool_multicast_basic.cpp b/solid/utility/test/test_callpool_multicast_basic.cpp index c1e525f6..42750f26 100644 --- a/solid/utility/test/test_callpool_multicast_basic.cpp +++ b/solid/utility/test/test_callpool_multicast_basic.cpp @@ -61,7 +61,7 @@ int test_callpool_multicast_basic(int argc, char* argv[]) ++worker_stop_count; }; { - CallPoolT cp{worker_count, 100000, 1000, worker_start, worker_stop, std::ref(context), std::ref(record_dq)}; + CallPoolT cp{{worker_count, 100000, 1000}, worker_start, worker_stop, std::ref(context), std::ref(record_dq)}; pwp = &cp; std::promise barrier; diff --git a/solid/utility/test/test_callpool_multicast_pattern.cpp b/solid/utility/test/test_callpool_multicast_pattern.cpp index 7aae8e76..e277f685 100644 --- a/solid/utility/test/test_callpool_multicast_pattern.cpp +++ b/solid/utility/test/test_callpool_multicast_pattern.cpp @@ -48,7 +48,7 @@ int test_callpool_multicast_pattern(int argc, char* argv[]) auto lambda = [&]() { for (int i = 0; i < loop_cnt; ++i) { { - CallPoolT cp{2, 100000, 100000, [](const size_t, Context&) {}, [](const size_t, Context& _rctx) {}, std::ref(context)}; + CallPoolT cp{{2, 100000, 100000}, [](const size_t, Context&) {}, [](const size_t, Context& _rctx) {}, std::ref(context)}; pwp = &cp; for (size_t j = 0; j < cnt; ++j) { diff --git a/solid/utility/test/test_collapse.cpp b/solid/utility/test/test_collapse.cpp index 9c6c7eb2..2af573ab 100644 --- a/solid/utility/test/test_collapse.cpp +++ b/solid/utility/test/test_collapse.cpp @@ -114,7 +114,7 @@ int test_collapse(int argc, char* argv[]) } } } else if (choice == 'p') { - CallPoolT wp{thread_count, 10000, 100, + CallPoolT wp{{thread_count, 10000, 100}, [](const size_t) { set_current_thread_affinity(); }, @@ -148,7 +148,7 @@ int test_collapse(int argc, char* argv[]) const auto stop_time = chrono::high_resolution_clock::now(); cout << "Duration: " << chrono::duration_cast(stop_time - start_time).count() << "us" << endl; } else if (choice == 'b') { - CallPoolT wp{thread_count, 10000, 100, + CallPoolT wp{{thread_count, 10000, 100}, [](const size_t) { set_current_thread_affinity(); }, diff --git a/solid/utility/test/test_threadpool.cpp b/solid/utility/test/test_threadpool.cpp index a05a4cdb..bd77d838 100644 --- a/solid/utility/test/test_threadpool.cpp +++ b/solid/utility/test/test_threadpool.cpp @@ -91,7 +91,7 @@ int test_threadpool(int argc, char* argv[]) // 1000 10 0 0 1 0 0 auto lambda = [&]() { ThreadPoolT wp{ - consumer_count, queue_size, 0, [](size_t, Context&&) {}, [](size_t, Context&&) {}, + {consumer_count, queue_size, 0}, [](size_t, Context&&) {}, [](size_t, Context&&) {}, [job_sleep_msecs](size_t _v, Context&& _rctx) { // solid_check(_rs == "this is a string", "failed string check"); diff --git a/solid/utility/test/test_threadpool_basic.cpp b/solid/utility/test/test_threadpool_basic.cpp index 46bc7763..a86c7f63 100644 --- a/solid/utility/test/test_threadpool_basic.cpp +++ b/solid/utility/test/test_threadpool_basic.cpp @@ -41,7 +41,7 @@ int test_threadpool_basic(int argc, char* argv[]) for (int i = 0; i < loop_cnt; ++i) { { ThreadPoolT wp{ - 2, 10000, 0, [](const 
size_t) {}, [](const size_t) {}, + {2, 10000, 0}, [](const size_t) {}, [](const size_t) {}, [&val](const size_t _v) { val += _v; }, diff --git a/solid/utility/test/test_threadpool_batch.cpp b/solid/utility/test/test_threadpool_batch.cpp index df4fc8a6..cab0cc05 100644 --- a/solid/utility/test/test_threadpool_batch.cpp +++ b/solid/utility/test/test_threadpool_batch.cpp @@ -75,13 +75,17 @@ void busy_for(const Dur& _dur) using AtomicCounterT = std::atomic; using AtomicCounterValueT = AtomicCounterT::value_type; atomic_size_t push_one_index{0}; -size_t capacity = 1024; +size_t capacity = 8 * 1024; +template +inline constexpr static auto computeCounter(const IndexT _index, const size_t _capacity) noexcept +{ + return (_index / _capacity) & std::numeric_limits::max(); +} std::tuple pushOneIndex() noexcept { - // return push_one_index_.fetch_add(1) % one_.capacity_; const auto index = push_one_index.fetch_add(1); - return {index % capacity, (index / capacity) & std::numeric_limits::max()}; + return {index % capacity, computeCounter(index, capacity)}; } } // namespace @@ -92,23 +96,27 @@ int test_threadpool_batch(int argc, char* argv[]) int wait_seconds = 500; size_t entry_count = 300; size_t repeat_count = 1000000; + solid_log(logger, Verbose, "capacity " << capacity << " reminder " << (std::numeric_limits::max() % capacity)); { vector cnt_vec(capacity, 0); + for (size_t i = 0; i < (capacity * std::numeric_limits::max() + capacity); ++i) { const auto [index, counter] = pushOneIndex(); - solid_check(cnt_vec[index] == counter, "" << (int)cnt_vec[index] << " != " << (int)counter << " index = " << index << " i = " << i); + solid_check(cnt_vec[index] == counter, "" << (int)cnt_vec[index] << " == " << (int)counter << " index = " << index << " i = " << i); ++cnt_vec[index]; } - push_one_index = std::numeric_limits::max() - capacity + 1; + push_one_index = std::numeric_limits::max() - (10 * capacity) + 1; - for (auto& rv : cnt_vec) { - rv = 238; + for (size_t i = 0; i < capacity; ++i) { + const auto [index, counter] = pushOneIndex(); + cnt_vec[index] = counter; + ++cnt_vec[index]; } for (size_t i = 0; i < (capacity * std::numeric_limits::max() + capacity); ++i) { const auto [index, counter] = pushOneIndex(); - solid_check(cnt_vec[index] == counter, "" << (int)cnt_vec[index] << " != " << (int)counter << " index = " << index << " i = " << i); + solid_check(cnt_vec[index] == counter, "" << (int)cnt_vec[index] << " == " << (int)counter << " index = " << index << " i = " << i << " one_index = " << push_one_index); ++cnt_vec[index]; } } @@ -119,8 +127,9 @@ int test_threadpool_batch(int argc, char* argv[]) { solid_log(logger, Verbose, "start"); CallPoolT wp{ - thread_count, 12000, 100, [](const size_t, Context&) {}, [](const size_t, Context& _rctx) {}, + {thread_count, 12000, 100}, [](const size_t, Context&) {}, [](const size_t, Context& _rctx) {}, std::ref(ctx)}; + solid_log(logger, Verbose, "TP capacity: one " << wp.capacityOne() << " all " << wp.capacityAll()); solid_log(logger, Verbose, "create contexts"); vector entries; for (size_t i = 0; i < entry_count; ++i) { diff --git a/solid/utility/test/test_threadpool_chain.cpp b/solid/utility/test/test_threadpool_chain.cpp index bd1b4782..fb119440 100644 --- a/solid/utility/test/test_threadpool_chain.cpp +++ b/solid/utility/test/test_threadpool_chain.cpp @@ -47,13 +47,13 @@ int test_threadpool_chain(int argc, char* argv[]) for (int i = 0; i < loop_cnt; ++i) { { ThreadPoolT wp_b{ - thread_count, 10000, 1000, [](const size_t) {}, [](const size_t) {}, + 
diff --git a/solid/utility/test/test_threadpool_chain.cpp b/solid/utility/test/test_threadpool_chain.cpp index bd1b4782..fb119440 100644 --- a/solid/utility/test/test_threadpool_chain.cpp +++ b/solid/utility/test/test_threadpool_chain.cpp @@ -47,13 +47,13 @@ int test_threadpool_chain(int argc, char* argv[]) for (int i = 0; i < loop_cnt; ++i) { { ThreadPoolT wp_b{ - thread_count, 10000, 1000, [](const size_t) {}, [](const size_t) {}, + {thread_count, 10000, 1000}, [](const size_t) {}, [](const size_t) {}, [&val](const size_t _v) { val += _v; }, [](const size_t) {}}; ThreadPoolT wp_f{ - thread_count, 10000, 1000, [](const size_t) {}, [](const size_t) {}, + {thread_count, 10000, 1000}, [](const size_t) {}, [](const size_t) {}, [&wp_b](const size_t _v) { wp_b.pushOne(_v); },
diff --git a/solid/utility/test/test_threadpool_context.cpp b/solid/utility/test/test_threadpool_context.cpp index 1c68acc8..1ec53a85 100644 --- a/solid/utility/test/test_threadpool_context.cpp +++ b/solid/utility/test/test_threadpool_context.cpp @@ -44,7 +44,7 @@ int test_threadpool_context(int argc, char* argv[]) Context ctx{"test", 1, cnt + 1}; { CallPoolT wp{ - 2, 10000, 100, [](const size_t, Context&) {}, [](const size_t, Context& _rctx) {}, + {2, 10000, 100}, [](const size_t, Context&) {}, [](const size_t, Context& _rctx) {}, std::ref(ctx)}; solid_log(generic_logger, Verbose, "wp started");
diff --git a/solid/utility/test/test_threadpool_multicast_basic.cpp b/solid/utility/test/test_threadpool_multicast_basic.cpp index 041cab0b..e33555f6 100644 --- a/solid/utility/test/test_threadpool_multicast_basic.cpp +++ b/solid/utility/test/test_threadpool_multicast_basic.cpp @@ -46,7 +46,7 @@ int test_threadpool_multicast_basic(int argc, char* argv[]) record_dq.resize(cnt, -1); { ThreadPoolT wp{ - 2, 10000, 1000, [](const size_t) {}, [](const size_t) {}, + {2, 10000, 1000}, [](const size_t) {}, [](const size_t) {}, [&val, &record_dq](const size_t _v) { val += _v; solid_check(record_dq[_v] == static_cast<size_t>(-1));
diff --git a/solid/utility/test/test_threadpool_multicast_sleep.cpp b/solid/utility/test/test_threadpool_multicast_sleep.cpp index 4d96a322..d0164d61 100644 --- a/solid/utility/test/test_threadpool_multicast_sleep.cpp +++ b/solid/utility/test/test_threadpool_multicast_sleep.cpp @@ -46,7 +46,7 @@ int test_threadpool_multicast_sleep(int argc, char* argv[]) record_dq.resize(cnt, -1); { ThreadPoolT wp{ - 2, 10000, 1000, [](const size_t) {}, [](const size_t) {}, + {2, 10000, 1000}, [](const size_t) {}, [](const size_t) {}, [&val, &record_dq](const size_t _v) { val += _v; solid_check(record_dq[_v] == static_cast<size_t>(-1));
diff --git a/solid/utility/test/test_threadpool_multicast_synchronization_context_basic.cpp b/solid/utility/test/test_threadpool_multicast_synchronization_context_basic.cpp index 391d0909..4dd1a3e3 100644 --- a/solid/utility/test/test_threadpool_multicast_synchronization_context_basic.cpp +++ b/solid/utility/test/test_threadpool_multicast_synchronization_context_basic.cpp @@ -59,7 +59,7 @@ int test_threadpool_multicast_synchronization_context_basic(int argc, char* argv record_dq.resize(count); ThreadPoolT wp{ - 2, 10000, 1000, [](const size_t) {}, [](const size_t) {}, + {2, 10000, 1000}, [](const size_t) {}, [](const size_t) {}, [&record_dq](const Record& _r) { solid_check(record_dq[_r.value_].multicast_value_ == static_cast<size_t>(-1)); record_dq[_r.value_].multicast_value_ = thread_local_value;
diff --git a/solid/utility/test/test_threadpool_pattern.cpp b/solid/utility/test/test_threadpool_pattern.cpp index 6c13c032..f1e9a270 100644 --- a/solid/utility/test/test_threadpool_pattern.cpp +++ b/solid/utility/test/test_threadpool_pattern.cpp @@ -59,7 +59,7 @@ int test_threadpool_pattern(int argc, char* argv[]) auto lambda = [&]() { ThreadPoolT wp{ - consumer_cnt, 10000, 0, [](const size_t) {}, [](const size_t) {}, + {consumer_cnt, 10000, 0}, [](const size_t) {}, [](const size_t) {}, [&sum, &consummer_pattern, loop = consummer_pattern[0].first, idx = 0](const size_t _v) mutable { sum += _v; --loop;
diff --git a/solid/utility/test/test_threadpool_thread_context.cpp b/solid/utility/test/test_threadpool_thread_context.cpp index 39e22043..cfebf41d 100644 --- a/solid/utility/test/test_threadpool_thread_context.cpp +++ b/solid/utility/test/test_threadpool_thread_context.cpp @@ -70,7 +70,7 @@ int test_threadpool_thread_context(int argc, char* argv[]) auto start = chrono::steady_clock::now(); { CallPoolT wp{ - 2, 1000, 0, [](const size_t, Context&&) {}, [](const size_t, Context&&) {}, + {2, 1000, 0}, [](const size_t, Context&&) {}, [](const size_t, Context&&) {}, Context("simple text", 0UL)}; solid_log(logger, Verbose, "wp started");
diff --git a/solid/utility/threadpool.hpp b/solid/utility/threadpool.hpp index e7ba9441..fe4086f3 100644 --- a/solid/utility/threadpool.hpp +++ b/solid/utility/threadpool.hpp @@ -10,6 +10,7 @@ #pragma once #include +#include <bit> #include #include #include @@ -168,6 +169,50 @@ struct EmptyThreadPoolStatistic : solid::Statistic { void clear() {} }; +struct ThreadPoolConfiguration { + static constexpr size_t default_one_capacity = 8 * 1024; + static constexpr size_t default_all_capacity = 1024; + + size_t thread_count_ = 1; + size_t one_capacity_ = default_one_capacity; + size_t all_capacity_ = default_all_capacity; + size_t spin_count_ = 1; + + ThreadPoolConfiguration( + const size_t _thread_count = 1, + const size_t _one_capacity = 10 * 1024, + const size_t _all_capacity = 1024, + const size_t _spin_count = 1) + : thread_count_(_thread_count) + , one_capacity_(_one_capacity) + , all_capacity_(_all_capacity) + , spin_count_(_spin_count) + { + } + + auto& threadCount(const size_t _value) + { + thread_count_ = _value; + return *this; + } + auto& oneCapacity(const size_t _value) + { + one_capacity_ = _value; + return *this; + } + auto& allCapacity(const size_t _value) + { + all_capacity_ = _value; + return *this; + } + + auto& spinCount(const size_t _value) + { + spin_count_ = _value; + return *this; + } +}; + template class ThreadPool;
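The new struct doubles as a fluent builder, so a call site can spell out only the knobs it cares about; a short usage sketch (values are arbitrary):

const auto config = solid::ThreadPoolConfiguration{}
                        .threadCount(4)
                        .oneCapacity(8 * 1024)
                        .allCapacity(1024)
                        .spinCount(10);
// then: SomePoolT pool{config, start_fnc, stop_fnc, ...};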
@@ -380,14 +425,6 @@ class ThreadPool : NonCopyable { }; private: - using AtomicLockT = std::atomic; - - enum struct LockE : AtomicLockT::value_type { - Empty = 0, - Pushing, - Filled, - Popping - }; enum struct EventE : uint8_t { Fill, Stop, @@ -433,15 +470,17 @@ class ThreadPool : NonCopyable { context_produce_id_ = 0; } - void waitWhilePushOne(Stats& _rstats, const AtomicCounterValueT _count) noexcept + void waitWhilePushOne(Stats& _rstats, const AtomicCounterValueT _count, const size_t _spin_count) noexcept { + auto spin = _spin_count; while (true) { const auto cnt = produce_count_.load(); if (cnt == _count) { break; - } else { + } else if (_spin_count && !spin--) { _rstats.pushOneWaitLock(); std::atomic_wait_explicit(&produce_count_, cnt, std::memory_order_relaxed); + spin = _spin_count; } } } @@ -455,14 +494,14 @@ class ThreadPool : NonCopyable { _rduration = duration_cast(steady_clock::now() - _start).count(); } - void waitWhileStop(Stats& _rstats, const AtomicCounterValueT _count) noexcept + void waitWhileStop(Stats& _rstats, const AtomicCounterValueT _count, const size_t _spin_count) noexcept { - waitWhilePushOne(_rstats, _count); + waitWhilePushOne(_rstats, _count, _spin_count); } - void waitWhilePushAll(Stats& _rstats, const AtomicCounterValueT _count) noexcept + void waitWhilePushAll(Stats& _rstats, const AtomicCounterValueT _count, const size_t _spin_count) noexcept { - waitWhilePushOne(_rstats, _count); + waitWhilePushOne(_rstats, _count, _spin_count); } void notifyWhileStop() noexcept @@ -483,17 +522,19 @@ class ThreadPool : NonCopyable { class Fnc, class AllFnc, typename... Args> - EventE waitWhilePop(Stats& _rstats, const AtomicCounterValueT _count, const Fnc& _try_consume_an_all_fnc, AllFnc& _all_fnc, Args&&... _args) noexcept + EventE waitWhilePop(Stats& _rstats, const AtomicCounterValueT _count, const size_t _spin_count, const Fnc& _try_consume_an_all_fnc, AllFnc& _all_fnc, Args&&... _args) noexcept { + auto spin = _spin_count; while (true) { const auto cnt = consume_count_.load(); if (cnt == _count) { return static_cast<EventE>(event_); - } else if (!_try_consume_an_all_fnc(&consume_count_, _count, _all_fnc, std::forward<Args>(_args)...)) { + } else if (!_try_consume_an_all_fnc(&consume_count_, _count, _all_fnc, std::forward<Args>(_args)...) && _spin_count && !spin--) { std::atomic_wait_explicit(&consume_count_, cnt, std::memory_order_relaxed); _rstats.popOneWaitPopping(); + spin = _spin_count; } } } @@ -527,15 +568,17 @@ class ThreadPool : NonCopyable { data_ptr_->destroy(); } - void waitWhilePushAll(Stats& _rstats, const AtomicCounterValueT _count) noexcept + void waitWhilePushAll(Stats& _rstats, const AtomicCounterValueT _count, const size_t _spin_count) noexcept { + auto spin = _spin_count; while (true) { const auto cnt = produce_count_.load(); if (cnt == _count) { break; - } else { + } else if (_spin_count && !spin--) { _rstats.pushOneWaitLock(); std::atomic_wait_explicit(&produce_count_, cnt, std::memory_order_relaxed); + spin = _spin_count; } } } @@ -569,6 +612,7 @@ class ThreadPool : NonCopyable { using OneStubT = OneStub; using ThreadVectorT = std::vector<std::thread>; + size_t spin_count_ = 1; /* alignas(hardware_constructive_interference_size) */ struct { size_t capacity_{0}; std::unique_ptr<OneStubT[]> tasks_; @@ -583,21 +627,23 @@ class ThreadPool : NonCopyable { std::unique_ptr<AllStubT[]> tasks_; std::unique_ptr<TaskData<TaskAll>[]> datas_; } all_; + Stats statistic_; - alignas(hardware_destructive_interference_size) std::atomic_size_t push_one_index_{0}; - alignas(hardware_destructive_interference_size) std::atomic_size_t pop_one_index_{0}; + using AtomicIndexT = std::atomic_size_t; + using AtomicIndexValueT = std::atomic_size_t::value_type; + + alignas(hardware_destructive_interference_size) AtomicIndexT push_one_index_{0}; + alignas(hardware_destructive_interference_size) AtomicIndexT pop_one_index_{0}; ThreadVectorT threads_; std::atomic<bool> running_{false}; - std::tuple pushOneIndex() noexcept + std::tuple pushOneIndex() noexcept { - // return push_one_index_.fetch_add(1) % one_.capacity_; const auto index = push_one_index_.fetch_add(1); return {index % one_.capacity_, computeCounter(index, one_.capacity_)}; } - std::tuple popOneIndex() noexcept + std::tuple popOneIndex() noexcept { - // return pop_one_index_.fetch_add(1) % one_.capacity_; const auto index = pop_one_index_.fetch_add(1); return {index % one_.capacity_, computeCounter(index, one_.capacity_)}; }
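The wait loops above all follow the same spin-then-block shape: spin for up to _spin_count iterations on the expected counter value, then fall back to a futex-style wait and re-arm the spin budget on wakeup. Isolated as a generic helper (a sketch under C++20, not a library function; note that a zero spin count degenerates to pure spinning, mirroring the code above):

#include <atomic>
#include <cstddef>

template <class T>
void spin_then_wait(std::atomic<T>& _counter, const T _expected, const std::size_t _spin_count)
{
    auto spin = _spin_count;
    while (true) {
        const auto current = _counter.load();
        if (current == _expected) {
            break; // our turn arrived
        } else if (_spin_count && !spin--) {
            // spun long enough: block until the counter changes, then re-arm the budget
            std::atomic_wait_explicit(&_counter, current, std::memory_order_relaxed);
            spin = _spin_count;
        }
    }
}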
@@ -626,13 +672,11 @@ class ThreadPool : NonCopyable { class AllFnc, typename... Args> void doStart( - const size_t _thread_count, - const size_t _one_capacity, - const size_t _all_capacity, - StartFnc _start_fnc, - StopFnc _stop_fnc, - OneFnc _one_fnc, - AllFnc _all_fnc, + const ThreadPoolConfiguration& _config, + StartFnc _start_fnc, + StopFnc _stop_fnc, + OneFnc _one_fnc, + AllFnc _all_fnc, Args&&... _args); void doStop(); @@ -710,6 +754,7 @@ class ThreadPool { public: using SynchronizationContextT = SynchronizationContext; + using ConfigurationT = ThreadPoolConfiguration; ThreadPool() = default; @@ -720,19 +765,15 @@ class ThreadPool { class AllFnc, typename... Args> ThreadPool( - const size_t _thread_count, - const size_t _one_capacity, - const size_t _all_capacity, - StartFnc _start_fnc, - StopFnc _stop_fnc, - OneFnc _one_fnc, - AllFnc _all_fnc, + const ThreadPoolConfiguration& _config, + StartFnc _start_fnc, + StopFnc _stop_fnc, + OneFnc _one_fnc, + AllFnc _all_fnc, Args&&... _args) { impl_.doStart( - _thread_count, - _one_capacity, - _all_capacity, + _config, _start_fnc, _stop_fnc, _one_fnc, @@ -747,19 +788,15 @@ class ThreadPool { class AllFnc, typename... Args> void start( - const size_t _thread_count, - const size_t _one_capacity, - const size_t _all_capacity, - StartFnc _start_fnc, - StopFnc _stop_fnc, - OneFnc _one_fnc, - AllFnc _all_fnc, + const ThreadPoolConfiguration& _config, + StartFnc _start_fnc, + StopFnc _stop_fnc, + OneFnc _one_fnc, + AllFnc _all_fnc, Args&&... _args) { impl_.doStart( - _thread_count, - _one_capacity, - _all_capacity, + _config, _start_fnc, _stop_fnc, _one_fnc, @@ -833,6 +870,7 @@ class ThreadPool, Function; + using ConfigurationT = ThreadPoolConfiguration; template static constexpr bool is_small_one_type() @@ -851,17 +889,13 @@ class ThreadPool, Function ThreadPool( - const size_t _thread_count, - const size_t _one_capacity, - const size_t _all_capacity, - StartFnc _start_fnc, - StopFnc _stop_fnc, + const ThreadPoolConfiguration& _config, + StartFnc _start_fnc, + StopFnc _stop_fnc, Args&&... _args) { impl_.doStart( - _thread_count, - _one_capacity, - _all_capacity, + _config, _start_fnc, _stop_fnc, [](OneFunctionT& _rfnc, Args&&... _args) { @@ -876,16 +910,12 @@ class ThreadPool, Function - void start(const size_t _thread_count, - const size_t _one_capacity, - const size_t _all_capacity, - StartFnc _start_fnc, - StopFnc _stop_fnc, Args... _args) + void start(const ThreadPoolConfiguration& _config, + StartFnc _start_fnc, + StopFnc _stop_fnc, Args... _args) { impl_.doStart( - _thread_count, - _one_capacity, - _all_capacity, + _config, _start_fnc, _stop_fnc, [](OneFunctionT& _rfnc, Args&&... _args) { @@ -953,20 +983,22 @@ class ThreadPool, Function -template + class AllFnc, + typename... Args> void ThreadPool::doStart( - const size_t _thread_count, - const size_t _one_capacity, - const size_t _all_capacity, - StartFnc _start_fnc, - StopFnc _stop_fnc, - OneFnc _one_fnc, - AllFnc _all_fnc, + const ThreadPoolConfiguration& _config, + StartFnc _start_fnc, + StopFnc _stop_fnc, + OneFnc _one_fnc, + AllFnc _all_fnc, Args&&... _args) { + static_assert( + (std::numeric_limits<AtomicCounterValueT>::max() % std::bit_ceil(ThreadPoolConfiguration::default_one_capacity)) == (std::bit_ceil(ThreadPoolConfiguration::default_one_capacity) - 1) && (std::numeric_limits<AtomicCounterValueT>::max() % std::bit_ceil(ThreadPoolConfiguration::default_all_capacity)) == (std::bit_ceil(ThreadPoolConfiguration::default_all_capacity) - 1)); bool expect = false; if (!running_.compare_exchange_strong(expect, true)) { @@ -976,11 +1008,11 @@ void ThreadPool::doStart( solid_dbg(generic_logger, Error, "sizeof(OneStub) = " << sizeof(OneStubT) << " sizeof(AllStub) = " << sizeof(AllStubT)); threads_.clear(); - threads_.reserve(_thread_count); const auto thread_count = _thread_count ?
_config.thread_count_ : std::thread::hardware_concurrency(); + threads_.reserve(thread_count); - one_.capacity_ = _one_capacity >= thread_count ? _one_capacity : std::max(static_cast<size_t>(1024), thread_count); + one_.capacity_ = std::bit_ceil(std::max(_config.one_capacity_, thread_count)); one_.tasks_.reset(new OneStubT[one_.capacity_]); one_.datas_.reset(new TaskData<TaskOne>[one_.capacity_]); @@ -988,16 +1020,21 @@ void ThreadPool::doStart( one_.tasks_[i].data_ptr_ = &one_.datas_[i]; } - all_.capacity_ = _all_capacity ? _all_capacity : 1; + all_.capacity_ = std::bit_ceil(_config.all_capacity_ ? _config.all_capacity_ : 1); all_.tasks_.reset(new AllStubT[all_.capacity_]); all_.datas_.reset(new TaskData<TaskAll>[all_.capacity_]); + solid_check( + (std::numeric_limits<AtomicCounterValueT>::max() % one_.capacity_) == (one_.capacity_ - 1) && (std::numeric_limits<AtomicCounterValueT>::max() % all_.capacity_) == (all_.capacity_ - 1)); + for (size_t i = 0; i < all_.capacity_; ++i) { all_.tasks_[i].data_ptr_ = &all_.datas_[i]; } all_.tasks_[0].produce_count_ = 1; //+ all_.tasks_[0].consume_count_ = 0; // first entry is skipped on the first iteration + spin_count_ = _config.spin_count_; + for (size_t i = 0; i < thread_count; ++i) { threads_.emplace_back( std::thread{ @@ -1024,7 +1061,7 @@ void ThreadPool::doStop() const auto [index, count] = pushOneIndex(); auto& rstub = one_.tasks_[index]; - rstub.waitWhileStop(statistic_, count); + rstub.waitWhileStop(statistic_, count, spin_count_); rstub.notifyWhileStop(); } @@ -1054,6 +1091,7 @@ void ThreadPool::doRun( const auto event = rstub.waitWhilePop( statistic_, count, + spin_count_, [this, &local_context]( AtomicCounterT* _pcounter, const AtomicCounterValueT _count, @@ -1223,7 +1261,7 @@ void ThreadPool::doPushOne(Tsk&& _task, ContextStub* _p const auto [index, count] = pushOneIndex(); auto& rstub = one_.tasks_[index]; - rstub.waitWhilePushOne(statistic_, count); + rstub.waitWhilePushOne(statistic_, count, spin_count_); rstub.task(std::forward<Tsk>(_task)); rstub.pcontext_ = _pctx; @@ -1250,7 +1288,7 @@ void ThreadPool::doPushAll(Tsk&& _task) const auto id = pushAllId(); auto& rstub = all_.tasks_[id % all_.capacity_]; - rstub.waitWhilePushAll(statistic_, computeCounter(id, all_.capacity_)); + rstub.waitWhilePushAll(statistic_, computeCounter(id, all_.capacity_), spin_count_); rstub.task(std::forward<Tsk>(_task)); @@ -1265,7 +1303,7 @@ void ThreadPool::doPushAll(Tsk&& _task) const auto [index, count] = pushOneIndex(); // TODO: auto& rstub = one_.tasks_[index]; - rstub.waitWhilePushAll(statistic_, count); + rstub.waitWhilePushAll(statistic_, count, spin_count_); rstub.all_id_ = id;
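The static_assert and the runtime solid_check above both verify the arithmetic property that motivates rounding capacities up with std::bit_ceil: the narrow lap counter only stays in lockstep with the 64-bit index across wrap-around when the capacity divides the index space evenly, which holds exactly for powers of two. A small self-contained illustration:

#include <bit>
#include <cassert>
#include <cstdint>
#include <limits>

int main()
{
    const std::size_t requested = 12000;
    const std::size_t capacity  = std::bit_ceil(requested); // rounds up to 16384
    // for any power-of-two capacity: max % capacity == capacity - 1
    assert(std::numeric_limits<std::uint64_t>::max() % capacity == capacity - 1);
    return 0;
}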
diff --git a/tutorials/mprpc_echo/mprpc_echo_client.cpp b/tutorials/mprpc_echo/mprpc_echo_client.cpp index 378e715c..bcf66fda 100644 --- a/tutorials/mprpc_echo/mprpc_echo_client.cpp +++ b/tutorials/mprpc_echo/mprpc_echo_client.cpp @@ -73,7 +73,7 @@ int main(int argc, char* argv[]) AioSchedulerT scheduler; frame::Manager manager; frame::mprpc::ServiceT rpcservice(manager); - CallPoolT cwp{1, 100, 0, [](const size_t) {}, [](const size_t) {}}; + CallPoolT cwp{{1, 100, 0}, [](const size_t) {}, [](const size_t) {}}; frame::aio::Resolver resolver([&cwp](std::function<void()>&& _fnc) { cwp.pushOne(std::move(_fnc)); }); ErrorConditionT err;
diff --git a/tutorials/mprpc_echo/mprpc_echo_client_pool.cpp b/tutorials/mprpc_echo/mprpc_echo_client_pool.cpp index 06cadf1c..15bca936 100644 --- a/tutorials/mprpc_echo/mprpc_echo_client_pool.cpp +++ b/tutorials/mprpc_echo/mprpc_echo_client_pool.cpp @@ -77,7 +77,7 @@ int main(int argc, char* argv[]) AioSchedulerT scheduler; frame::Manager manager; frame::mprpc::ServiceT rpcservice(manager); - CallPoolT cwp{1, 100, 0, [](const size_t) {}, [](const size_t) {}}; + CallPoolT cwp{{1, 100, 0}, [](const size_t) {}, [](const size_t) {}}; frame::aio::Resolver resolver([&cwp](std::function<void()>&& _fnc) { cwp.pushOne(std::move(_fnc)); }); ErrorConditionT err;
diff --git a/tutorials/mprpc_echo_relay/mprpc_echo_relay_client.cpp b/tutorials/mprpc_echo_relay/mprpc_echo_relay_client.cpp index ccb06fe3..05924f1c 100644 --- a/tutorials/mprpc_echo_relay/mprpc_echo_relay_client.cpp +++ b/tutorials/mprpc_echo_relay/mprpc_echo_relay_client.cpp @@ -71,7 +71,7 @@ int main(int argc, char* argv[]) frame::Manager manager; frame::mprpc::ServiceT rpcservice(manager); ErrorConditionT err; - CallPoolT cwp{1, 100, 0, [](const size_t) {}, [](const size_t) {}}; + CallPoolT cwp{{1, 100, 0}, [](const size_t) {}, [](const size_t) {}}; frame::aio::Resolver resolver([&cwp](std::function<void()>&& _fnc) { cwp.pushOne(std::move(_fnc)); }); scheduler.start(1);
diff --git a/tutorials/mprpc_file/mprpc_file_client.cpp b/tutorials/mprpc_file/mprpc_file_client.cpp index 4c2c2ce2..4c201080 100644 --- a/tutorials/mprpc_file/mprpc_file_client.cpp +++ b/tutorials/mprpc_file/mprpc_file_client.cpp @@ -80,7 +80,7 @@ int main(int argc, char* argv[]) AioSchedulerT scheduler; frame::Manager manager; frame::mprpc::ServiceT rpcservice(manager); - CallPoolT cwp{1, 100, 0, [](const size_t) {}, [](const size_t) {}}; + CallPoolT cwp{{1, 100, 0}, [](const size_t) {}, [](const size_t) {}}; frame::aio::Resolver resolver([&cwp](std::function<void()>&& _fnc) { cwp.pushOne(std::move(_fnc)); }); ErrorConditionT err;
diff --git a/tutorials/mprpc_request/mprpc_request_client.cpp b/tutorials/mprpc_request/mprpc_request_client.cpp index 83870eb4..3767e842 100644 --- a/tutorials/mprpc_request/mprpc_request_client.cpp +++ b/tutorials/mprpc_request/mprpc_request_client.cpp @@ -78,7 +78,7 @@ int main(int argc, char* argv[]) AioSchedulerT scheduler; frame::Manager manager; frame::mprpc::ServiceT rpcservice(manager); - CallPoolT cwp{1, 100, 0, [](const size_t) {}, [](const size_t) {}}; + CallPoolT cwp{{1, 100, 0}, [](const size_t) {}, [](const size_t) {}}; frame::aio::Resolver resolver([&cwp](std::function<void()>&& _fnc) { cwp.pushOne(std::move(_fnc)); }); ErrorConditionT err;
diff --git a/tutorials/mprpc_request_ssl/mprpc_request_client.cpp b/tutorials/mprpc_request_ssl/mprpc_request_client.cpp index 427e5095..717d426b 100644 --- a/tutorials/mprpc_request_ssl/mprpc_request_client.cpp +++ b/tutorials/mprpc_request_ssl/mprpc_request_client.cpp @@ -94,7 +94,7 @@ int main(int argc, char* argv[]) AioSchedulerT scheduler; frame::Manager manager; frame::mprpc::ServiceT rpcservice(manager); - CallPoolT cwp{1, 100, 0, [](const size_t) {}, [](const size_t) {}}; + CallPoolT cwp{{1, 100, 0}, [](const size_t) {}, [](const size_t) {}}; frame::aio::Resolver resolver([&cwp](std::function<void()>&& _fnc) { cwp.pushOne(std::move(_fnc)); }); ErrorConditionT err;
From 7dc8ef67e9c53b6fdf12c7227d787f98d55ca679 Mon Sep 17 00:00:00 2001 From: Valentin Palade Date: Fri, 21 Jun 2024 19:39:10 +0300 Subject: [PATCH 09/13] threadpool: update reactors with the same techniques used by threadpool --- solid/frame/aio/aioreactor.hpp | 35 +++++++++--------- solid/frame/aio/src/aioreactor.cpp | 2 +- solid/frame/reactor.hpp | 27 +++++++------- solid/frame/reactorbase.hpp | 57 ++++++++++++++++++++++++++++++ solid/frame/src/reactor.cpp | 3 +- 5 files changed, 93 insertions(+), 31 deletions(-)
diff --git a/solid/frame/aio/aioreactor.hpp
b/solid/frame/aio/aioreactor.hpp index 4c0120a9..19717b48 100644 --- a/solid/frame/aio/aioreactor.hpp +++ b/solid/frame/aio/aioreactor.hpp @@ -10,6 +10,8 @@ #pragma once +#include + #include "solid/frame/aio/aiocommon.hpp" #include "solid/frame/aio/aioreactorcontext.hpp" #include "solid/frame/common.hpp" @@ -140,9 +142,10 @@ class Reactor : public frame::ReactorBase { Reactor(SchedulerBase& _rsched, StatisticT& _rstatistic, const size_t _schedidx, const size_t _wake_capacity); ~Reactor(); - size_t pushWakeIndex() noexcept + std::tuple<size_t, frame::impl::AtomicCounterValueT> pushWakeIndex() noexcept { - return push_wake_index_.fetch_add(1) % wake_capacity_; + const auto index = push_wake_index_.fetch_add(1); + return {index % wake_capacity_, frame::impl::computeCounter(index, wake_capacity_)}; } template @@ -374,7 +377,7 @@ class Reactor : public impl::Reactor { Reactor(SchedulerBase& _rsched, StatisticT& _rstatistic, const size_t _sched_idx, const size_t _wake_capacity) : impl::Reactor(_rsched, _rstatistic, _sched_idx, _wake_capacity) - , wake_arr_(new WakeStubT[_wake_capacity]) + , wake_arr_(new WakeStubT[wake_capacity_]) { } @@ -385,10 +388,10 @@ class Reactor : public impl::Reactor { mutex().lock(); const UniqueId uid = this->popUid(*_ract); mutex().unlock(); - const auto index = pushWakeIndex(); - auto& rstub = wake_arr_[index]; + const auto [index, count] = pushWakeIndex(); + auto& rstub = wake_arr_[index]; - rstub.waitWhilePush(rstatistic_); + rstub.waitWhilePush(rstatistic_, count); rstub.reset(uid, _revent, std::move(_ract), &_rsvc); @@ -414,10 +417,10 @@ class Reactor : public impl::Reactor { mutex().lock(); const UniqueId uid = this->popUid(*_ract); mutex().unlock(); - const auto index = pushWakeIndex(); - auto& rstub = wake_arr_[index]; + const auto [index, count] = pushWakeIndex(); + auto& rstub = wake_arr_[index]; - rstub.waitWhilePush(rstatistic_); + rstub.waitWhilePush(rstatistic_, count); rstub.reset(uid, std::move(_revent), std::move(_ract), &_rsvc); @@ -442,10 +445,10 @@ class Reactor : public impl::Reactor { { bool notify = false; { - const auto index = pushWakeIndex(); - auto& rstub = wake_arr_[index]; + const auto [index, count] = pushWakeIndex(); + auto& rstub = wake_arr_[index]; - rstub.waitWhilePush(rstatistic_); + rstub.waitWhilePush(rstatistic_, count); rstub.reset(_ractuid, _revent); @@ -468,10 +471,10 @@ class Reactor : public impl::Reactor { { bool notify = false; { - const auto index = pushWakeIndex(); - auto& rstub = wake_arr_[index]; + const auto [index, count] = pushWakeIndex(); + auto& rstub = wake_arr_[index]; - rstub.waitWhilePush(rstatistic_); + rstub.waitWhilePush(rstatistic_, count); rstub.reset(_ractuid, std::move(_revent)); @@ -533,7 +536,7 @@ class Reactor : public impl::Reactor { while (true) { const size_t index = pop_wake_index_ % wake_capacity_; auto& rstub = wake_arr_[index]; - if (rstub.isFilled()) { + if (rstub.isFilled(pop_wake_index_, wake_capacity_)) { if (rstub.actor_ptr_) [[unlikely]] { ++actor_count_; rstatistic_.actorCount(actor_count_);
diff --git a/solid/frame/aio/src/aioreactor.cpp b/solid/frame/aio/src/aioreactor.cpp index f25b7923..852aa28e 100644 --- a/solid/frame/aio/src/aioreactor.cpp +++ b/solid/frame/aio/src/aioreactor.cpp @@ -343,7 +343,7 @@ Reactor::Reactor( SchedulerBase& _rsched, StatisticT& _rstatistic, const size_t _idx, const size_t _wake_capacity) : ReactorBase(_rsched, _idx) - , wake_capacity_(_wake_capacity) + , wake_capacity_(std::bit_ceil(_wake_capacity)) , rstatistic_(_rstatistic) { solid_log(logger, Verbose, "");
diff --git a/solid/frame/reactor.hpp
b/solid/frame/reactor.hpp index 64e8b758..2b1c34c3 100644 --- a/solid/frame/reactor.hpp +++ b/solid/frame/reactor.hpp @@ -120,9 +120,10 @@ class Reactor : public frame::ReactorBase { Reactor(SchedulerBase& _rsched, StatisticT& _rstatistic, const size_t _schedidx, const size_t _wake_capacity); ~Reactor(); - size_t pushWakeIndex() noexcept + std::tuple<size_t, impl::AtomicCounterValueT> pushWakeIndex() noexcept { - return push_wake_index_.fetch_add(1) % wake_capacity_; + const auto index = push_wake_index_.fetch_add(1); + return {index % wake_capacity_, computeCounter(index, wake_capacity_)}; } template @@ -339,7 +340,7 @@ class Reactor : public impl::Reactor { Reactor(SchedulerBase& _rsched, StatisticT& _rstatistic, const size_t _sched_idx, const size_t _wake_capacity) : impl::Reactor(_rsched, _rstatistic, _sched_idx, _wake_capacity) - , wake_arr_(new WakeStubT[_wake_capacity]) + , wake_arr_(new WakeStubT[wake_capacity_]) { } @@ -379,10 +380,10 @@ class Reactor : public impl::Reactor { mutex().lock(); const UniqueId uid = this->popUid(*_ract); mutex().unlock(); - const auto index = pushWakeIndex(); - auto& rstub = wake_arr_[index]; + const auto [index, count] = pushWakeIndex(); + auto& rstub = wake_arr_[index]; - rstub.waitWhilePush(rstatistic_); + rstub.waitWhilePush(rstatistic_, count); rstub.reset(uid, std::move(_revent), std::move(_ract), &_rsvc); @@ -407,10 +408,10 @@ class Reactor : public impl::Reactor { { bool notify = false; { - const auto index = pushWakeIndex(); - auto& rstub = wake_arr_[index]; + const auto [index, count] = pushWakeIndex(); + auto& rstub = wake_arr_[index]; - rstub.waitWhilePush(rstatistic_); + rstub.waitWhilePush(rstatistic_, count); rstub.reset(_ractuid, _revent); @@ -432,10 +433,10 @@ class Reactor : public impl::Reactor { { bool notify = false; { - const auto index = pushWakeIndex(); - auto& rstub = wake_arr_[index]; + const auto [index, count] = pushWakeIndex(); + auto& rstub = wake_arr_[index]; - rstub.waitWhilePush(rstatistic_); + rstub.waitWhilePush(rstatistic_, count); rstub.reset(_ractuid, std::move(_revent)); @@ -492,7 +493,7 @@ class Reactor : public impl::Reactor { while (true) { const size_t index = pop_wake_index_ % wake_capacity_; auto& rstub = wake_arr_[index]; - if (rstub.isFilled()) { + if (rstub.isFilled(pop_wake_index_, wake_capacity_)) { if (rstub.actor_ptr_) [[unlikely]] { ++actor_count_; rstatistic_.actorCount(actor_count_);
diff --git a/solid/frame/reactorbase.hpp b/solid/frame/reactorbase.hpp index b6683891..bdfa87de 100644 --- a/solid/frame/reactorbase.hpp +++ b/solid/frame/reactorbase.hpp @@ -46,7 +46,10 @@ struct ReactorStatisticBase : solid::Statistic { namespace impl { +#if 0 struct WakeStubBase { + using AtomicCounterT = std::atomic<uint8_t>; + using AtomicCounterValueT = AtomicCounterT::value_type; enum struct LockE : uint8_t { Empty = 0, Pushing, Filled, }; #if defined(__cpp_lib_atomic_wait) std::atomic_flag pushing_ = ATOMIC_FLAG_INIT; #else std::atomic_bool pushing_ = {false}; #endif std::atomic_uint8_t lock_ = {to_underlying(LockE::Empty)}; template <class Statistic> void waitWhilePush(Statistic& _rstats) noexcept { while (true) { @@ -116,6 +119,60 @@ struct WakeStubBase { } }; +#else +using AtomicIndexT = std::atomic_size_t; +using AtomicIndexValueT = std::atomic_size_t::value_type; +using AtomicCounterT = std::atomic<uint8_t>; +using AtomicCounterValueT = AtomicCounterT::value_type; + +template <class IndexT> +inline constexpr static auto computeCounter(const IndexT _index, const size_t _capacity) noexcept +{ + return (_index / _capacity) & std::numeric_limits<AtomicCounterValueT>::max(); +} + +struct WakeStubBase { + AtomicCounterT produce_count_{0}; + AtomicCounterT consume_count_{static_cast<AtomicCounterValueT>(-1)}; + + template <class Statistic> + void waitWhilePush(Statistic& _rstats, const AtomicCounterValueT _count, const size_t _spin_count = 1) noexcept + { + auto spin = _spin_count; + while (true) { + const auto cnt =
produce_count_.load(); + if (cnt == _count) { + break; + } else if (_spin_count && !spin--) { + _rstats.pushWhileWaitLock(); + std::atomic_wait_explicit(&produce_count_, cnt, std::memory_order_relaxed); + spin = _spin_count; + } + } + } + + void notifyWhilePush() noexcept + { + ++consume_count_; + std::atomic_notify_one(&consume_count_); + } + + void notifyWhilePop() noexcept + { + ++produce_count_; + std::atomic_notify_one(&produce_count_); + } + + bool isFilled(const uint64_t _id, const size_t _capacity) const + { + const auto count = consume_count_.load(std::memory_order_relaxed); + const AtomicCounterValueT expected_count = computeCounter(_id, _capacity); + return count == expected_count; + } +}; + +#endif + } // namespace impl //! The base for every selector
diff --git a/solid/frame/src/reactor.cpp b/solid/frame/src/reactor.cpp index 507592f1..10b25ac3 100644 --- a/solid/frame/src/reactor.cpp +++ b/solid/frame/src/reactor.cpp @@ -7,6 +7,7 @@ // Distributed under the Boost Software License, Version 1.0. // See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt. // +#include <bit> #include #include #include @@ -154,7 +155,7 @@ Reactor::Reactor( SchedulerBase& _rsched, StatisticT& _rstatistic, const size_t _idx, const size_t _wake_capacity) : ReactorBase(_rsched, _idx) - , wake_capacity_(_wake_capacity) + , wake_capacity_(std::bit_ceil(_wake_capacity)) , rstatistic_(_rstatistic) { solid_log(frame_logger, Verbose, "");
From f8f7fb839a535b8acc14ee22f81a0012e16a27ca Mon Sep 17 00:00:00 2001 From: Valentin Palade Date: Mon, 24 Jun 2024 19:26:08 +0300 Subject: [PATCH 10/13] threadpool: pthread_spin_lock --- cmake/check.config.cmake | 4 +++ cmake/check/pthread_spinlock.cpp | 8 ++++++ solid/system/configuration_impl.hpp.in | 1 + solid/system/spinlock.hpp | 40 +++++++++++++++++++++++--- 4 files changed, 49 insertions(+), 4 deletions(-) create mode 100644 cmake/check/pthread_spinlock.cpp
diff --git a/cmake/check.config.cmake b/cmake/check.config.cmake index 302456f8..39f78fe1 100644 --- a/cmake/check.config.cmake +++ b/cmake/check.config.cmake @@ -45,5 +45,9 @@ file (READ "${CMAKE_CURRENT_SOURCE_DIR}/cmake/check/epoll2.cpp" source_code) CHECK_CXX_SOURCE_RUNS("${source_code}" SOLID_USE_EPOLL2) +file (READ "${CMAKE_CURRENT_SOURCE_DIR}/cmake/check/pthread_spinlock.cpp" source_code) + +CHECK_CXX_SOURCE_COMPILES("${source_code}" SOLID_USE_PTHREAD_SPINLOCK) + #TODO: #set(SOLID_FRAME_AIO_REACTOR_USE_SPINLOCK TRUE) \ No newline at end of file
diff --git a/cmake/check/pthread_spinlock.cpp b/cmake/check/pthread_spinlock.cpp new file mode 100644 index 00000000..9f22f7bd --- /dev/null +++ b/cmake/check/pthread_spinlock.cpp @@ -0,0 +1,8 @@ +#include <pthread.h> + +int main(){ + pthread_spinlock_t spl; + auto rv = pthread_spin_init(&spl, PTHREAD_PROCESS_PRIVATE); + if(rv != 0) return -1; + return 0; +} \ No newline at end of file
diff --git a/solid/system/configuration_impl.hpp.in b/solid/system/configuration_impl.hpp.in index 5c396d03..9ec992ff 100644 --- a/solid/system/configuration_impl.hpp.in +++ b/solid/system/configuration_impl.hpp.in @@ -1,6 +1,7 @@ #pragma once #cmakedefine SOLID_USE_PTHREAD +#cmakedefine SOLID_USE_PTHREAD_SPINLOCK #cmakedefine SOLID_USE_EVENTFD #cmakedefine SOLID_USE_EPOLL #cmakedefine SOLID_USE_KQUEUE
diff --git a/solid/system/spinlock.hpp b/solid/system/spinlock.hpp index 65080458..77ec8505 100644 --- a/solid/system/spinlock.hpp +++ b/solid/system/spinlock.hpp @@ -10,6 +10,12 @@ #pragma once #include "solid/system/common.hpp" + +#ifdef SOLID_USE_PTHREAD_SPINLOCK +
+#include <pthread.h> + +#else #include #include @@ -25,6 +31,7 @@ #include #include #endif #endif +#endif namespace solid { @@ -45,8 +52,34 @@ inline void cpu_pause() { #endif } +#if defined(SOLID_USE_PTHREAD_SPINLOCK) +class SpinLock : NonCopyable { + pthread_spinlock_t spin_; + +public: + SpinLock() + { + solid_check(pthread_spin_init(&spin_, PTHREAD_PROCESS_PRIVATE) == 0); + } + void lock() noexcept + { + pthread_spin_lock(&spin_); + } + + bool try_lock() noexcept + { + return pthread_spin_trylock(&spin_) == 0; + } + + void unlock() noexcept + { + pthread_spin_unlock(&spin_); + } +}; +#else + #if defined(__cpp_lib_atomic_flag_test) -class SpinLock { +class SpinLock : NonCopyable { std::atomic_flag atomic_flag = ATOMIC_FLAG_INIT; public: @@ -58,8 +91,6 @@ class SpinLock { } while (atomic_flag.test(std::memory_order_relaxed)) { cpu_pause(); - //_mm_pause(); - // std::this_thread::yield(); } } } @@ -78,7 +109,7 @@ class SpinLock { }; #else // https://rigtorp.se/spinlock/ -class SpinLock { +class SpinLock : NonCopyable { std::atomic lock_ = {0}; public: @@ -111,6 +142,7 @@ class SpinLock { } }; #endif +#endif using SpinGuardT = std::lock_guard<SpinLock>;
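Call sites are unaffected by which of the three implementations gets compiled in (pthread_spin_*, atomic_flag, or the rigtorp-style fallback), since all expose the same lock/try_lock/unlock interface behind the same guard alias; a usage sketch:

#include "solid/system/spinlock.hpp"

solid::SpinLock lock;
int             shared_value = 0;

void increment()
{
    solid::SpinGuardT guard{lock}; // std::lock_guard<SpinLock>, whatever the backend
    ++shared_value;
}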
From 33173d15832f019268ead8f5db5b037b0a90b4ba Mon Sep 17 00:00:00 2001 From: Valentin Palade Date: Mon, 24 Jun 2024 20:45:27 +0300 Subject: [PATCH 11/13] threadpool: compile on macos --- solid/utility/test/test_threadpool_batch.cpp | 2 ++ 1 file changed, 2 insertions(+)
diff --git a/solid/utility/test/test_threadpool_batch.cpp b/solid/utility/test/test_threadpool_batch.cpp index cab0cc05..660fb493 100644 --- a/solid/utility/test/test_threadpool_batch.cpp +++ b/solid/utility/test/test_threadpool_batch.cpp @@ -37,6 +37,8 @@ constexpr size_t one_task_size = 64; using CallPoolT = ThreadPool<Function<void(), one_task_size>, Function<void(), one_task_size>>; struct Entry { CallPoolT::SynchronizationContextT ctx_; + + Entry(CallPoolT::SynchronizationContextT &&_ctx):ctx_(std::move(_ctx)){} }; constexpr size_t thread_count = 10;
From 93634e9f4509f7f2cdf470b3f3db34b105679a78 Mon Sep 17 00:00:00 2001 From: Valentin Palade Date: Tue, 25 Jun 2024 09:39:41 +0300 Subject: [PATCH 12/13] threadpool: fixes --- solid/frame/reactorbase.hpp | 81 +------------------- solid/utility/test/test_threadpool_batch.cpp | 5 +- solid/utility/threadpool.hpp | 10 +-- 3 files changed, 11 insertions(+), 85 deletions(-)
diff --git a/solid/frame/reactorbase.hpp b/solid/frame/reactorbase.hpp index bdfa87de..51075404 100644 --- a/solid/frame/reactorbase.hpp +++ b/solid/frame/reactorbase.hpp @@ -45,81 +45,6 @@ struct ReactorStatisticBase : solid::Statistic { }; namespace impl { - -#if 0 -struct WakeStubBase { - using AtomicCounterT = std::atomic<uint8_t>; - using AtomicCounterValueT = AtomicCounterT::value_type; - enum struct LockE : uint8_t { - Empty = 0, - Pushing, - Filled, - }; -#if defined(__cpp_lib_atomic_wait) - std::atomic_flag pushing_ = ATOMIC_FLAG_INIT; -#else - std::atomic_bool pushing_ = {false}; -#endif - std::atomic_uint8_t lock_ = {to_underlying(LockE::Empty)}; - - template <class Statistic> - void waitWhilePush(Statistic& _rstats) noexcept - { - while (true) { -#if defined(__cpp_lib_atomic_wait) - const bool already_pushing = pushing_.test_and_set(std::memory_order_acquire); -#else - bool expected = false; - const bool already_pushing = !pushing_.compare_exchange_strong(expected, true, std::memory_order_acquire); -#endif - if (!already_pushing) { - // wait for lock to be 0. - uint8_t value = to_underlying(LockE::Empty); - - if (!lock_.compare_exchange_weak(value, to_underlying(LockE::Pushing))) { - do { - std::atomic_wait(&lock_, value); - value = to_underlying(LockE::Empty); - } while (!lock_.compare_exchange_weak(value, to_underlying(LockE::Pushing))); - _rstats.pushWhileWaitLock(); - } - return; - } else { -#if defined(__cpp_lib_atomic_wait) - pushing_.wait(true); -#else - std::atomic_wait(&pushing_, true); -#endif - _rstats.pushWhileWaitPushing(); - } - } - } - - void notifyWhilePush() noexcept - { - lock_.store(to_underlying(LockE::Filled)); -#if defined(__cpp_lib_atomic_wait) - pushing_.clear(std::memory_order_release); - pushing_.notify_one(); -#else - pushing_.store(false, std::memory_order_release); - std::atomic_notify_one(&pushing_); -#endif - } - - void notifyWhilePop() noexcept - { - lock_.store(to_underlying(LockE::Empty)); - std::atomic_notify_one(&lock_); - } - - bool isFilled() const noexcept - { - return lock_.load() == to_underlying(LockE::Filled); - } -}; - -#else using AtomicIndexT = std::atomic_size_t; using AtomicIndexValueT = std::atomic_size_t::value_type; using AtomicCounterT = std::atomic<uint8_t>; using AtomicCounterValueT = AtomicCounterT::value_type; @@ -154,13 +79,13 @@ struct WakeStubBase { void notifyWhilePush() noexcept { ++consume_count_; - std::atomic_notify_one(&consume_count_); + std::atomic_notify_all(&consume_count_); } void notifyWhilePop() noexcept { ++produce_count_; - std::atomic_notify_one(&produce_count_); + std::atomic_notify_all(&produce_count_); } bool isFilled(const uint64_t _id, const size_t _capacity) const @@ -171,8 +96,6 @@ struct WakeStubBase { } }; -#endif - } // namespace impl //! The base for every selector
diff --git a/solid/utility/test/test_threadpool_batch.cpp b/solid/utility/test/test_threadpool_batch.cpp index 660fb493..c51195fa 100644 --- a/solid/utility/test/test_threadpool_batch.cpp +++ b/solid/utility/test/test_threadpool_batch.cpp @@ -38,7 +38,10 @@ using CallPoolT = ThreadPool, Function(steady_clock::now() - _start).count(); } @@ -508,14 +508,14 @@ class ThreadPool : NonCopyable { { event_ = to_underlying(EventE::Stop); ++consume_count_; - std::atomic_notify_one(&consume_count_); + std::atomic_notify_all(&consume_count_); } void notifyWhilePushAll() noexcept { event_ = to_underlying(EventE::Wake); ++consume_count_; - std::atomic_notify_one(&consume_count_); + std::atomic_notify_all(&consume_count_); } template < @@ -542,7 +542,7 @@ class ThreadPool : NonCopyable { void notifyWhilePop() noexcept { ++produce_count_; - std::atomic_notify_one(&produce_count_); + std::atomic_notify_all(&produce_count_); } }; @@ -595,7 +595,7 @@ class ThreadPool : NonCopyable { if (use_count_.fetch_sub(1) == 1) { destroy(); ++produce_count_; - std::atomic_notify_one(&produce_count_); + std::atomic_notify_all(&produce_count_); return true; } return false;
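The notify_one-to-notify_all switch in this commit looks like the classic lost-wakeup fix: with per-slot counters, more than one thread can be blocked on the same atomic, and waking a single arbitrary waiter may pick one whose expected value is still unsatisfied while the thread that could make progress stays asleep. Presumably that is the failure mode addressed here; a minimal sketch of the pattern:

#include <atomic>
#include <cstdint>

std::atomic<std::uint8_t> counter{0};

void advance()
{
    ++counter;
    // wake every waiter; each re-checks its own expected value and
    // goes back to sleep if its turn has not come yet
    std::atomic_notify_all(&counter);
}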
From 1693e0876870e54b5963220d0e724c60f12f81e7 Mon Sep 17 00:00:00 2001 From: Valentin Palade Date: Sat, 29 Jun 2024 10:40:45 +0300 Subject: [PATCH 13/13] VERSION 12.0 --- CMakeLists.txt | 2 +- RELEASES.md | 6 ++++++ 2 files changed, 7 insertions(+), 1 deletion(-)
diff --git a/CMakeLists.txt b/CMakeLists.txt index 1c27c806..d4938c71 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -2,7 +2,7 @@ cmake_minimum_required(VERSION 3.13 FATAL_ERROR) #----------------------------------------------------------------- # The project #----------------------------------------------------------------- -project (SolidFrame VERSION 11.1) +project (SolidFrame VERSION 12.0) message("SolidFrame version: ${PROJECT_VERSION} - ${PROJECT_VERSION_MAJOR}.${PROJECT_VERSION_MINOR}") #-----------------------------------------------------------------
diff --git a/RELEASES.md b/RELEASES.md index 8f2725cc..a36282e3 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -1,5 +1,11 @@ # SolidFrame Releases +## Version 12.0 +* utility: support pthread_spin_lock when available +* utility: ThreadPool improvements and fixes +* frame: Reactor and aio::Reactor using the same new technique from ThreadPool for event passing +* mprpc: preparing the stage for relay multicast support ## Version 11.1 * mprpc: Split Connection in ClientConnection, ServerConnection, RelayConnection * mprpc: Some cleanup and some small improvements on the MessageReader and MessageWriter