From 982f54a28ae62e85772cb4f071c46c138fd95016 Mon Sep 17 00:00:00 2001 From: Edward Tremel Date: Fri, 3 May 2024 20:27:27 -0400 Subject: [PATCH 1/3] Updated Verbs files so they compile with the current Derecho API Apparently we haven't tested the -DUSE_VERBS_API flag in a while, because there are a bunch of compile errors if you use it. Some are due to minor changes to Derecho's internals that were never propagated to verbs.hpp/cpp, like the ip_addr_t declaration moving inside the derecho namespace. The rest are due to the new OOB API only being implemented on LibFabrics. I added declarations for the OOB functions to the Verbs interface, with function bodies that simply throw exceptions, so that Derecho can at least compile with the Verbs interface (even if none of the OOB functions actually work). --- .../derecho/core/detail/p2p_connection.hpp | 1 + include/derecho/rdmc/detail/lf_helper.hpp | 1 + include/derecho/rdmc/detail/verbs_helper.hpp | 3 + include/derecho/sst/detail/lf.hpp | 1 + include/derecho/sst/detail/verbs.hpp | 139 +++++++++++++++++- src/core/git_version.cpp | 4 +- src/sst/verbs.cpp | 83 ++++++++--- 7 files changed, 201 insertions(+), 31 deletions(-) diff --git a/include/derecho/core/detail/p2p_connection.hpp b/include/derecho/core/detail/p2p_connection.hpp index 5fd7bf15..db2dfb7a 100644 --- a/include/derecho/core/detail/p2p_connection.hpp +++ b/include/derecho/core/detail/p2p_connection.hpp @@ -6,6 +6,7 @@ #else #include #endif +#include "derecho/utils/logger.hpp" #include #include diff --git a/include/derecho/rdmc/detail/lf_helper.hpp b/include/derecho/rdmc/detail/lf_helper.hpp index f346a1df..7b213bef 100644 --- a/include/derecho/rdmc/detail/lf_helper.hpp +++ b/include/derecho/rdmc/detail/lf_helper.hpp @@ -34,6 +34,7 @@ struct fid_cq; */ namespace rdma { using ip_addr_t = derecho::ip_addr_t; +using node_id_t = derecho::node_id_t; class exception {}; class invalid_args : public exception {}; diff --git a/include/derecho/rdmc/detail/verbs_helper.hpp b/include/derecho/rdmc/detail/verbs_helper.hpp index aea0fade..a1e94b83 100644 --- a/include/derecho/rdmc/detail/verbs_helper.hpp +++ b/include/derecho/rdmc/detail/verbs_helper.hpp @@ -24,6 +24,9 @@ struct ibv_cq; * to the IB Verbs library. 
*/ namespace rdma { +using ip_addr_t = derecho::ip_addr_t; +using node_id_t = derecho::node_id_t; + // Various classes of exceptions class exception {}; class invalid_args : public exception {}; diff --git a/include/derecho/sst/detail/lf.hpp b/include/derecho/sst/detail/lf.hpp index 3abdffa3..fb2b8168 100644 --- a/include/derecho/sst/detail/lf.hpp +++ b/include/derecho/sst/detail/lf.hpp @@ -30,6 +30,7 @@ namespace sst { using memory_attribute_t = derecho::memory_attribute_t; using ip_addr_t = derecho::ip_addr_t; +using node_id_t = derecho::node_id_t; class lf_completion_entry_ctxt { private: diff --git a/include/derecho/sst/detail/verbs.hpp b/include/derecho/sst/detail/verbs.hpp index 8911373c..d81266f4 100644 --- a/include/derecho/sst/detail/verbs.hpp +++ b/include/derecho/sst/detail/verbs.hpp @@ -12,9 +12,20 @@ #include #include #include +#include +#include +#include namespace sst { +using memory_attribute_t = derecho::memory_attribute_t; +using ip_addr_t = derecho::ip_addr_t; +using node_id_t = derecho::node_id_t; + +struct unsupported_operation_exception : public std::logic_error { + unsupported_operation_exception(const std::string& message) : logic_error(message) {} +}; + /** Structure to exchange the data needed to connect the Queue Pairs */ struct cm_con_data_t { /** Buffer address */ @@ -81,8 +92,8 @@ class _resources { int post_remote_send(verbs_sender_ctxt* sctxt, const long long int offset, const long long int size, const int op, const bool completion); public: - /** Index of the remote node. */ - int remote_index; + /** ID of the remote node. */ + int remote_id; /** Handle for the IB Verbs Queue Pair object. */ struct ibv_qp* qp; /** Memory Region handle for the write buffer. */ @@ -107,15 +118,131 @@ class _resources { /** Constructor; initializes Queue Pair, Memory Regions, and `remote_props`. */ - _resources(int r_index, uint8_t* write_addr, uint8_t* read_addr, int size_w, + _resources(int r_id, uint8_t* write_addr, uint8_t* read_addr, int size_w, int size_r); /** Destroys the resources. */ virtual ~_resources(); + /** + * Out-of-Band memory and send management + * These are not currently supported when using the Verbs interface, but are + * declared here to allow compilation to succeed. Attempting to use any of + * them will throw an exception. + */ + + /** + * get the descriptor of the corresponding oob memory region + * Important: it assumes shared lock on oob_mrs_mutex. + * If iov does not fall into an oob memory region, it fails with nullptr. + * + * @param addr + * + * @return the descriptor of type void*, or nullptr in case of failure. + * @throw derecho::derecho_exception if not found. + */ + static void* get_oob_mr_desc(void* addr); + + /** + * Get the key of the corresponding oob memory region for remote access. + * + * @param addr The address of registered oob memory + * + * @return the remote access key, + * @throw derecho::derecho_exception if not found. + */ + static uint64_t get_oob_mr_key(void* addr); + + /** + * Register oob memory + * @param addr the address of the OOB memory + * @param size the size of the OOB memory + * @param attr the memory attribute + * + * @throws derecho_exception on failure. + */ + static void register_oob_memory_ex(void* addr, size_t size, const memory_attribute_t& attr); + + /** + * Deregister oob memory + * @param addr the address of OOB memory + * + * @throws derecho_exception on failure. 
+ */ + static void deregister_oob_memory(void* addr); + + /** + * Wait for a completion entries + * @param num_entries The number of entries to wait for + * @param timeout_us The number of microseconds to wait before throwing timeout + * + * @throws derecho_exception on failure. + */ + void wait_for_thread_local_completion_entries(size_t num_entries, uint64_t timeout_us); + + /* + * oob write + * @param iov The gather memory vector, the total size of the source should not go beyond 'size'. + * @param iovcnt The length of the vector. + * @param remote_dest_addr The remote address for receiving this message + * @param rkey The access key for the remote memory. + * @param size The size of the remote buffer + * + * @throws derecho_exception at failure. + */ + void oob_remote_write(const struct iovec* iov, int iovcnt, + void* remote_dest_addr, uint64_t rkey, size_t size); + + /* + * oob read + * @param iov The scatter memory vector, the total size of the source should not go beyond 'size'. + * @param iovcnt The length of the vector. + * @param remote_src_addr The remote address for receiving this message + * @param rkey The access key for the remote memory. + * @param size The size of the remote buffer + * + * @throws derecho_exception at failure. + */ + void oob_remote_read(const struct iovec* iov, int iovcnt, + void* remote_src_addr, uint64_t rkey, size_t size); + + /* + * oob send + * @param iov The gather memory vector. + * @param iovcnt The length of the vector. + * + * @throws derecho_exception at failure. + */ + void oob_send(const struct iovec* iov, int iovcnt); + + /* + * oob recv + * @param iov The gather memory vector. + * @param iovcnt The length of the vector. + * + * @throws derecho_exception at failure. + */ + void oob_recv(const struct iovec* iov, int iovcnt); + +#define OOB_OP_READ 0x0 +#define OOB_OP_WRITE 0x1 +#define OOB_OP_SEND 0x2 +#define OOB_OP_RECV 0x3 + /* + * Public callable wrapper around wait_for_thread_local_completion_entries() + * @param op The OOB operations, one of the following: + * - OOB_OP_READ + * - OOB_OP_WRITE + * - OOB_OP_SEND + * - OOB_OP_RECV + * For most of the cases, we wait for only one completion. To allow an operation like + * "exchange", which is to be implemented, we might need to write for two completions. + * @param timeout_us Timeout settings in microseconds. + */ + void wait_for_oob_op(uint32_t op, uint64_t timeout_us); }; class resources : public _resources { public: - resources(int r_index, uint8_t* write_addr, uint8_t* read_addr, int size_w, + resources(int r_id, uint8_t* write_addr, uint8_t* read_addr, int size_w, int size_r); /** * Report that the remote node this object is connected to has failed. @@ -144,7 +271,7 @@ class resources_two_sided : public _resources { int post_receive(verbs_sender_ctxt* ce_ctxt, const long long int offset, const long long int size); public: - resources_two_sided(int r_index, uint8_t* write_addr, uint8_t* read_addr, int size_w, + resources_two_sided(int r_id, uint8_t* write_addr, uint8_t* read_addr, int size_w, int size_r); /** * Report that the remote node this object is connected to has failed. @@ -181,7 +308,7 @@ bool sync(uint32_t r_index); * @param live_nodes_list A list of node IDs whose connections should be retained; * all other connections will be deleted. */ -void filter_external_to(const std::vector& live_nodes_list); +void filter_external_to(const std::vector& live_nodes_list); /** Initializes the global verbs resources. 
*/ void verbs_initialize(const std::map>& ip_addrs_and_sst_ports, diff --git a/src/core/git_version.cpp b/src/core/git_version.cpp index 20fe30e4..83cdf130 100644 --- a/src/core/git_version.cpp +++ b/src/core/git_version.cpp @@ -13,8 +13,8 @@ namespace derecho { const int MAJOR_VERSION = 2; const int MINOR_VERSION = 4; const int PATCH_VERSION = 0; -const int COMMITS_AHEAD_OF_VERSION = 0; +const int COMMITS_AHEAD_OF_VERSION = 1; const char* VERSION_STRING = "2.4.0"; -const char* VERSION_STRING_PLUS_COMMITS = "2.4.0+0"; +const char* VERSION_STRING_PLUS_COMMITS = "2.4.0+1"; } diff --git a/src/sst/verbs.cpp b/src/sst/verbs.cpp index 1f94827a..ec47e53b 100644 --- a/src/sst/verbs.cpp +++ b/src/sst/verbs.cpp @@ -28,6 +28,7 @@ #include #include #include +#include #include #include @@ -84,7 +85,7 @@ static bool shutdown = false; * Initializes the resources. Registers write_addr and read_addr as the read * and write buffers and connects a queue pair with the specified remote node. * - * @param r_index The node rank of the remote node to connect to. + * @param r_id The node rank of the remote node to connect to. * @param write_addr A pointer to the memory to use as the write buffer. This * is where data should be written locally in order to send it in an RDMA write * to the remote node. @@ -93,10 +94,10 @@ static bool shutdown = false; * @param size_w The size of the write buffer (in bytes). * @param size_r The size of the read buffer (in bytes). */ -_resources::_resources(int r_index, uint8_t* write_addr, uint8_t* read_addr, int size_w, +_resources::_resources(int r_id, uint8_t* write_addr, uint8_t* read_addr, int size_w, int size_r) : remote_failed(false), - remote_index(r_index), + remote_id(r_id), write_buf(write_addr), read_buf(read_addr) { if(!write_buf) { @@ -154,7 +155,7 @@ _resources::_resources(int r_index, uint8_t* write_addr, uint8_t* read_addr, int without_completion_sender_ctxt.type = verbs_sender_ctxt::INTERNAL_FLOW_CONTROL; without_completion_sender_ctxt.ctxt.res = this; - cout << "Established RDMA connection with node " << r_index << endl; + cout << "Established RDMA connection with node " << r_id << endl; } /** @@ -287,7 +288,7 @@ void _resources::connect_qp() { local_con_data.lid = htons(g_res->port_attr.lid); memcpy(local_con_data.gid, &my_gid, 16); try { - sst_connections->exchange(remote_index, local_con_data, tmp_con_data); + sst_connections->exchange(remote_id, local_con_data, tmp_con_data); } catch(tcp::socket_error&) { cout << "Could not exchange qp data in connect_qp" << endl; } @@ -311,7 +312,7 @@ void _resources::connect_qp() { // sync to make sure that both sides are in states that they can connect to // prevent packet loss // just send a dummy char back and forth - bool success = sync(remote_index); + bool success = sync(remote_id); if(!success) { cout << "Could not sync in connect_qp after qp transition to RTS state" << endl; } @@ -388,8 +389,44 @@ int _resources::post_remote_send(verbs_sender_ctxt* sctxt, const long long int o return ret; } -resources::resources(int r_index, uint8_t* write_addr, uint8_t* read_addr, int size_w, - int size_r) : _resources(r_index, write_addr, read_addr, size_w, size_r) { +void _resources::register_oob_memory_ex(void* addr, size_t size, const memory_attribute_t& attr) { + throw unsupported_operation_exception("register_oob_memory_ex"); +} + +void _resources::deregister_oob_memory(void* addr) { + throw unsupported_operation_exception("deregister_oob_memory"); +} + +void* _resources::get_oob_mr_desc(void* addr) { + throw 
unsupported_operation_exception("get_oob_mr_desc"); +} + +uint64_t _resources::get_oob_mr_key(void* addr) { + throw unsupported_operation_exception("get_oob_mr_key"); +} + +void _resources::oob_remote_write(const struct iovec* iov, int iovcnt, void* remote_dest_addr, uint64_t rkey, size_t size) { + throw unsupported_operation_exception("oob_remote_write"); +} + +void _resources::oob_remote_read(const struct iovec* iov, int iovcnt, void* remote_src_addr, uint64_t rkey, size_t size) { + throw unsupported_operation_exception("oob_remote_read"); +} + +void _resources::oob_send(const struct iovec* iov, int iovcnt) { + throw unsupported_operation_exception("oob_send"); +} + +void _resources::oob_recv(const struct iovec* iov, int iovcnt) { + throw unsupported_operation_exception("oob_recv"); +} + +void _resources::wait_for_oob_op(uint32_t op, uint64_t timeout_us) { + throw unsupported_operation_exception("wait_for_oob_op"); +} + +resources::resources(int r_id, uint8_t* write_addr, uint8_t* read_addr, int size_w, + int size_r) : _resources(r_id, write_addr, read_addr, size_w, size_r) { } void resources::report_failure() { @@ -402,7 +439,7 @@ void resources::report_failure() { void resources::post_remote_read(const long long int size) { int rc = post_remote_send(nullptr, 0, size, 0, false); if(rc) { - cout << "Could not post RDMA read, error code is " << rc << ", remote_index is " << remote_index << endl; + cout << "Could not post RDMA read, error code is " << rc << ", remote_id is " << remote_id << endl; } } /** @@ -413,7 +450,7 @@ void resources::post_remote_read(const long long int size) { void resources::post_remote_read(const long long int offset, const long long int size) { int rc = post_remote_send(nullptr, offset, size, 0, false); if(rc) { - cout << "Could not post RDMA read, error code is " << rc << ", remote_index is " << remote_index << endl; + cout << "Could not post RDMA read, error code is " << rc << ", remote_id is " << remote_id << endl; } } /** @@ -423,7 +460,7 @@ void resources::post_remote_read(const long long int offset, const long long int void resources::post_remote_write(const long long int size) { int rc = post_remote_send(nullptr, 0, size, 1, false); if(rc) { - cout << "Could not post RDMA write (with no offset), error code is " << rc << ", remote_index is " << remote_index << endl; + cout << "Could not post RDMA write (with no offset), error code is " << rc << ", remote_id is " << remote_id << endl; } } @@ -436,26 +473,26 @@ void resources::post_remote_write(const long long int size) { void resources::post_remote_write(const long long int offset, const long long int size) { int rc = post_remote_send(nullptr, offset, size, 1, false); if(rc) { - cout << "Could not post RDMA write with offset, error code is " << rc << ", remote_index is " << remote_index << endl; + cout << "Could not post RDMA write with offset, error code is " << rc << ", remote_id is " << remote_id << endl; } } void resources::post_remote_write_with_completion(verbs_sender_ctxt* sctxt, const long long int size) { int rc = post_remote_send(sctxt, 0, size, 1, true); if(rc) { - cout << "Could not post RDMA write (with no offset) with completion, error code is " << rc << ", remote_index is " << remote_index << endl; + cout << "Could not post RDMA write (with no offset) with completion, error code is " << rc << ", remote_id is " << remote_id << endl; } } void resources::post_remote_write_with_completion(verbs_sender_ctxt* sctxt, const long long int offset, const long long int size) { int rc = 
post_remote_send(sctxt, offset, size, 1, true); if(rc) { - cout << "Could not post RDMA write with offset and completion, error code is " << rc << ", remote_index is " << remote_index << endl; + cout << "Could not post RDMA write with offset and completion, error code is " << rc << ", remote_id is " << remote_id << endl; } } -resources_two_sided::resources_two_sided(int r_index, uint8_t* write_addr, uint8_t* read_addr, int size_w, - int size_r) : _resources(r_index, write_addr, read_addr, size_w, size_r) { +resources_two_sided::resources_two_sided(int r_id, uint8_t* write_addr, uint8_t* read_addr, int size_w, + int size_r) : _resources(r_id, write_addr, read_addr, size_w, size_r) { } void resources_two_sided::report_failure() { @@ -469,7 +506,7 @@ void resources_two_sided::report_failure() { void resources_two_sided::post_two_sided_send(const long long int size) { int rc = post_remote_send(nullptr, 0, size, 2, false); if(rc) { - cout << "Could not post RDMA two sided send (with no offset), error code is " << rc << ", remote_index is " << remote_index << endl; + cout << "Could not post RDMA two sided send (with no offset), error code is " << rc << ", remote_id is " << remote_id << endl; } } @@ -482,21 +519,21 @@ void resources_two_sided::post_two_sided_send(const long long int size) { void resources_two_sided::post_two_sided_send(const long long int offset, const long long int size) { int rc = post_remote_send(nullptr, offset, size, 2, false); if(rc) { - cout << "Could not post RDMA two sided send with offset, error code is " << rc << ", remote_index is " << remote_index << endl; + cout << "Could not post RDMA two sided send with offset, error code is " << rc << ", remote_id is " << remote_id << endl; } } void resources_two_sided::post_two_sided_send_with_completion(verbs_sender_ctxt* sctxt, const long long int size) { int rc = post_remote_send(sctxt, 0, size, 2, true); if(rc) { - cout << "Could not post RDMA two sided send (with no offset) with completion, error code is " << rc << ", remote_index is " << remote_index << endl; + cout << "Could not post RDMA two sided send (with no offset) with completion, error code is " << rc << ", remote_id is " << remote_id << endl; } } void resources_two_sided::post_two_sided_send_with_completion(verbs_sender_ctxt* sctxt, const long long int offset, const long long int size) { int rc = post_remote_send(sctxt, offset, size, 2, true); if(rc) { - cout << "Could not post RDMA two sided send with offset and completion, error code is " << rc << ", remote_index is " << remote_index << endl; + cout << "Could not post RDMA two sided send with offset and completion, error code is " << rc << ", remote_id is " << remote_id << endl; } } @@ -525,14 +562,14 @@ int resources_two_sided::post_receive(verbs_sender_ctxt* sctxt, const long long void resources_two_sided::post_two_sided_receive(verbs_sender_ctxt* sctxt, const long long int size) { int rc = post_receive(sctxt, 0, size); if(rc) { - cout << "Could not post RDMA two sided receive (with no offset), error code is " << rc << ", remote_index is " << remote_index << endl; + cout << "Could not post RDMA two sided receive (with no offset), error code is " << rc << ", remote_id is " << remote_id << endl; } } void resources_two_sided::post_two_sided_receive(verbs_sender_ctxt* sctxt, const long long int offset, const long long int size) { int rc = post_receive(sctxt, offset, size); if(rc) { - cout << "Could not post RDMA two sided receive with offset, error code is " << rc << ", remote_index is " << remote_index << endl; + 
cout << "Could not post RDMA two sided receive with offset, error code is " << rc << ", remote_id is " << remote_id << endl; } } @@ -711,7 +748,7 @@ bool add_external_node(uint32_t new_id, const std::pair& ne bool remove_node(uint32_t node_id) { if(sst_connections->contains_node(node_id)) { return sst_connections->delete_node(node_id); - } else if { + } else { return external_client_connections->delete_node(node_id); } } From 85946df4250b9683eb85961201c509323f56c03c Mon Sep 17 00:00:00 2001 From: Edward Tremel Date: Fri, 3 May 2024 20:56:02 -0400 Subject: [PATCH 2/3] Correctly placed node_id_t inside derecho namespace While debugging the compile errors caused by enabling USE_VERBS_API, I noticed that connection_manager.hpp was improperly putting node_id_t into the global namespace even though we intended it to be scoped within derecho. This was inadvertently hiding a bunch of improper uses of node_id_t that should have been caught back when we first put node_id_t inside the derecho namespace. --- README.md | 6 ++-- .../core/detail/connection_manager.hpp | 2 +- .../archive/custom_subgroup_profiles_test.cpp | 1 + .../archive/external_client_perf_test.cpp | 1 + .../archive/long_subgroup_test.cpp | 1 + .../archive/notification_test.cpp | 1 + .../archive/scaling_subgroup_test.cpp | 1 + .../smart_membership_function_test.cpp | 2 ++ src/applications/archive/subgroup_test.cpp | 1 + .../archive/typed_subgroup_test.cpp | 1 + src/applications/demos/oob_rdma.cpp | 34 +++++++++---------- .../demos/overlapping_replicated_objects.cpp | 1 + .../demos/signed_store_mockup.cpp | 10 +++--- .../demos/simple_replicated_objects.cpp | 1 + .../demos/simple_replicated_objects_json.cpp | 1 + .../simple_replicated_objects_overlap.cpp | 1 + .../tests/performance_tests/oob_perf.cpp | 2 +- .../tests/performance_tests/p2p_bw_test.cpp | 1 + .../performance_tests/p2p_latency_test.cpp | 1 + .../performance_tests/signed_store_test.cpp | 26 +++++++------- .../unit_tests/client_callback_mockup.cpp | 18 +++++----- .../unit_tests/client_callback_mockup.hpp | 6 ++-- .../unit_tests/external_notification_test.cpp | 1 + .../persistence_notification_test.cpp | 10 +++--- .../persistence_notification_test.hpp | 4 +-- .../tests/unit_tests/rpc_function_types.cpp | 1 + .../tests/unit_tests/rpc_reply_maps.cpp | 2 ++ .../unit_tests/subgroup_function_tester.cpp | 4 +++ src/core/git_version.cpp | 4 +-- 29 files changed, 84 insertions(+), 61 deletions(-) diff --git a/README.md b/README.md index 01b78ebb..64255cc3 100644 --- a/README.md +++ b/README.md @@ -395,7 +395,7 @@ Here is an example of a JSON layout string that uses "reserved_node_ids_by_shard if(curr_view.num_members < 3) { throw derecho::subgroup_provisioning_exception(); } - std::vector first_3_nodes(&curr_view.members[0], &curr_view.members[0] + 3); + std::vector first_3_nodes(&curr_view.members[0], &curr_view.members[0] + 3); //Put the desired SubView at subgroup_layout[0][0] since there's one subgroup with one shard subgroup_layout[0].emplace_back(curr_view.make_subview(first_3_nodes)); //Advance next_unassigned_rank by 3, unless it was already beyond 3, since we assigned the first 3 nodes @@ -405,7 +405,7 @@ Here is an example of a JSON layout string that uses "reserved_node_ids_by_shard if(curr_view.num_members < 6) { throw derecho::subgroup_provisioning_exception(); } - std::vector next_3_nodes(&curr_view.members[3], &curr_view.members[3] + 3); + std::vector next_3_nodes(&curr_view.members[3], &curr_view.members[3] + 3); 
subgroup_layout[0].emplace_back(curr_view.make_subview(next_3_nodes)); curr_view.next_unassigned_rank += 3; } @@ -453,7 +453,7 @@ PeerCaller& p2p_cache_handle = group->get_nonmember_subgroup(1); When invoking a P2P send, the caller must specify, as the first argument, the ID of the node to communicate with. The caller must ensure that this node is actually a member of the subgroup that the PeerCaller targets (though it can be in any shard of that subgroup). Nodes can find out the current membership of a subgroup by calling the `get_subgroup_members` method on the Group, which uses the same template parameter and argument as `get_subgroup` to select a subgroup by type and index. For example, assuming Cache subgroups are not sharded, this is how a non-member process could make a call to `get`, targeting the first node in the second subgroup of type Cache: ```cpp -std::vector cache_members = group.get_subgroup_members(1)[0]; +std::vector cache_members = group.get_subgroup_members(1)[0]; derecho::rpc::QueryResults results = p2p_cache_handle.p2p_send(cache_members[0], "Foo"); ``` diff --git a/include/derecho/core/detail/connection_manager.hpp b/include/derecho/core/detail/connection_manager.hpp index 34d8ef30..a5528ac7 100644 --- a/include/derecho/core/detail/connection_manager.hpp +++ b/include/derecho/core/detail/connection_manager.hpp @@ -9,10 +9,10 @@ #include #include -using node_id_t = derecho::node_id_t; namespace tcp { +using node_id_t = derecho::node_id_t; using ip_addr_t = derecho::ip_addr_t; class tcp_connections { diff --git a/src/applications/archive/custom_subgroup_profiles_test.cpp b/src/applications/archive/custom_subgroup_profiles_test.cpp index 8b4df0a4..6db33462 100644 --- a/src/applications/archive/custom_subgroup_profiles_test.cpp +++ b/src/applications/archive/custom_subgroup_profiles_test.cpp @@ -12,6 +12,7 @@ using derecho::PeerCaller; using derecho::Replicated; +using derecho::node_id_t; using std::cout; using std::endl; using namespace persistent; diff --git a/src/applications/archive/external_client_perf_test.cpp b/src/applications/archive/external_client_perf_test.cpp index fba7fbc9..f5c88113 100644 --- a/src/applications/archive/external_client_perf_test.cpp +++ b/src/applications/archive/external_client_perf_test.cpp @@ -8,6 +8,7 @@ using derecho::ExternalClientCaller; using derecho::Replicated; +using derecho::node_id_t; using std::cout; using std::endl; using derecho::Bytes; diff --git a/src/applications/archive/long_subgroup_test.cpp b/src/applications/archive/long_subgroup_test.cpp index e899ad7e..6f4b13f9 100644 --- a/src/applications/archive/long_subgroup_test.cpp +++ b/src/applications/archive/long_subgroup_test.cpp @@ -7,6 +7,7 @@ using derecho::PeerCaller; using derecho::Replicated; +using derecho::node_id_t; using std::cout; using std::endl; using namespace persistent; diff --git a/src/applications/archive/notification_test.cpp b/src/applications/archive/notification_test.cpp index 07f65d9e..5910a74f 100644 --- a/src/applications/archive/notification_test.cpp +++ b/src/applications/archive/notification_test.cpp @@ -7,6 +7,7 @@ #include using derecho::ExternalClientCaller; +using derecho::node_id_t; using std::cout; using std::endl; diff --git a/src/applications/archive/scaling_subgroup_test.cpp b/src/applications/archive/scaling_subgroup_test.cpp index 04101b4e..a65e287d 100644 --- a/src/applications/archive/scaling_subgroup_test.cpp +++ b/src/applications/archive/scaling_subgroup_test.cpp @@ -9,6 +9,7 @@ using derecho::PeerCaller; using 
derecho::Replicated; +using derecho::node_id_t; using std::cout; using std::endl; using namespace persistent; diff --git a/src/applications/archive/smart_membership_function_test.cpp b/src/applications/archive/smart_membership_function_test.cpp index b33e4faa..1611297d 100644 --- a/src/applications/archive/smart_membership_function_test.cpp +++ b/src/applications/archive/smart_membership_function_test.cpp @@ -21,6 +21,8 @@ #define RPC_NAME(...) 0ULL #endif +using derecho::node_id_t; + class Cache : public mutils::ByteRepresentable { std::map cache_map; diff --git a/src/applications/archive/subgroup_test.cpp b/src/applications/archive/subgroup_test.cpp index b7ac5f70..fba4dae8 100644 --- a/src/applications/archive/subgroup_test.cpp +++ b/src/applications/archive/subgroup_test.cpp @@ -10,6 +10,7 @@ #include using derecho::RawObject; +using derecho::node_id_t; using std::cin; using std::cout; using std::endl; diff --git a/src/applications/archive/typed_subgroup_test.cpp b/src/applications/archive/typed_subgroup_test.cpp index 53f3e9db..8ef7769d 100644 --- a/src/applications/archive/typed_subgroup_test.cpp +++ b/src/applications/archive/typed_subgroup_test.cpp @@ -12,6 +12,7 @@ using derecho::PeerCaller; using derecho::Replicated; +using derecho::node_id_t; using std::cout; using std::endl; using namespace persistent; diff --git a/src/applications/demos/oob_rdma.cpp b/src/applications/demos/oob_rdma.cpp index 8cc005bf..32ec0b28 100644 --- a/src/applications/demos/oob_rdma.cpp +++ b/src/applications/demos/oob_rdma.cpp @@ -1,7 +1,7 @@ /** * @file oob_rdma.cpp * - * This test creates one subgroup demonstrating OOB mechanism + * This test creates one subgroup demonstrating OOB mechanism * - between external clients and a group member, and * - between derecho members */ @@ -76,7 +76,7 @@ class OOBRDMA : public mutils::ByteRepresentable, * @param size the size of the data * * @return true for success, otherwise false. - */ + */ bool get(const uint64_t& callee_addr, const uint64_t& caller_addr, const uint64_t rkey, const uint64_t size) const; /** @@ -104,7 +104,7 @@ class OOBRDMA : public mutils::ByteRepresentable, bool recv(const uint64_t& callee_addr, const uint64_t size) const; // constructors - OOBRDMA(void* _oob_mr_ptr, size_t _oob_mr_size) : + OOBRDMA(void* _oob_mr_ptr, size_t _oob_mr_size) : oob_mr_ptr(_oob_mr_ptr), oob_mr_size(_oob_mr_size) {} @@ -149,7 +149,7 @@ uint64_t OOBRDMA::put(const uint64_t& caller_addr, const uint64_t rkey, const ui bool OOBRDMA::get(const uint64_t& callee_addr, const uint64_t& caller_addr, const uint64_t rkey, const uint64_t size) const { // STEP 1 - validate the memory size - if ((callee_addr < reinterpret_cast(oob_mr_ptr)) || + if ((callee_addr < reinterpret_cast(oob_mr_ptr)) || ((callee_addr+size) > reinterpret_cast(oob_mr_ptr) + oob_mr_size)) { std::cerr << "callee address:0x" << std::hex << callee_addr << " or size " << size << " is invalid." 
<< std::dec << std::endl; return false; @@ -192,7 +192,7 @@ bool OOBRDMA::recv(const uint64_t& callee_addr, const uint64_t size) const { // STEP 2 - do RDMA send auto& subgroup_handle = group->template get_subgroup(this->subgroup_index); struct iovec iov; - iov.iov_base = reinterpret_cast(callee_addr); + iov.iov_base = reinterpret_cast(callee_addr); iov.iov_len = static_cast(size); subgroup_handle.oob_send(group->get_rpc_caller_id(),&iov,1); subgroup_handle.wait_for_oob_op(group->get_rpc_caller_id(),OOB_OP_SEND,1000); @@ -217,7 +217,7 @@ static void print_data (void* addr,size_t size) { template void do_send_recv_test(SubgroupRefT& subgroup_handle, - node_id_t nid, + derecho::node_id_t nid, void* send_buffer_laddr, void* recv_buffer_laddr, size_t oob_data_size @@ -282,7 +282,7 @@ void do_send_recv_test(SubgroupRefT& subgroup_handle, iov.iov_len = oob_data_size; // 3.1 - post oob buffer for receive subgroup_handle.oob_recv(nid,&iov,1); - // 3.2 - post p2p_send + // 3.2 - post p2p_send auto recv_results = subgroup_handle.template p2p_send(nid,remote_addr,oob_data_size); // 3.3 - wait until oob received. subgroup_handle.wait_for_oob_op(nid,OOB_OP_RECV,1000); @@ -326,7 +326,7 @@ void do_send_recv_test(SubgroupRefT& subgroup_handle, } template -void do_test (P2PCaller& p2p_caller, node_id_t nid, uint64_t rkey, void* put_buffer_laddr, void* get_buffer_laddr, +void do_test (P2PCaller& p2p_caller, derecho::node_id_t nid, uint64_t rkey, void* put_buffer_laddr, void* get_buffer_laddr, size_t oob_data_size #ifdef CUDA_FOUND , bool use_gpu_mem @@ -383,7 +383,7 @@ void do_test (P2PCaller& p2p_caller, node_id_t nid, uint64_t rkey, void* put_buf auto results = p2p_caller.template p2p_send(nid,remote_addr,reinterpret_cast(get_buffer_laddr),rkey,oob_data_size); std::cout << "Wait for return" << std::endl; results.get().get(nid); - std::cout << "Data get from remote address @" << std::hex << remote_addr + std::cout << "Data get from remote address @" << std::hex << remote_addr << " to local address @" << reinterpret_cast(get_buffer_laddr) << std::endl; } // print 16 bytes of contents @@ -418,7 +418,7 @@ void do_test (P2PCaller& p2p_caller, node_id_t nid, uint64_t rkey, void* put_buf #endif } -const char* help_string = +const char* help_string = "--server,-s Run as server, otherwise, run as client by default.\n" #ifdef CUDA_FOUND "--gpu,-g Using GPU memory, otherwise, use CPU memory by default.\n" @@ -514,7 +514,7 @@ int main(int argc, char** argv) { std::cerr << "Unknown CUDA device:" << cuda_device << ". We found only " << n_devices << std::endl; return -1; } - // initialize cuda_ctxt; + // initialize cuda_ctxt; ASSERTDRV(cuDeviceGet(&cuda_ctxt.device, cuda_device)); ASSERTDRV(cuDevicePrimaryCtxRetain(&cuda_ctxt.context, cuda_ctxt.device)); ASSERTDRV(cuCtxSetCurrent(cuda_ctxt.context)); @@ -556,23 +556,23 @@ int main(int argc, char** argv) { if (server_mode) { // Read configurations from the command line options as well as the default config file derecho::Conf::initialize(argc, argv); - + // Define subgroup member ship using the default subgroup allocator function. // When constructed using make_subgroup_allocator with no arguments, this will check the config file // for either the json_layout or json_layout_file options, and use whichever one is present to define // the mapping from types to subgroup allocation parameters. 
derecho::SubgroupInfo subgroup_function{derecho::make_subgroup_allocator()}; - + // oobrdma_factory auto oobrdma_factory = [&oob_mr_ptr,&oob_mr_size](persistent::PersistentRegistry*, derecho::subgroup_id_t) { return std::make_unique(oob_mr_ptr,oob_mr_size); }; - + // group - derecho::Group group(derecho::UserMessageCallbacks{}, subgroup_function, + derecho::Group group(derecho::UserMessageCallbacks{}, subgroup_function, {&dsm}, std::vector{}, oobrdma_factory); - + std::cout << "Finished constructing/joining Group." << std::endl; #ifdef CUDA_FOUND if (use_gpu_mem) { @@ -636,7 +636,7 @@ int main(int argc, char** argv) { void* get_buffer_laddr = reinterpret_cast(((reinterpret_cast(oob_mr_ptr) + oob_data_size + 4095)>>12)<<12); for (uint32_t i=1;i<=count;i++) { - node_id_t nid = i%external_group.get_members().size(); + derecho::node_id_t nid = i%external_group.get_members().size(); do_test(external_caller,nid,rkey,put_buffer_laddr,get_buffer_laddr,oob_data_size #ifdef CUDA_FOUND ,use_gpu_mem diff --git a/src/applications/demos/overlapping_replicated_objects.cpp b/src/applications/demos/overlapping_replicated_objects.cpp index abcd09fe..d6b216ff 100644 --- a/src/applications/demos/overlapping_replicated_objects.cpp +++ b/src/applications/demos/overlapping_replicated_objects.cpp @@ -22,6 +22,7 @@ using derecho::PeerCaller; using derecho::Replicated; +using derecho::node_id_t; using std::cout; using std::endl; diff --git a/src/applications/demos/signed_store_mockup.cpp b/src/applications/demos/signed_store_mockup.cpp index 45d21d0b..f112fe57 100644 --- a/src/applications/demos/signed_store_mockup.cpp +++ b/src/applications/demos/signed_store_mockup.cpp @@ -110,13 +110,13 @@ ClientTier::ClientTier(){}; std::tuple> ClientTier::submit_update(const Blob& data) const { derecho::PeerCaller& storage_subgroup = group->template get_nonmember_subgroup(); derecho::PeerCaller& signature_subgroup = group->template get_nonmember_subgroup(); - std::vector> storage_members = group->get_subgroup_members(); - std::vector> signature_members = group->get_subgroup_members(); + std::vector> storage_members = group->get_subgroup_members(); + std::vector> signature_members = group->get_subgroup_members(); std::uniform_int_distribution<> storage_distribution(0, storage_members[0].size() - 1); std::uniform_int_distribution<> signature_distribution(0, signature_members[0].size() - 1); //Choose a random member of each subgroup to contact with the P2P message - const node_id_t storage_member_to_contact = storage_members[0][storage_distribution(random_engine)]; - const node_id_t signature_member_to_contact = signature_members[0][signature_distribution(random_engine)]; + const derecho::node_id_t storage_member_to_contact = storage_members[0][storage_distribution(random_engine)]; + const derecho::node_id_t signature_member_to_contact = signature_members[0][signature_distribution(random_engine)]; //Send the new data to the storage subgroup dbg_default_debug("Sending update data to node {}", storage_member_to_contact); auto storage_query_results = storage_subgroup.p2p_send(storage_member_to_contact, data); @@ -213,7 +213,7 @@ Blob ObjectStore::get_latest() const { /* -------------------------------------------------------------------- */ //Determines whether a node ID is a member of any shard in a list of shards -bool member_of_shards(node_id_t node_id, const std::vector>& shard_member_lists) { +bool member_of_shards(derecho::node_id_t node_id, const std::vector>& shard_member_lists) { for(const auto& shard_members : 
shard_member_lists) { if(std::find(shard_members.begin(), shard_members.end(), node_id) != shard_members.end()) { return true; diff --git a/src/applications/demos/simple_replicated_objects.cpp b/src/applications/demos/simple_replicated_objects.cpp index a4ab217e..e137d680 100644 --- a/src/applications/demos/simple_replicated_objects.cpp +++ b/src/applications/demos/simple_replicated_objects.cpp @@ -22,6 +22,7 @@ using derecho::PeerCaller; using derecho::Replicated; +using derecho::node_id_t; using std::cout; using std::endl; diff --git a/src/applications/demos/simple_replicated_objects_json.cpp b/src/applications/demos/simple_replicated_objects_json.cpp index 7caa7f14..b5b14a22 100644 --- a/src/applications/demos/simple_replicated_objects_json.cpp +++ b/src/applications/demos/simple_replicated_objects_json.cpp @@ -23,6 +23,7 @@ using derecho::PeerCaller; using derecho::Replicated; +using derecho::node_id_t; using std::cout; using std::endl; using json = nlohmann::json; diff --git a/src/applications/demos/simple_replicated_objects_overlap.cpp b/src/applications/demos/simple_replicated_objects_overlap.cpp index 72a3961e..39c16e90 100644 --- a/src/applications/demos/simple_replicated_objects_overlap.cpp +++ b/src/applications/demos/simple_replicated_objects_overlap.cpp @@ -13,6 +13,7 @@ using derecho::PeerCaller; using derecho::Replicated; +using derecho::node_id_t; using std::cout; using std::endl; diff --git a/src/applications/tests/performance_tests/oob_perf.cpp b/src/applications/tests/performance_tests/oob_perf.cpp index 80ce774f..ff40df9a 100644 --- a/src/applications/tests/performance_tests/oob_perf.cpp +++ b/src/applications/tests/performance_tests/oob_perf.cpp @@ -155,7 +155,7 @@ bool OOBRDMA::get(const uint64_t& callee_addr, const uint64_t& caller_addr, cons template void perf_test ( P2PCaller& p2p_caller, - node_id_t nid, + derecho::node_id_t nid, uint64_t rkey, void* put_buffer_laddr, void* get_buffer_laddr, diff --git a/src/applications/tests/performance_tests/p2p_bw_test.cpp b/src/applications/tests/performance_tests/p2p_bw_test.cpp index a77c206c..eb0421e8 100644 --- a/src/applications/tests/performance_tests/p2p_bw_test.cpp +++ b/src/applications/tests/performance_tests/p2p_bw_test.cpp @@ -15,6 +15,7 @@ using std::endl; using test::Bytes; using namespace std::chrono; +using derecho::node_id_t; /** * RPC Object with a single function that returns a byte array over P2P diff --git a/src/applications/tests/performance_tests/p2p_latency_test.cpp b/src/applications/tests/performance_tests/p2p_latency_test.cpp index e5e54b29..ccbb14a6 100644 --- a/src/applications/tests/performance_tests/p2p_latency_test.cpp +++ b/src/applications/tests/performance_tests/p2p_latency_test.cpp @@ -9,6 +9,7 @@ using std::cout; using std::endl; using std::string; using std::chrono::duration_cast; +using derecho::node_id_t; class TestObject : public mutils::ByteRepresentable { int state; diff --git a/src/applications/tests/performance_tests/signed_store_test.cpp b/src/applications/tests/performance_tests/signed_store_test.cpp index a9da4562..93d5ec04 100644 --- a/src/applications/tests/performance_tests/signed_store_test.cpp +++ b/src/applications/tests/performance_tests/signed_store_test.cpp @@ -113,13 +113,13 @@ ClientTier::ClientTier(std::size_t test_data_size) std::tuple> ClientTier::submit_update(const Blob& data) const { derecho::PeerCaller& storage_subgroup = group->template get_nonmember_subgroup(); derecho::PeerCaller& signature_subgroup = group->template get_nonmember_subgroup(); - std::vector> 
storage_members = group->get_subgroup_members(); - std::vector> signature_members = group->get_subgroup_members(); + std::vector> storage_members = group->get_subgroup_members(); + std::vector> signature_members = group->get_subgroup_members(); std::uniform_int_distribution<> storage_distribution(0, storage_members[0].size() - 1); std::uniform_int_distribution<> signature_distribution(0, signature_members[0].size() - 1); //Choose a random member of each subgroup to contact with the P2P message - const node_id_t storage_member_to_contact = storage_members[0][storage_distribution(random_engine)]; - const node_id_t signature_member_to_contact = signature_members[0][signature_distribution(random_engine)]; + const derecho::node_id_t storage_member_to_contact = storage_members[0][storage_distribution(random_engine)]; + const derecho::node_id_t signature_member_to_contact = signature_members[0][signature_distribution(random_engine)]; //Send the new data to the storage subgroup auto storage_query_results = storage_subgroup.p2p_send(storage_member_to_contact, data); //Meanwhile, start hashing the update (this might take a long time) @@ -152,13 +152,13 @@ bool ClientTier::update_batch_test(const int& num_updates) const { using namespace std::chrono; derecho::PeerCaller& storage_subgroup = group->template get_nonmember_subgroup(); derecho::PeerCaller& signature_subgroup = group->template get_nonmember_subgroup(); - const std::vector> storage_members = group->get_subgroup_members(); - const std::vector> signature_members = group->get_subgroup_members(); + const std::vector> storage_members = group->get_subgroup_members(); + const std::vector> signature_members = group->get_subgroup_members(); std::uniform_int_distribution<> storage_distribution(0, storage_members[0].size() - 1); std::uniform_int_distribution<> signature_distribution(0, signature_members[0].size() - 1); //Choose a random member of each subgroup to contact with the P2P message - const node_id_t storage_member_to_contact = storage_members[0][storage_distribution(random_engine)]; - const node_id_t signature_member_to_contact = signature_members[0][signature_distribution(random_engine)]; + const derecho::node_id_t storage_member_to_contact = storage_members[0][storage_distribution(random_engine)]; + const derecho::node_id_t signature_member_to_contact = signature_members[0][signature_distribution(random_engine)]; /* Note: This currently doesn't work. It gets "stuck" waiting for completion of the * await-persistence RPC calls, even though the storage subgroup nodes have in fact * finished persisting all of the updates. 
I think this is because of the P2P message @@ -320,7 +320,7 @@ std::unique_ptr ObjectStore::from_bytes(mutils::DeserializationMana /* -------------------------------------------------------------------- */ //Determines whether a node ID is a member of any shard in a list of shards -bool member_of_shards(node_id_t node_id, const std::vector>& shard_member_lists) { +bool member_of_shards(derecho::node_id_t node_id, const std::vector>& shard_member_lists) { for(const auto& shard_members : shard_member_lists) { if(std::find(shard_members.begin(), shard_members.end(), node_id) != shard_members.end()) { return true; @@ -432,13 +432,13 @@ int main(int argc, char** argv) { "data_signed_store_test"); //One node in the client tier should send the "end test" message to all the storage members, //which will signal the main thread to call group.leave() and exit - std::vector storage_members = group.get_subgroup_members(0)[0]; - std::vector signature_members = group.get_subgroup_members(0)[0]; + std::vector storage_members = group.get_subgroup_members(0)[0]; + std::vector signature_members = group.get_subgroup_members(0)[0]; if(group.get_subgroup_members()[0][0] == my_id) { - for(node_id_t nid : storage_members) { + for(derecho::node_id_t nid : storage_members) { object_store_subgroup.p2p_send(nid); } - for(node_id_t nid : signature_members) { + for(derecho::node_id_t nid : signature_members) { signature_store_subgroup.p2p_send(nid); } } diff --git a/src/applications/tests/unit_tests/client_callback_mockup.cpp b/src/applications/tests/unit_tests/client_callback_mockup.cpp index 9e613c9b..75d84440 100644 --- a/src/applications/tests/unit_tests/client_callback_mockup.cpp +++ b/src/applications/tests/unit_tests/client_callback_mockup.cpp @@ -125,10 +125,10 @@ InternalClientNode::~InternalClientNode() { std::pair InternalClientNode::submit_update(uint32_t update_counter, const Blob& new_data) const { derecho::PeerCaller& storage_subgroup = group->template get_nonmember_subgroup(); - std::vector> storage_members = group->get_subgroup_members(); + std::vector> storage_members = group->get_subgroup_members(); - const node_id_t storage_node_to_contact = storage_members[0][0]; - node_id_t my_id = derecho::getConfUInt32(derecho::Conf::DERECHO_LOCAL_ID); + const derecho::node_id_t storage_node_to_contact = storage_members[0][0]; + derecho::node_id_t my_id = derecho::getConfUInt32(derecho::Conf::DERECHO_LOCAL_ID); //Submit the update to the chosen storage node dbg_default_debug("Submitting update number {} to node {}", update_counter, storage_node_to_contact); @@ -199,7 +199,7 @@ StorageNode::~StorageNode() { request_queue_nonempty.notify_all(); } -std::pair StorageNode::update(node_id_t sender_id, +std::pair StorageNode::update(derecho::node_id_t sender_id, uint32_t update_counter, const Blob& new_data) const { dbg_default_debug("Received an update call from node {} for update {}", sender_id, update_counter); @@ -223,7 +223,7 @@ Blob StorageNode::get(const persistent::version_t& version) const { return *object_log[version]; } -void StorageNode::register_callback(node_id_t client_node_id, +void StorageNode::register_callback(derecho::node_id_t client_node_id, const ClientCallbackType& callback_type, persistent::version_t version) const { dbg_default_debug("Received a call to register_callback from node {} for version {}", client_node_id, version); @@ -362,7 +362,7 @@ void StorageNode::callback_thread_function() { } //Determines whether a node ID is a member of any shard in a list of shards -bool 
member_of_shards(node_id_t node_id, const std::vector>& shard_member_lists) { +bool member_of_shards(derecho::node_id_t node_id, const std::vector>& shard_member_lists) { for(const auto& shard_members : shard_member_lists) { if(std::find(shard_members.begin(), shard_members.end(), node_id) != shard_members.end()) { return true; @@ -393,7 +393,7 @@ int main(int argc, char** argv) { + derecho::remote_invocation_utilities::header_space(); //An update plus the two other parameters must fit in the available payload size const std::size_t update_size = derecho::getConfUInt64(derecho::Conf::SUBGROUP_DEFAULT_MAX_PAYLOAD_SIZE) - - rpc_header_size - sizeof(node_id_t) - sizeof(uint32_t); + - rpc_header_size - sizeof(derecho::node_id_t) - sizeof(uint32_t); //For generating random updates const std::string characters("abcdefghijklmnopqrstuvwxyz"); std::mt19937 random_generator(getpid()); @@ -429,8 +429,8 @@ int main(int argc, char** argv) { //Figure out which subgroup this node got assigned to uint32_t my_id = derecho::getConfUInt32(derecho::Conf::DERECHO_LOCAL_ID); - std::vector storage_members = group.get_subgroup_members(0)[0]; - std::vector> client_tier_shards = group.get_subgroup_members(0); + std::vector storage_members = group.get_subgroup_members(0)[0]; + std::vector> client_tier_shards = group.get_subgroup_members(0); if(member_of_shards(my_id, client_tier_shards)) { std::cout << "Assigned the ClientNode role" << std::endl; //Send some updates to the storage nodes and request callbacks when they have globally persisted diff --git a/src/applications/tests/unit_tests/client_callback_mockup.hpp b/src/applications/tests/unit_tests/client_callback_mockup.hpp index 65b7ef06..f4137bed 100644 --- a/src/applications/tests/unit_tests/client_callback_mockup.hpp +++ b/src/applications/tests/unit_tests/client_callback_mockup.hpp @@ -82,7 +82,7 @@ std::ostream& operator<<(std::ostream& os, const ClientCallbackType& cb_type); */ struct CallbackRequest { ClientCallbackType callback_type; - node_id_t client; + derecho::node_id_t client; persistent::version_t version; }; @@ -110,7 +110,7 @@ class StorageNode : public mutils::ByteRepresentable, * P2P-callable function that creates a new log entry with the provided data. * @return The version assigned to the new log entry, and the timestamp assigned to the new log entry */ - std::pair update(node_id_t sender_id, + std::pair update(derecho::node_id_t sender_id, uint32_t update_counter, const Blob& new_data) const; @@ -126,7 +126,7 @@ class StorageNode : public mutils::ByteRepresentable, * update (identified by its version) has reached a particular state (locally/globally * persisted, signed) */ - void register_callback(node_id_t client_node_id, const ClientCallbackType& callback_type, persistent::version_t version) const; + void register_callback(derecho::node_id_t client_node_id, const ClientCallbackType& callback_type, persistent::version_t version) const; /** * Function that implements the callback-checking thread. 
This thread waits for * updates to finish persisting and then sends callbacks to clients who requested diff --git a/src/applications/tests/unit_tests/external_notification_test.cpp b/src/applications/tests/unit_tests/external_notification_test.cpp index d9c530fc..621aaa67 100644 --- a/src/applications/tests/unit_tests/external_notification_test.cpp +++ b/src/applications/tests/unit_tests/external_notification_test.cpp @@ -8,6 +8,7 @@ using derecho::ExternalClientCaller; using derecho::Replicated; +using derecho::node_id_t; using std::cout; using std::endl; diff --git a/src/applications/tests/unit_tests/persistence_notification_test.cpp b/src/applications/tests/unit_tests/persistence_notification_test.cpp index 9e8175a6..38353d25 100644 --- a/src/applications/tests/unit_tests/persistence_notification_test.cpp +++ b/src/applications/tests/unit_tests/persistence_notification_test.cpp @@ -46,7 +46,7 @@ derecho::Bytes StorageNode::get(const persistent::version_t& version) const { return *object_log[version]; } -void StorageNode::request_notification(node_id_t client_node_id, +void StorageNode::request_notification(derecho::node_id_t client_node_id, NotificationMessageType notification_type, persistent::version_t version) const { dbg_default_debug("Received a call to request_notification from node {} for version {}", client_node_id, version); @@ -208,7 +208,7 @@ int main(int argc, char** argv) { + derecho::remote_invocation_utilities::header_space(); //An update plus the two other parameters must fit in the available payload size const std::size_t update_size = derecho::getConfUInt64(derecho::Conf::SUBGROUP_DEFAULT_MAX_PAYLOAD_SIZE) - - rpc_header_size - sizeof(node_id_t) - sizeof(uint32_t); + - rpc_header_size - sizeof(derecho::node_id_t) - sizeof(uint32_t); //For generating random updates const std::string characters("abcdefghijklmnopqrstuvwxyz"); std::mt19937 random_generator(getpid()); @@ -255,12 +255,12 @@ int main(int argc, char** argv) { derecho::ExternalGroupClient client(dummy_storage_factory); derecho::ExternalClientCaller& storage_caller = client.get_subgroup_caller(0); //Since we know there is only 1 subgroup and 1 shard of StorageNode, getting the node IDs is easy - std::vector storage_node_ids = client.get_shard_members(0, 0); + std::vector storage_node_ids = client.get_shard_members(0, 0); //Pick a node to contact to send updates - node_id_t update_node = storage_node_ids[0]; + derecho::node_id_t update_node = storage_node_ids[0]; //Right now, we must request notifications from that same node, because only the P2P //update method can record QueryResults; other replicas have no way of tracking their updates - node_id_t notification_node = update_node; + derecho::node_id_t notification_node = update_node; //Register the client callback handler storage_caller.add_p2p_connection(notification_node); storage_caller.register_notification_handler(client_callback_function); diff --git a/src/applications/tests/unit_tests/persistence_notification_test.hpp b/src/applications/tests/unit_tests/persistence_notification_test.hpp index 9ecbb111..36d580ff 100644 --- a/src/applications/tests/unit_tests/persistence_notification_test.hpp +++ b/src/applications/tests/unit_tests/persistence_notification_test.hpp @@ -16,7 +16,7 @@ enum NotificationMessageType : uint64_t { struct NotificationRequest { NotificationMessageType notification_type; - node_id_t client_id; + derecho::node_id_t client_id; persistent::version_t version; }; @@ -67,7 +67,7 @@ class StorageNode : public mutils::ByteRepresentable, * 
update (identified by its version) has reached a particular state (locally/globally * persisted, signed) */ - void request_notification(node_id_t client_node_id, NotificationMessageType notification_type, + void request_notification(derecho::node_id_t client_node_id, NotificationMessageType notification_type, persistent::version_t version) const; /** * Function that implements the notification-checking thread. This thread waits for diff --git a/src/applications/tests/unit_tests/rpc_function_types.cpp b/src/applications/tests/unit_tests/rpc_function_types.cpp index 4ebb5154..5b7779cc 100644 --- a/src/applications/tests/unit_tests/rpc_function_types.cpp +++ b/src/applications/tests/unit_tests/rpc_function_types.cpp @@ -73,6 +73,7 @@ class MapTest : public mutils::ByteRepresentable { using derecho::flexible_even_shards; using derecho::one_subgroup_policy; +using derecho::node_id_t; int main(int argc, char** argv) { // Read configurations from the command line options as well as the default config file diff --git a/src/applications/tests/unit_tests/rpc_reply_maps.cpp b/src/applications/tests/unit_tests/rpc_reply_maps.cpp index 59428076..4b88edec 100644 --- a/src/applications/tests/unit_tests/rpc_reply_maps.cpp +++ b/src/applications/tests/unit_tests/rpc_reply_maps.cpp @@ -15,6 +15,8 @@ #define RPC_NAME(...) 0ULL #endif +using derecho::node_id_t; + class StringObject : public mutils::ByteRepresentable { std::string log; diff --git a/src/applications/tests/unit_tests/subgroup_function_tester.cpp b/src/applications/tests/unit_tests/subgroup_function_tester.cpp index 5a0bdf1b..d932fd62 100644 --- a/src/applications/tests/unit_tests/subgroup_function_tester.cpp +++ b/src/applications/tests/unit_tests/subgroup_function_tester.cpp @@ -25,6 +25,7 @@ void test_fixed_allocation_functions() { using derecho::CrossProductPolicy; using derecho::DefaultSubgroupAllocator; using derecho::SubgroupAllocationPolicy; + using derecho::node_id_t; //Reduce the verbosity of specifying "ordered" for three custom subgroups std::vector three_ordered(3, derecho::Mode::ORDERED); std::vector three_default_profiles(3, "default"); @@ -106,6 +107,7 @@ void test_fixed_allocation_functions() { void test_flexible_allocation_functions() { using derecho::DefaultSubgroupAllocator; using derecho::SubgroupAllocationPolicy; + using derecho::node_id_t; //Reduce the verbosity of specifying "ordered" for three custom subgroups std::vector three_ordered(3, derecho::Mode::ORDERED); @@ -164,6 +166,8 @@ void test_flexible_allocation_functions() { } void test_json_layout() { + using derecho::node_id_t; + const char* json_layout_string = R"|([ { diff --git a/src/core/git_version.cpp b/src/core/git_version.cpp index 83cdf130..20da88fd 100644 --- a/src/core/git_version.cpp +++ b/src/core/git_version.cpp @@ -13,8 +13,8 @@ namespace derecho { const int MAJOR_VERSION = 2; const int MINOR_VERSION = 4; const int PATCH_VERSION = 0; -const int COMMITS_AHEAD_OF_VERSION = 1; +const int COMMITS_AHEAD_OF_VERSION = 2; const char* VERSION_STRING = "2.4.0"; -const char* VERSION_STRING_PLUS_COMMITS = "2.4.0+1"; +const char* VERSION_STRING_PLUS_COMMITS = "2.4.0+2"; } From 91f8b2ab96302d386493882f417cc32cd1937736 Mon Sep 17 00:00:00 2001 From: Edward Tremel Date: Thu, 9 May 2024 13:08:26 -0400 Subject: [PATCH 3/3] Minor fix to an include My earlier commit that fixed USE_VERBS_API compile issues used the quotes style for including headers with full paths, but now we've changed to using the brackets style. 
---
 include/derecho/core/detail/p2p_connection.hpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/include/derecho/core/detail/p2p_connection.hpp b/include/derecho/core/detail/p2p_connection.hpp
index db2dfb7a..ecf0e3dc 100644
--- a/include/derecho/core/detail/p2p_connection.hpp
+++ b/include/derecho/core/detail/p2p_connection.hpp
@@ -6,7 +6,7 @@
 #else
 #include
 #endif
-#include "derecho/utils/logger.hpp"
+#include <derecho/utils/logger.hpp>
 #include
 #include
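
For reference, below is a minimal standalone sketch of the pattern patch 1 applies to the OOB functions: declare the full interface so callers compile, but give every body a throwing stub. This is not Derecho code — unsupported_operation_exception here mirrors the type added to sst/detail/verbs.hpp, and oob_send_stub is a hypothetical free function standing in for _resources::oob_send so the example builds on its own. Callers using the OOB API on a -DUSE_VERBS_API build should expect this exception path at runtime.

#include <iostream>
#include <stdexcept>
#include <string>
#include <sys/uio.h>

// Mirrors sst::unsupported_operation_exception as declared in the patch.
struct unsupported_operation_exception : public std::logic_error {
    unsupported_operation_exception(const std::string& message) : std::logic_error(message) {}
};

// Hypothetical stand-in for _resources::oob_send: same signature shape,
// compiles and links, but reports at runtime that the backend lacks OOB support.
void oob_send_stub(const struct iovec* /*iov*/, int /*iovcnt*/) {
    throw unsupported_operation_exception("oob_send is not supported by the Verbs backend");
}

int main() {
    char payload[16] = {};
    struct iovec iov{payload, sizeof(payload)};
    try {
        oob_send_stub(&iov, 1);
    } catch(const unsupported_operation_exception& e) {
        // Every OOB call takes this path when Derecho is built with -DUSE_VERBS_API.
        std::cerr << "OOB operation rejected: " << e.what() << std::endl;
    }
    return 0;
}

The same shape applies to the other stubs in the patch (oob_remote_read/write, register_oob_memory_ex, wait_for_oob_op): keeping the declarations identical to the LibFabric side preserves source compatibility, while the throwing bodies make the missing functionality fail loudly instead of silently.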