diff --git a/.clang-format b/.clang-format new file mode 100644 index 0000000..63b90d6 --- /dev/null +++ b/.clang-format @@ -0,0 +1,198 @@ + +# from https://github.com/OpenAtomFoundation/pikiwidb/blob/5ea7bc0949cbd49122633cf086322143121c5985/.clang-format +Language: Cpp +# BasedOnStyle: Google +AccessModifierOffset: -1 +AlignAfterOpenBracket: Align +AlignConsecutiveMacros: None +AlignConsecutiveAssignments: None +AlignConsecutiveBitFields: None +AlignConsecutiveDeclarations: None +AlignEscapedNewlines: Left +AlignOperands: Align +AlignTrailingComments: true +AllowAllArgumentsOnNextLine: true +AllowAllConstructorInitializersOnNextLine: true +AllowAllParametersOfDeclarationOnNextLine: true +AllowShortEnumsOnASingleLine: true +AllowShortBlocksOnASingleLine: Never +AllowShortCaseLabelsOnASingleLine: false +AllowShortFunctionsOnASingleLine: All +AllowShortLambdasOnASingleLine: All +AllowShortIfStatementsOnASingleLine: WithoutElse +AllowShortLoopsOnASingleLine: true +AlwaysBreakAfterDefinitionReturnType: None +AlwaysBreakAfterReturnType: None +AlwaysBreakBeforeMultilineStrings: true +AlwaysBreakTemplateDeclarations: Yes +AttributeMacros: + - __capability +BinPackArguments: true +BinPackParameters: true +BraceWrapping: + AfterCaseLabel: false + AfterClass: false + AfterControlStatement: Never + AfterEnum: false + AfterFunction: false + AfterNamespace: false + AfterObjCDeclaration: false + AfterStruct: false + AfterUnion: false + AfterExternBlock: false + BeforeCatch: false + BeforeElse: false + BeforeLambdaBody: false + BeforeWhile: false + IndentBraces: false + SplitEmptyFunction: true + SplitEmptyRecord: true + SplitEmptyNamespace: true +BreakBeforeBinaryOperators: None +BreakBeforeConceptDeclarations: true +BreakBeforeBraces: Attach +BreakBeforeInheritanceComma: false +BreakInheritanceList: BeforeColon +BreakBeforeTernaryOperators: true +BreakConstructorInitializersBeforeComma: false +BreakConstructorInitializers: BeforeColon +BreakAfterJavaFieldAnnotations: false +BreakStringLiterals: true +ColumnLimit: 120 +CommentPragmas: '^ IWYU pragma:' +CompactNamespaces: false +ConstructorInitializerAllOnOneLineOrOnePerLine: true +ConstructorInitializerIndentWidth: 4 +ContinuationIndentWidth: 4 +Cpp11BracedListStyle: true +DeriveLineEnding: true +DerivePointerAlignment: true +DisableFormat: false +EmptyLineBeforeAccessModifier: LogicalBlock +ExperimentalAutoDetectBinPacking: false +FixNamespaceComments: true +ForEachMacros: + - foreach + - Q_FOREACH + - BOOST_FOREACH +StatementAttributeLikeMacros: + - Q_EMIT +IncludeBlocks: Preserve +IncludeCategories: + - Regex: '^' + Priority: 2 + SortPriority: 0 + CaseSensitive: false + - Regex: '^<.*\.h>' + Priority: 1 + SortPriority: 0 + CaseSensitive: false + - Regex: '^<.*' + Priority: 2 + SortPriority: 0 + CaseSensitive: false + - Regex: '.*' + Priority: 3 + SortPriority: 0 + CaseSensitive: false +IncludeIsMainRegex: '([-_](test|unittest))?$' +IncludeIsMainSourceRegex: '' +IndentCaseLabels: true +IndentCaseBlocks: false +IndentGotoLabels: true +IndentPPDirectives: AfterHash +IndentExternBlock: AfterExternBlock +IndentWidth: 2 +IndentWrappedFunctionNames: false +InsertTrailingCommas: None +JavaScriptQuotes: Leave +JavaScriptWrapImports: true +KeepEmptyLinesAtTheStartOfBlocks: false +MacroBlockBegin: '' +MacroBlockEnd: '' +MaxEmptyLinesToKeep: 1 +NamespaceIndentation: None +ObjCBinPackProtocolList: Never +ObjCBlockIndentWidth: 2 +ObjCBreakBeforeNestedBlockParam: true +ObjCSpaceAfterProperty: false +ObjCSpaceBeforeProtocolList: true +PenaltyBreakAssignment: 2 
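Note: this is the pikiwidb/Google-derived config, so every hunk below moves toward the same shape. For orientation, a hypothetical snippet (not from this repo) laid out the way these settings dictate: IndentWidth 2, attached braces, AccessModifierOffset -1, ColumnLimit 120, short functions and else-less ifs allowed on one line.

```cpp
// Hypothetical example, formatted per this config.
class Example {
 public:                              // AccessModifierOffset: -1 => one space
  int Get() const { return value_; }  // AllowShortFunctionsOnASingleLine: All
  void Set(int v) {
    if (v >= 0) value_ = v;  // AllowShortIfStatementsOnASingleLine: WithoutElse
  }

 private:
  int value_ = 0;  // SpacesBeforeTrailingComments: 2
};
```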
+PenaltyBreakBeforeFirstCallParameter: 1 +PenaltyBreakComment: 300 +PenaltyBreakFirstLessLess: 120 +PenaltyBreakString: 1000 +PenaltyBreakTemplateDeclaration: 10 +PenaltyExcessCharacter: 1000000 +PenaltyReturnTypeOnItsOwnLine: 200 +PenaltyIndentedWhitespace: 0 +PointerAlignment: Left +RawStringFormats: + - Language: Cpp + Delimiters: + - cc + - CC + - cpp + - Cpp + - CPP + - 'c++' + - 'C++' + CanonicalDelimiter: '' + BasedOnStyle: google + - Language: TextProto + Delimiters: + - pb + - PB + - proto + - PROTO + EnclosingFunctions: + - EqualsProto + - EquivToProto + - PARSE_PARTIAL_TEXT_PROTO + - PARSE_TEST_PROTO + - PARSE_TEXT_PROTO + - ParseTextOrDie + - ParseTextProtoOrDie + - ParseTestProto + - ParsePartialTestProto + CanonicalDelimiter: '' + BasedOnStyle: google +ReflowComments: true +SortIncludes: true +SortJavaStaticImport: Before +SortUsingDeclarations: true +SpaceAfterCStyleCast: false +SpaceAfterLogicalNot: false +SpaceAfterTemplateKeyword: true +SpaceBeforeAssignmentOperators: true +SpaceBeforeCaseColon: false +SpaceBeforeCpp11BracedList: false +SpaceBeforeCtorInitializerColon: true +SpaceBeforeInheritanceColon: true +SpaceBeforeParens: ControlStatements +SpaceAroundPointerQualifiers: Default +SpaceBeforeRangeBasedForLoopColon: true +SpaceInEmptyBlock: false +SpaceInEmptyParentheses: false +SpacesBeforeTrailingComments: 2 +SpacesInAngles: false +SpacesInConditionalStatement: false +SpacesInContainerLiterals: true +SpacesInCStyleCastParentheses: false +SpacesInParentheses: false +SpacesInSquareBrackets: false +SpaceBeforeSquareBrackets: false +BitFieldColonSpacing: Both +Standard: Auto +StatementMacros: + - Q_UNUSED + - QT_REQUIRE_VERSION +TabWidth: 8 +UseCRLF: false +UseTab: Never +WhitespaceSensitiveMacros: + - STRINGIZE + - PP_STRINGIZE + - BOOST_PP_STRINGIZE + - NS_SWIFT_NAME + - CF_SWIFT_NAME diff --git a/CMakeLists.txt b/CMakeLists.txt index ff4ccb2..6252160 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -2,6 +2,11 @@ cmake_minimum_required(VERSION 3.22) project(KVRaftCpp) + + + + + set(CMAKE_CXX_STANDARD 20) # 生成debug版本,可以进行gdb调试 set(CMAKE_BUILD_TYPE "Debug") @@ -33,4 +38,18 @@ add_subdirectory(example) add_library(skip_list_on_raft STATIC ${src_rpc} ${rpc_example} ${raftsource} ${src_raftCore} ${src_raftRpcPro}) -target_link_libraries(skip_list_on_raft muduo_net muduo_base pthread ) \ No newline at end of file +target_link_libraries(skip_list_on_raft muduo_net muduo_base pthread ) + + +# 添加格式化目标 start +# from : https://blog.csdn.net/guotianqing/article/details/121661067 + +add_custom_target(format + COMMAND bash ${PROJECT_SOURCE_DIR}/format.sh + COMMAND echo "format done!" 
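Note: most of the include churn in the C++ hunks below comes from SortIncludes plus the IncludeCategories above: bracketed `*.h` headers get priority 1, other `<...>` headers priority 2, quoted project headers priority 3, with IncludeBlocks: Preserve keeping existing blank-line groups intact. The first category's regex appears truncated to `'^'` in this copy; in the upstream Google style it is a narrower pattern such as `'^<ext/.*\.h>'`. The net effect on a hypothetical block:

```cpp
// Before:
#include "raft.h"
#include <iostream>
#include <unistd.h>

// After (still one block, reordered by category priority, then alphabetically):
#include <unistd.h>  // priority 1: <...h>
#include <iostream>  // priority 2: other <...>
#include "raft.h"    // priority 3: quoted project headers
```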
+  WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
+)
+
+
+# 添加格式化目标 end
+
diff --git a/README.md b/README.md
index b087c68..0c5eb49 100644
--- a/README.md
+++ b/README.md
@@ -15,7 +15,14 @@
 - muduo
 - boost
 - protoc
+- clang-format (optional)
+**Installation notes**
+
+- clang-format: only needed if you plan to submit a PR; to install: `sudo apt-get install clang-format`
+- protoc: 3.12.4 locally; on Ubuntu 22, `sudo apt-get install protobuf-compiler libprotobuf-dev` installs this version by default
+- boost: `sudo apt-get install libboost-dev libboost-test-dev libboost-all-dev`
+- muduo: https://blog.csdn.net/QIANGWEIYUAN/article/details/89023980

 > 如果库安装编译本仓库的时候有错误或者需要确认版本信息,可以在issue页面查看其他人遇到的问题和分享: [链接](https://github.com/youngyangyang04/KVstorageBaseRaft-cpp/issues)

 ### 2.编译启动
diff --git a/example/raftCoreExample/caller.cpp b/example/raftCoreExample/caller.cpp
index 262bc3f..878913a 100644
--- a/example/raftCoreExample/caller.cpp
+++ b/example/raftCoreExample/caller.cpp
@@ -1,20 +1,20 @@
 //
 // Created by swx on 23-6-4.
 //
-#include "clerk.h"
 #include <iostream>
+#include "clerk.h"
 #include "util.h"
-int main(){
-    Clerk client;
-    client.Init("test.conf");
-    auto start = now();
-    int count = 500;
-    int tmp = count;
-    while (tmp --){
-        client.Put("x",std::to_string(tmp));
+int main() {
+  Clerk client;
+  client.Init("test.conf");
+  auto start = now();
+  int count = 500;
+  int tmp = count;
+  while (tmp--) {
+    client.Put("x", std::to_string(tmp));
 
-        std::string get1 = client.Get("x");
-        std::printf("get return :{%s}\r\n",get1.c_str());
-    }
-    return 0;
+    std::string get1 = client.Get("x");
+    std::printf("get return :{%s}\r\n", get1.c_str());
+  }
+  return 0;
 }
\ No newline at end of file
diff --git a/example/raftCoreExample/raftKvDB.cpp b/example/raftCoreExample/raftKvDB.cpp
index e16fcd3..f5b5f12 100644
--- a/example/raftCoreExample/raftKvDB.cpp
+++ b/example/raftCoreExample/raftKvDB.cpp
@@ -1,76 +1,74 @@
 //
 // Created by swx on 23-12-28.
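Note on caller.cpp: `start` is assigned from now() but never read, so the 500-round timing is measured and then discarded. If the benchmark is meant to report anything, a small addition after the loop would do it; sketch only, relying on now() being std::chrono::high_resolution_clock::now() as util.cpp defines it:

```cpp
// Drop-in after the while loop in main(); <chrono> comes in via util.h.
auto elapsed = std::chrono::duration_cast<std::chrono::milliseconds>(now() - start);
long long ms = elapsed.count();
std::printf("%d Put+Get rounds took %lld ms (%.2f ms per round)\r\n", count, ms,
            count > 0 ? static_cast<double>(ms) / count : 0.0);
```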
// -#include +#include #include "raft.h" // #include "kvServer.h" -#include #include -#include #include +#include +#include void ShowArgsHelp(); int main(int argc, char **argv) { - //////////////////////////////////读取命令参数:节点数量、写入raft节点节点信息到哪个文件 - if (argc < 2) { + //////////////////////////////////读取命令参数:节点数量、写入raft节点节点信息到哪个文件 + if (argc < 2) { + ShowArgsHelp(); + exit(EXIT_FAILURE); + } + int c = 0; + int nodeNum = 0; + std::string configFileName; + std::random_device rd; + std::mt19937 gen(rd()); + std::uniform_int_distribution<> dis(10000, 29999); + unsigned short startPort = dis(gen); + while ((c = getopt(argc, argv, "n:f:")) != -1) { + switch (c) { + case 'n': + nodeNum = atoi(optarg); + break; + case 'f': + configFileName = optarg; + break; + default: ShowArgsHelp(); exit(EXIT_FAILURE); } - int c = 0; - int nodeNum = 0; - std::string configFileName; - std::random_device rd; - std::mt19937 gen(rd()); - std::uniform_int_distribution<> dis(10000, 29999); - unsigned short startPort = dis(gen); - while ((c = getopt(argc, argv, "n:f:")) != -1) { - switch (c) { - case 'n': - nodeNum = atoi(optarg); - break; - case 'f': - configFileName = optarg; - break; - default: - ShowArgsHelp(); - exit(EXIT_FAILURE); - } - } - std::ofstream file(configFileName, std::ios::out | std::ios::app); + } + std::ofstream file(configFileName, std::ios::out | std::ios::app); + file.close(); + file = std::ofstream(configFileName, std::ios::out | std::ios::trunc); + if (file.is_open()) { file.close(); - file = std::ofstream(configFileName, std::ios::out | std::ios::trunc); - if (file.is_open()) { - file.close(); - std::cout << configFileName << " 已清空" << std::endl; - } else { - std::cout << "无法打开 " << configFileName << std::endl; - exit(EXIT_FAILURE); - } - for (int i = 0; i < nodeNum; i++) { - short port = startPort + static_cast(i); - std::cout << "start to create raftkv node:" << i << " port:" << port << " pid:" << getpid() << std::endl; - pid_t pid = fork(); // 创建新进程 - if (pid == 0) { - // 如果是子进程 - // 子进程的代码 + std::cout << configFileName << " 已清空" << std::endl; + } else { + std::cout << "无法打开 " << configFileName << std::endl; + exit(EXIT_FAILURE); + } + for (int i = 0; i < nodeNum; i++) { + short port = startPort + static_cast(i); + std::cout << "start to create raftkv node:" << i << " port:" << port << " pid:" << getpid() << std::endl; + pid_t pid = fork(); // 创建新进程 + if (pid == 0) { + // 如果是子进程 + // 子进程的代码 - auto kvServer = new KvServer(i, 500, configFileName, port); - pause(); // 子进程进入等待状态,不会执行 return 语句 - } else if (pid > 0) { - // 如果是父进程 - // 父进程的代码 - sleep(1); - } else { - // 如果创建进程失败 - std::cerr << "Failed to create child process." << std::endl; - exit(EXIT_FAILURE); - } + auto kvServer = new KvServer(i, 500, configFileName, port); + pause(); // 子进程进入等待状态,不会执行 return 语句 + } else if (pid > 0) { + // 如果是父进程 + // 父进程的代码 + sleep(1); + } else { + // 如果创建进程失败 + std::cerr << "Failed to create child process." << std::endl; + exit(EXIT_FAILURE); } - pause(); - return 0; + } + pause(); + return 0; } -void ShowArgsHelp() { - std::cout << "format: command -n -f " << std::endl; -} +void ShowArgsHelp() { std::cout << "format: command -n -f " << std::endl; } diff --git a/example/rpcExample/callee/friendService.cpp b/example/rpcExample/callee/friendService.cpp index b5353d6..689dd1e 100644 --- a/example/rpcExample/callee/friendService.cpp +++ b/example/rpcExample/callee/friendService.cpp @@ -1,53 +1,50 @@ // // Created by swx on 23-12-21. 
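Note: raftKvDB.cpp above boots the cluster as one process per raft node: fork() per node, the child builds its KvServer and parks on pause(), and the parent staggers launches with sleep(1) before parking itself. A stripped-down sketch of just that launch pattern (hypothetical, no KvServer dependency):

```cpp
#include <unistd.h>
#include <cstdio>
#include <cstdlib>

int main() {
  const int nodeNum = 3;
  const unsigned short startPort = 20000;  // the real main draws this from [10000, 29999]
  for (int i = 0; i < nodeNum; i++) {
    pid_t pid = fork();
    if (pid == 0) {
      // Child: the real code constructs KvServer(i, 500, configFileName, port) here.
      std::printf("node %d listening on %d, pid %d\n", i, startPort + i, getpid());
      pause();  // block forever; the server's own threads do the work
      _exit(0);
    } else if (pid < 0) {
      std::perror("fork");
      std::exit(EXIT_FAILURE);
    }
    sleep(1);  // stagger start-up so earlier nodes are already listening
  }
  pause();  // parent blocks too; kill the process group to tear the cluster down
  return 0;
}
```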
// -#include #include +#include #include #include "rpcExample/friend.pb.h" -#include "rpcprovider.h" #include - +#include "rpcprovider.h" class FriendService : public fixbug::FiendServiceRpc { -public: - std::vector GetFriendsList(uint32_t userid) { - std::cout << "local do GetFriendsList service! userid:" << userid << std::endl; - std::vector vec; - vec.push_back("gao yang"); - vec.push_back("liu hong"); - vec.push_back("wang shuo"); - return vec; - } + public: + std::vector GetFriendsList(uint32_t userid) { + std::cout << "local do GetFriendsList service! userid:" << userid << std::endl; + std::vector vec; + vec.push_back("gao yang"); + vec.push_back("liu hong"); + vec.push_back("wang shuo"); + return vec; + } - // 重写基类方法 - void GetFriendsList(::google::protobuf::RpcController *controller, - const ::fixbug::GetFriendsListRequest *request, - ::fixbug::GetFriendsListResponse *response, - ::google::protobuf::Closure *done) { - uint32_t userid = request->userid(); - std::vector friendsList = GetFriendsList(userid); - response->mutable_result()->set_errcode(0); - response->mutable_result()->set_errmsg(""); - for (std::string &name: friendsList) { - std::string *p = response->add_friends(); - *p = name; - } - done->Run(); + // 重写基类方法 + void GetFriendsList(::google::protobuf::RpcController *controller, const ::fixbug::GetFriendsListRequest *request, + ::fixbug::GetFriendsListResponse *response, ::google::protobuf::Closure *done) { + uint32_t userid = request->userid(); + std::vector friendsList = GetFriendsList(userid); + response->mutable_result()->set_errcode(0); + response->mutable_result()->set_errmsg(""); + for (std::string &name : friendsList) { + std::string *p = response->add_friends(); + *p = name; } + done->Run(); + } }; int main(int argc, char **argv) { - std::string ip = "127.0.0.1"; - short port = 7788; - auto stub = new fixbug::FiendServiceRpc_Stub(new MprpcChannel(ip, port, false)); - // provider是一个rpc网络服务对象。把UserService对象发布到rpc节点上 - RpcProvider provider; - provider.NotifyService(new FriendService()); + std::string ip = "127.0.0.1"; + short port = 7788; + auto stub = new fixbug::FiendServiceRpc_Stub(new MprpcChannel(ip, port, false)); + // provider是一个rpc网络服务对象。把UserService对象发布到rpc节点上 + RpcProvider provider; + provider.NotifyService(new FriendService()); - // 启动一个rpc服务发布节点 Run以后,进程进入阻塞状态,等待远程的rpc调用请求 - provider.Run(1, 7788); + // 启动一个rpc服务发布节点 Run以后,进程进入阻塞状态,等待远程的rpc调用请求 + provider.Run(1, 7788); - return 0; + return 0; } diff --git a/example/rpcExample/caller/callFriendService.cpp b/example/rpcExample/caller/callFriendService.cpp index bf67bc8..3ac8ea2 100644 --- a/example/rpcExample/caller/callFriendService.cpp +++ b/example/rpcExample/caller/callFriendService.cpp @@ -6,52 +6,52 @@ // #include "mprpcapplication.h" #include "rpcExample/friend.pb.h" -#include "rpcprovider.h" -#include "mprpccontroller.h" #include "mprpcchannel.h" - +#include "mprpccontroller.h" +#include "rpcprovider.h" int main(int argc, char **argv) { - // https://askubuntu.com/questions/754213/what-is-difference-between-localhost-address-127-0-0-1-and-127-0-1-1 - std::string ip = "127.0.1.1"; - short port = 7788; + // https://askubuntu.com/questions/754213/what-is-difference-between-localhost-address-127-0-0-1-and-127-0-1-1 + std::string ip = "127.0.1.1"; + short port = 7788; - // 演示调用远程发布的rpc方法Login - fixbug::FiendServiceRpc_Stub stub(new MprpcChannel(ip, port, true)); //注册进自己写的channel类,channel类用于自定义发送格式和负责序列化等操作 - // rpc方法的请求参数 - fixbug::GetFriendsListRequest request; - request.set_userid(1000); - // rpc方法的响应 
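Note on friendService.cpp above: two pre-existing oddities survive the reformat. The `stub` built at the top of main() is a caller-side object; it is never used on this callee side and leaks. And `provider.Run(1, 7788)` repeats the port as a literal even though `port` already holds it. A suggested main() without both:

```cpp
int main(int argc, char **argv) {
  short port = 7788;
  // provider is the rpc server object; publish FriendService on this node.
  RpcProvider provider;
  provider.NotifyService(new FriendService());
  // Run blocks and serves remote rpc calls; reuse the variable instead of the literal.
  provider.Run(1, port);
  return 0;
}
```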
- fixbug::GetFriendsListResponse response; - // 发起rpc方法的调用,消费这的stub最后都会调用到channel的 call_method方法 同步的rpc调用过程 MprpcChannel::callmethod - MprpcController controller; - //長連接測試 ,發送10次請求 - int count = 10; - while (count--) { - std::cout << " 倒数" << count << "次发起RPC请求" << std::endl; - stub.GetFriendsList(&controller, &request, &response, nullptr); - // RpcChannel->RpcChannel::callMethod 集中来做所有rpc方法调用的参数序列化和网络发送 + // 演示调用远程发布的rpc方法Login + fixbug::FiendServiceRpc_Stub stub( + new MprpcChannel(ip, port, true)); //注册进自己写的channel类,channel类用于自定义发送格式和负责序列化等操作 + // rpc方法的请求参数 + fixbug::GetFriendsListRequest request; + request.set_userid(1000); + // rpc方法的响应 + fixbug::GetFriendsListResponse response; + // 发起rpc方法的调用,消费这的stub最后都会调用到channel的 call_method方法 同步的rpc调用过程 MprpcChannel::callmethod + MprpcController controller; + //長連接測試 ,發送10次請求 + int count = 10; + while (count--) { + std::cout << " 倒数" << count << "次发起RPC请求" << std::endl; + stub.GetFriendsList(&controller, &request, &response, nullptr); + // RpcChannel->RpcChannel::callMethod 集中来做所有rpc方法调用的参数序列化和网络发送 - // 一次rpc调用完成,读调用的结果 - //rpc调用是否失败由框架来决定(rpc调用失败 != 业务逻辑返回false) - // rpc和业务本质上是隔离的 - if (controller.Failed()) { - std::cout << controller.ErrorText() << std::endl; - } else { - if (0 == response.result().errcode()) { - std::cout << "rpc GetFriendsList response success!" << std::endl; - int size = response.friends_size(); - for (int i = 0; i < size; i++) { - std::cout << "index:" << (i + 1) << " name:" << response.friends(i) << std::endl; - } - } else { - //这里不是rpc失败, - // 而是业务逻辑的返回值是失败 - // 两者要区分清楚 - std::cout << "rpc GetFriendsList response error : " << response.result().errmsg() << std::endl; - } + // 一次rpc调用完成,读调用的结果 + // rpc调用是否失败由框架来决定(rpc调用失败 != 业务逻辑返回false) + // rpc和业务本质上是隔离的 + if (controller.Failed()) { + std::cout << controller.ErrorText() << std::endl; + } else { + if (0 == response.result().errcode()) { + std::cout << "rpc GetFriendsList response success!" << std::endl; + int size = response.friends_size(); + for (int i = 0; i < size; i++) { + std::cout << "index:" << (i + 1) << " name:" << response.friends(i) << std::endl; } - sleep(5); // sleep 5 seconds + } else { + //这里不是rpc失败, + // 而是业务逻辑的返回值是失败 + // 两者要区分清楚 + std::cout << "rpc GetFriendsList response error : " << response.result().errmsg() << std::endl; + } } - return 0; + sleep(5); // sleep 5 seconds + } + return 0; } diff --git a/format.sh b/format.sh new file mode 100644 index 0000000..c51440a --- /dev/null +++ b/format.sh @@ -0,0 +1,2 @@ +# https://www.cnblogs.com/__tudou__/p/13322854.html +find . -regex '.*\.\(cpp\|hpp\|cu\|c\|h\)' ! 
-regex '.*\(pb\.h\|pb\.cc\)$' -exec clang-format -style=file -i {} \; \ No newline at end of file diff --git a/lib/libskip_list_on_raft.a b/lib/libskip_list_on_raft.a index 654ce0a..a11e156 100644 Binary files a/lib/libskip_list_on_raft.a and b/lib/libskip_list_on_raft.a differ diff --git a/src/common/include/config.h b/src/common/include/config.h index 173fce5..5024c33 100644 --- a/src/common/include/config.h +++ b/src/common/include/config.h @@ -5,18 +5,15 @@ #ifndef CONFIG_H #define CONFIG_H - const bool Debug = true; const int debugMul = 1; -const int HeartBeatTimeout = 25 *debugMul;//心跳时间应该要比选举超时小一个数量级 -const int ApplyInterval = 10 *debugMul;//time.Millisecond - -const int minRandomizedElectionTime = 300 * debugMul; //ms -const int maxRandomizedElectionTime = 500 * debugMul; //ms - +const int HeartBeatTimeout = 25 * debugMul; //心跳时间应该要比选举超时小一个数量级 +const int ApplyInterval = 10 * debugMul; // time.Millisecond -const int CONSENSUS_TIMEOUT = 500 * debugMul;//ms +const int minRandomizedElectionTime = 300 * debugMul; // ms +const int maxRandomizedElectionTime = 500 * debugMul; // ms +const int CONSENSUS_TIMEOUT = 500 * debugMul; // ms -#endif //CONFIG_H +#endif // CONFIG_H diff --git a/src/common/include/util.h b/src/common/include/util.h index fb3a547..9903159 100644 --- a/src/common/include/util.h +++ b/src/common/include/util.h @@ -2,38 +2,34 @@ #ifndef UTIL_H #define UTIL_H - +#include +#include +#include +#include +#include +#include +#include // pthread_condition_t #include #include +#include // pthread_mutex_t #include -#include -#include // pthread_mutex_t -#include // pthread_condition_t +#include #include +#include #include "config.h" -#include -#include -#include -#include -#include -#include -#include -#include template class DeferClass { -public: - DeferClass(F&& f) : m_func(std::forward(f)) {} - DeferClass(const F& f) : m_func(f) {} - ~DeferClass() { - m_func(); - } + public: + DeferClass(F&& f) : m_func(std::forward(f)) {} + DeferClass(const F& f) : m_func(f) {} + ~DeferClass() { m_func(); } - DeferClass(const DeferClass& e) = delete; - DeferClass& operator=(const DeferClass& e) = delete; + DeferClass(const DeferClass& e) = delete; + DeferClass& operator=(const DeferClass& e) = delete; -private: - F m_func; + private: + F m_func; }; #define _CONCAT(a, b) a##b @@ -47,13 +43,11 @@ void DPrintf(const char* format, ...); void myAssert(bool condition, std::string message = "Assertion failed!"); template -std::string format(const char *format_str, Args... args) -{ - std::stringstream ss; - int _[] = {((ss << args), 0)...}; - (void)_; - return ss.str(); - +std::string format(const char* format_str, Args... args) { + std::stringstream ss; + int _[] = {((ss << args), 0)...}; + (void)_; + return ss.str(); } std::chrono::_V2::system_clock::time_point now(); @@ -61,124 +55,122 @@ std::chrono::_V2::system_clock::time_point now(); std::chrono::milliseconds getRandomizedElectionTimeout(); void sleepNMilliseconds(int N); - // ////////////////////////异步写日志的日志队列 -//read is blocking!!! LIKE go chan -template -class LockQueue -{ -public: - // 多个worker线程都会写日志queue - void Push(const T &data) - { - std::lock_guard lock(m_mutex); //使用lock_gurad,即RAII的思想保证锁正确释放 - m_queue.push(data); - m_condvariable.notify_one(); +// read is blocking!!! 
LIKE go chan +template +class LockQueue { + public: + // 多个worker线程都会写日志queue + void Push(const T& data) { + std::lock_guard lock(m_mutex); //使用lock_gurad,即RAII的思想保证锁正确释放 + m_queue.push(data); + m_condvariable.notify_one(); + } + + // 一个线程读日志queue,写日志文件 + T Pop() { + std::unique_lock lock(m_mutex); + while (m_queue.empty()) { + // 日志队列为空,线程进入wait状态 + m_condvariable.wait(lock); //这里用unique_lock是因为lock_guard不支持解锁,而unique_lock支持 } - - // 一个线程读日志queue,写日志文件 - T Pop() - { - std::unique_lock lock(m_mutex); - while (m_queue.empty()) - { - // 日志队列为空,线程进入wait状态 - m_condvariable.wait(lock);//这里用unique_lock是因为lock_guard不支持解锁,而unique_lock支持 - } - T data = m_queue.front(); - m_queue.pop(); - return data; + T data = m_queue.front(); + m_queue.pop(); + return data; + } + + bool timeOutPop(int timeout, T* ResData) // 添加一个超时时间参数,默认为 50 毫秒 + { + std::unique_lock lock(m_mutex); + + // 获取当前时间点,并计算出超时时刻 + auto now = std::chrono::system_clock::now(); + auto timeout_time = now + std::chrono::milliseconds(timeout); + + // 在超时之前,不断检查队列是否为空 + while (m_queue.empty()) { + // 如果已经超时了,就返回一个空对象 + if (m_condvariable.wait_until(lock, timeout_time) == std::cv_status::timeout) { + return false; + } else { + continue; + } } - bool timeOutPop(int timeout ,T* ResData) // 添加一个超时时间参数,默认为 50 毫秒 - { - std::unique_lock lock(m_mutex); - - // 获取当前时间点,并计算出超时时刻 - auto now = std::chrono::system_clock::now(); - auto timeout_time = now + std::chrono::milliseconds(timeout); - - // 在超时之前,不断检查队列是否为空 - while (m_queue.empty()) { - // 如果已经超时了,就返回一个空对象 - if (m_condvariable.wait_until(lock, timeout_time) == std::cv_status::timeout) { - return false; - }else{ - continue; - } - } - - T data = m_queue.front(); - m_queue.pop(); - *ResData = data; - return true; - } -private: - std::queue m_queue; - std::mutex m_mutex; - std::condition_variable m_condvariable; + T data = m_queue.front(); + m_queue.pop(); + *ResData = data; + return true; + } + + private: + std::queue m_queue; + std::mutex m_mutex; + std::condition_variable m_condvariable; }; // 两个对锁的管理用到了RAII的思想,防止中途出现问题而导致资源无法释放的问题!!! -// std::lock_guard 和 std::unique_lock 都是 C++11 中用来管理互斥锁的工具类,它们都封装了 RAII(Resource Acquisition Is Initialization)技术,使得互斥锁在需要时自动加锁,在不需要时自动解锁,从而避免了很多手动加锁和解锁的繁琐操作。 -// std::lock_guard 是一个模板类,它的模板参数是一个互斥量类型。当创建一个 std::lock_guard 对象时,它会自动地对传入的互斥量进行加锁操作,并在该对象被销毁时对互斥量进行自动解锁操作。std::lock_guard 不能手动释放锁,因为其所提供的锁的生命周期与其绑定对象的生命周期一致。 -// std::unique_lock 也是一个模板类,同样的,其模板参数也是互斥量类型。不同的是,std::unique_lock 提供了更灵活的锁管理功能。可以通过 lock()、unlock()、try_lock() 等方法手动控制锁的状态。当然,std::unique_lock 也支持 RAII 技术,即在对象被销毁时会自动解锁。另外, std::unique_lock 还支持超时等待和可中断等待的操作。 - +// std::lock_guard 和 std::unique_lock 都是 C++11 中用来管理互斥锁的工具类,它们都封装了 RAII(Resource Acquisition Is +// Initialization)技术,使得互斥锁在需要时自动加锁,在不需要时自动解锁,从而避免了很多手动加锁和解锁的繁琐操作。 +// std::lock_guard 是一个模板类,它的模板参数是一个互斥量类型。当创建一个 std::lock_guard +// 对象时,它会自动地对传入的互斥量进行加锁操作,并在该对象被销毁时对互斥量进行自动解锁操作。std::lock_guard +// 不能手动释放锁,因为其所提供的锁的生命周期与其绑定对象的生命周期一致。 std::unique_lock +// 也是一个模板类,同样的,其模板参数也是互斥量类型。不同的是,std::unique_lock 提供了更灵活的锁管理功能。可以通过 +// lock()、unlock()、try_lock() 等方法手动控制锁的状态。当然,std::unique_lock 也支持 RAII +// 技术,即在对象被销毁时会自动解锁。另外, std::unique_lock 还支持超时等待和可中断等待的操作。 // 这个Op是kv传递给raft的command class Op { -public: - // Your definitions here. - // Field names must start with capital letters, - // otherwise RPC will break. 
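Note: LockQueue above is the Go-channel-like bridge the comments describe (raft pushes ApplyMsg, kvServer blocks on Pop). Its template parameter list has been eaten in this copy; from the Push/Pop signatures it is plainly `template <typename T>`. A minimal usage sketch under that assumption, with util.h on the include path:

```cpp
#include <iostream>
#include <thread>
#include "util.h"

int main() {
  LockQueue<int> ch;
  std::thread producer([&ch] {
    for (int i = 0; i < 3; i++) ch.Push(i);  // Push notifies one waiting consumer
  });
  std::cout << "blocking Pop got " << ch.Pop() << std::endl;  // like <-ch in Go
  int v = 0;
  while (ch.timeOutPop(50, &v)) {  // bounded wait: false once 50 ms pass with no data
    std::cout << "timeOutPop got " << v << std::endl;
  }
  producer.join();
  return 0;
}
```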
- std::string Operation; // "Get" "Put" "Append" - std::string Key; - std::string Value; - std::string ClientId; //客户端号码 - int RequestId; //客户端号码请求的Request的序列号,为了保证线性一致性 - // IfDuplicate bool // Duplicate command can't be applied twice , but only for PUT and APPEND - -public: - //todo - //为了协调raftRPC中的command只设置成了string,这个的限制就是正常字符中不能包含| - //当然后期可以换成更高级的序列化方法,比如protobuf - std::string asString() const { - std::stringstream ss; - boost::archive::text_oarchive oa(ss); - - - // write class instance to archive - oa << *this; - // close archive - - return ss.str(); - } - - bool parseFromString(std::string str) { - std::stringstream iss(str); - boost::archive::text_iarchive ia(iss); - // read class state from archive - ia >> *this; - return true; //todo : 解析失敗如何處理,要看一下boost庫了 - } - -public: - friend std::ostream& operator<<(std::ostream& os, const Op& obj) { - os << "[MyClass:Operation{"+obj.Operation+"},Key{"+obj.Key+"},Value{"+obj.Value +"},ClientId{"+obj.ClientId+"},RequestId{"+std::to_string( obj.RequestId)+"}"; // 在这里实现自定义的输出格式 - return os; - } + public: + // Your definitions here. + // Field names must start with capital letters, + // otherwise RPC will break. + std::string Operation; // "Get" "Put" "Append" + std::string Key; + std::string Value; + std::string ClientId; //客户端号码 + int RequestId; //客户端号码请求的Request的序列号,为了保证线性一致性 + // IfDuplicate bool // Duplicate command can't be applied twice , but only for PUT and APPEND + + public: + // todo + //为了协调raftRPC中的command只设置成了string,这个的限制就是正常字符中不能包含| + //当然后期可以换成更高级的序列化方法,比如protobuf + std::string asString() const { + std::stringstream ss; + boost::archive::text_oarchive oa(ss); + // write class instance to archive + oa << *this; + // close archive -private: - friend class boost::serialization::access; - template - void serialize(Archive & ar, const unsigned int version) - { - ar & Operation; - ar & Key; - ar & Value; - ar & ClientId; - ar & RequestId; - } + return ss.str(); + } + + bool parseFromString(std::string str) { + std::stringstream iss(str); + boost::archive::text_iarchive ia(iss); + // read class state from archive + ia >> *this; + return true; // todo : 解析失敗如何處理,要看一下boost庫了 + } + + public: + friend std::ostream& operator<<(std::ostream& os, const Op& obj) { + os << "[MyClass:Operation{" + obj.Operation + "},Key{" + obj.Key + "},Value{" + obj.Value + "},ClientId{" + + obj.ClientId + "},RequestId{" + std::to_string(obj.RequestId) + "}"; // 在这里实现自定义的输出格式 + return os; + } + + private: + friend class boost::serialization::access; + template + void serialize(Archive& ar, const unsigned int version) { + ar& Operation; + ar& Key; + ar& Value; + ar& ClientId; + ar& RequestId; + } }; ///////////////////////////////////////////////kvserver reply err to clerk @@ -189,24 +181,22 @@ const std::string ErrWrongLeader = "ErrWrongLeader"; ////////////////////////////////////获取可用端口 - bool isReleasePort(unsigned short usPort); bool getReleasePort(short& port); -//int main(int argc, char** argv) +// int main(int argc, char** argv) //{ -// short port = 9060; -// if(getReleasePort(port)) //在port的基础上获取一个可用的port -// { -// std::cout << "可用的端口号为:" << port << std::endl; -// } -// else -// { -// std::cout << "获取可用端口号失败!" << std::endl; -// } -// return 0; -//} - - -#endif // UTIL_H \ No newline at end of file +// short port = 9060; +// if(getReleasePort(port)) //在port的基础上获取一个可用的port +// { +// std::cout << "可用的端口号为:" << port << std::endl; +// } +// else +// { +// std::cout << "获取可用端口号失败!" 
<< std::endl; +// } +// return 0; +// } + +#endif // UTIL_H \ No newline at end of file diff --git a/src/common/util.cpp b/src/common/util.cpp index c91bed1..36ca38f 100644 --- a/src/common/util.cpp +++ b/src/common/util.cpp @@ -1,74 +1,68 @@ #include "util.h" -#include #include +#include +#include #include #include -#include void myAssert(bool condition, std::string message) { - if (!condition) { - std::cerr << "Error: " << message << std::endl; - std::exit(EXIT_FAILURE); - } + if (!condition) { + std::cerr << "Error: " << message << std::endl; + std::exit(EXIT_FAILURE); + } } - -std::chrono::_V2::system_clock::time_point now() { - return std::chrono::high_resolution_clock::now(); -} +std::chrono::_V2::system_clock::time_point now() { return std::chrono::high_resolution_clock::now(); } std::chrono::milliseconds getRandomizedElectionTimeout() { - std::random_device rd; - std::mt19937 rng(rd()); - std::uniform_int_distribution dist(minRandomizedElectionTime, maxRandomizedElectionTime); + std::random_device rd; + std::mt19937 rng(rd()); + std::uniform_int_distribution dist(minRandomizedElectionTime, maxRandomizedElectionTime); - return std::chrono::milliseconds(dist(rng)); + return std::chrono::milliseconds(dist(rng)); } -void sleepNMilliseconds(int N) { - std::this_thread::sleep_for(std::chrono::milliseconds(N)); -}; - +void sleepNMilliseconds(int N) { std::this_thread::sleep_for(std::chrono::milliseconds(N)); }; bool getReleasePort(short &port) { - short num = 0; - while (!isReleasePort(port) && num < 30) { - ++port; - ++num; - } - if (num >= 30) { - port = -1; - return false; - } - return true; + short num = 0; + while (!isReleasePort(port) && num < 30) { + ++port; + ++num; + } + if (num >= 30) { + port = -1; + return false; + } + return true; } bool isReleasePort(unsigned short usPort) { - int s = socket(AF_INET, SOCK_STREAM, IPPROTO_IP); - sockaddr_in addr; - addr.sin_family = AF_INET; - addr.sin_port = htons(usPort); - addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK); - int ret = ::bind(s, (sockaddr *) &addr, sizeof(addr)); - if (ret != 0) { - close(s); - return false; - } + int s = socket(AF_INET, SOCK_STREAM, IPPROTO_IP); + sockaddr_in addr; + addr.sin_family = AF_INET; + addr.sin_port = htons(usPort); + addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK); + int ret = ::bind(s, (sockaddr *)&addr, sizeof(addr)); + if (ret != 0) { close(s); - return true; + return false; + } + close(s); + return true; } void DPrintf(const char *format, ...) 
{ - if (Debug) { - // 获取当前的日期,然后取日志信息,写入相应的日志文件当中 a+ - time_t now = time(nullptr); - tm *nowtm = localtime(&now); - va_list args; - va_start(args, format); - std::printf("[%d-%d-%d-%d-%d-%d] ", nowtm->tm_year + 1900, nowtm->tm_mon + 1, nowtm->tm_mday, nowtm->tm_hour, - nowtm->tm_min, nowtm->tm_sec); - std::vprintf(format, args); - std::printf("\n"); - va_end(args); - } + if (Debug) { + // 获取当前的日期,然后取日志信息,写入相应的日志文件当中 a+ + time_t now = time(nullptr); + tm *nowtm = localtime(&now); + va_list args; + va_start(args, format); + std::printf("[%d-%d-%d-%d-%d-%d] ", nowtm->tm_year + 1900, nowtm->tm_mon + 1, nowtm->tm_mday, nowtm->tm_hour, + nowtm->tm_min, nowtm->tm_sec); + std::vprintf(format, args); + std::printf("\n"); + va_end(args); + } } diff --git a/src/raftClerk/clerk.cpp b/src/raftClerk/clerk.cpp index c142fd3..08cf24c 100644 --- a/src/raftClerk/clerk.cpp +++ b/src/raftClerk/clerk.cpp @@ -10,94 +10,94 @@ #include #include std::string Clerk::Get(std::string key) { - m_requestId++; - auto requestId = m_requestId; - int server = m_recentLeaderId; - raftKVRpcProctoc::GetArgs args; - args.set_key(key); - args.set_clientid(m_clientId); - args.set_requestid(requestId); + m_requestId++; + auto requestId = m_requestId; + int server = m_recentLeaderId; + raftKVRpcProctoc::GetArgs args; + args.set_key(key); + args.set_clientid(m_clientId); + args.set_requestid(requestId); - while (true) { - raftKVRpcProctoc::GetReply reply; - bool ok = m_servers[server]->Get(&args, &reply); - if (!ok || reply.err() == ErrWrongLeader) {//会一直重试,因为requestId没有改变,因此可能会因为RPC的丢失或者其他情况导致重试,kvserver层来保证不重复执行(线性一致性) - server = (server + 1) % m_servers.size(); - continue; - } - if(reply.err() == ErrNoKey){ - return ""; - } - if(reply.err() == OK){ - m_recentLeaderId = server; - return reply .value(); - } + while (true) { + raftKVRpcProctoc::GetReply reply; + bool ok = m_servers[server]->Get(&args, &reply); + if (!ok || + reply.err() == + ErrWrongLeader) { //会一直重试,因为requestId没有改变,因此可能会因为RPC的丢失或者其他情况导致重试,kvserver层来保证不重复执行(线性一致性) + server = (server + 1) % m_servers.size(); + continue; + } + if (reply.err() == ErrNoKey) { + return ""; } - return ""; + if (reply.err() == OK) { + m_recentLeaderId = server; + return reply.value(); + } + } + return ""; } void Clerk::PutAppend(std::string key, std::string value, std::string op) { - // You will have to modify this function. - m_requestId++; - auto requestId = m_requestId; - auto server = m_recentLeaderId; - while (true){ - raftKVRpcProctoc::PutAppendArgs args; - args.set_key(key);args.set_value(value);args.set_op(op);args.set_clientid(m_clientId);args.set_requestid(requestId); - raftKVRpcProctoc::PutAppendReply reply; - bool ok = m_servers[server]->PutAppend(&args,&reply); - if(!ok || reply.err()==ErrWrongLeader){ - - DPrintf("【Clerk::PutAppend】原以为的leader:{%d}请求失败,向新leader{%d}重试 ,操作:{%s}",server,server+1,op.c_str()); - if(!ok){ - DPrintf("重试原因 ,rpc失敗 ,"); - } - if(reply.err()==ErrWrongLeader){ - DPrintf("重試原因:非leader"); - } - server = (server+1)%m_servers.size(); // try the next server - continue; - } - if(reply.err()==OK){ //什么时候reply errno为ok呢??? - m_recentLeaderId = server; - return ; - } + // You will have to modify this function. 
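Note: Clerk::Get above retries with the same requestId on ErrWrongLeader or rpc loss, and its comment delegates exactly-once semantics to the server. The counterpart is kvServer's m_lastRequestId map and ifRequestDuplicate; a sketch of that check, on my reading that each client issues requestIds monotonically (lock elided):

```cpp
#include <string>
#include <unordered_map>

// clientId -> highest requestId already applied for that client.
std::unordered_map<std::string, int> lastRequestId;

bool IfRequestDuplicate(const std::string &clientId, int requestId) {
  auto it = lastRequestId.find(clientId);
  if (it == lastRequestId.end()) return false;  // first request seen from this client
  // A retry reuses its requestId, so anything <= the last applied id has
  // already been executed and must not be applied to the state machine again.
  return requestId <= it->second;
}
```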
+ m_requestId++; + auto requestId = m_requestId; + auto server = m_recentLeaderId; + while (true) { + raftKVRpcProctoc::PutAppendArgs args; + args.set_key(key); + args.set_value(value); + args.set_op(op); + args.set_clientid(m_clientId); + args.set_requestid(requestId); + raftKVRpcProctoc::PutAppendReply reply; + bool ok = m_servers[server]->PutAppend(&args, &reply); + if (!ok || reply.err() == ErrWrongLeader) { + DPrintf("【Clerk::PutAppend】原以为的leader:{%d}请求失败,向新leader{%d}重试 ,操作:{%s}", server, server + 1, + op.c_str()); + if (!ok) { + DPrintf("重试原因 ,rpc失敗 ,"); + } + if (reply.err() == ErrWrongLeader) { + DPrintf("重試原因:非leader"); + } + server = (server + 1) % m_servers.size(); // try the next server + continue; + } + if (reply.err() == OK) { //什么时候reply errno为ok呢??? + m_recentLeaderId = server; + return; } + } } -void Clerk::Put(std::string key, std::string value) { - PutAppend(key, value, "Put"); -} +void Clerk::Put(std::string key, std::string value) { PutAppend(key, value, "Put"); } -void Clerk::Append(std::string key, std::string value) { - PutAppend(key, value, "Append"); -} +void Clerk::Append(std::string key, std::string value) { PutAppend(key, value, "Append"); } //初始化客户端 void Clerk::Init(std::string configFileName) { - //获取所有raft节点ip、port ,并进行连接 - MprpcConfig config; - config.LoadConfigFile(configFileName.c_str()); - std::vector> ipPortVt; - for (int i = 0; i < INT_MAX - 1 ; ++i) { - std::string node = "node" + std::to_string(i); + //获取所有raft节点ip、port ,并进行连接 + MprpcConfig config; + config.LoadConfigFile(configFileName.c_str()); + std::vector> ipPortVt; + for (int i = 0; i < INT_MAX - 1; ++i) { + std::string node = "node" + std::to_string(i); - std::string nodeIp = config.Load(node+"ip"); - std::string nodePortStr = config.Load(node+"port"); - if(nodeIp.empty()){ - break; - } - ipPortVt.emplace_back(nodeIp, atoi(nodePortStr.c_str())); //沒有atos方法,可以考慮自己实现 + std::string nodeIp = config.Load(node + "ip"); + std::string nodePortStr = config.Load(node + "port"); + if (nodeIp.empty()) { + break; } - //进行连接 - for (const auto &item:ipPortVt){ - std::string ip = item.first; short port = item.second; - //2024-01-04 todo:bug fix - auto* rpc = new raftServerRpcUtil(ip,port); - m_servers.push_back(std::shared_ptr(rpc)); - } -} - -Clerk::Clerk() :m_clientId(Uuid()),m_requestId(0),m_recentLeaderId(0){ - + ipPortVt.emplace_back(nodeIp, atoi(nodePortStr.c_str())); //沒有atos方法,可以考慮自己实现 + } + //进行连接 + for (const auto& item : ipPortVt) { + std::string ip = item.first; + short port = item.second; + // 2024-01-04 todo:bug fix + auto* rpc = new raftServerRpcUtil(ip, port); + m_servers.push_back(std::shared_ptr(rpc)); + } } +Clerk::Clerk() : m_clientId(Uuid()), m_requestId(0), m_recentLeaderId(0) {} diff --git a/src/raftClerk/include/clerk.h b/src/raftClerk/include/clerk.h index 1b0df7b..623d1c2 100644 --- a/src/raftClerk/include/clerk.h +++ b/src/raftClerk/include/clerk.h @@ -4,45 +4,42 @@ #ifndef SKIP_LIST_ON_RAFT_CLERK_H #define SKIP_LIST_ON_RAFT_CLERK_H -#include -#include #include #include +#include +#include +#include #include #include -#include #include #include #include "kvServerRPC.pb.h" #include "mprpcconfig.h" -class Clerk{ - private: - - std::vector> m_servers; //保存所有raft节点的fd //todo:全部初始化为-1,表示没有连接上 - std::string m_clientId; - int m_requestId; - int m_recentLeaderId ; //只是有可能是领导 - - std::string Uuid(){ - return std::to_string(rand()) + std::to_string(rand()) + std::to_string(rand()) + std::to_string(rand()); - }//用于返回随机的clientId - -// MakeClerk todo - void PutAppend(std::string key ,std::string 
value ,std::string op ); -public: - //对外暴露的三个功能和初始化 - void Init(std::string configFileName); - std::string Get(std::string key ) ; - - void Put(std::string key , std::string value ); - void Append(std::string key , std::string value ); -public: - Clerk(); +class Clerk { + private: + std::vector> + m_servers; //保存所有raft节点的fd //todo:全部初始化为-1,表示没有连接上 + std::string m_clientId; + int m_requestId; + int m_recentLeaderId; //只是有可能是领导 + + std::string Uuid() { + return std::to_string(rand()) + std::to_string(rand()) + std::to_string(rand()) + std::to_string(rand()); + } //用于返回随机的clientId + + // MakeClerk todo + void PutAppend(std::string key, std::string value, std::string op); + + public: + //对外暴露的三个功能和初始化 + void Init(std::string configFileName); + std::string Get(std::string key); + + void Put(std::string key, std::string value); + void Append(std::string key, std::string value); + + public: + Clerk(); }; - - - - - -#endif //SKIP_LIST_ON_RAFT_CLERK_H +#endif // SKIP_LIST_ON_RAFT_CLERK_H diff --git a/src/raftClerk/include/raftServerRpcUtil.h b/src/raftClerk/include/raftServerRpcUtil.h index 5902a5c..959e43e 100644 --- a/src/raftClerk/include/raftServerRpcUtil.h +++ b/src/raftClerk/include/raftServerRpcUtil.h @@ -6,31 +6,26 @@ #define RAFTSERVERRPC_H #include -#include "mprpcchannel.h" #include "kvServerRPC.pb.h" -#include "rpcprovider.h" +#include "mprpcchannel.h" #include "mprpccontroller.h" +#include "rpcprovider.h" /// @brief 维护当前节点对其他某一个结点的所有rpc通信,包括接收其他节点的rpc和发送 // 对于一个节点来说,对于任意其他的节点都要维护一个rpc连接, -class raftServerRpcUtil -{ -private: - raftKVRpcProctoc::kvServerRpc_Stub *stub; +class raftServerRpcUtil { + private: + raftKVRpcProctoc::kvServerRpc_Stub* stub; -public: - //主动调用其他节点的三个方法,可以按照mit6824来调用,但是别的节点调用自己的好像就不行了,要继承protoc提供的service类才行 + public: + //主动调用其他节点的三个方法,可以按照mit6824来调用,但是别的节点调用自己的好像就不行了,要继承protoc提供的service类才行 - //响应其他节点的方法 - bool Get(raftKVRpcProctoc::GetArgs* GetArgs,raftKVRpcProctoc::GetReply* reply); - bool PutAppend(raftKVRpcProctoc::PutAppendArgs* args,raftKVRpcProctoc::PutAppendReply* reply); + //响应其他节点的方法 + bool Get(raftKVRpcProctoc::GetArgs* GetArgs, raftKVRpcProctoc::GetReply* reply); + bool PutAppend(raftKVRpcProctoc::PutAppendArgs* args, raftKVRpcProctoc::PutAppendReply* reply); - raftServerRpcUtil(std::string ip, short port); - ~raftServerRpcUtil(); + raftServerRpcUtil(std::string ip, short port); + ~raftServerRpcUtil(); }; - - - - -#endif //RAFTSERVERRPC_H +#endif // RAFTSERVERRPC_H diff --git a/src/raftClerk/raftServerRpcUtil.cpp b/src/raftClerk/raftServerRpcUtil.cpp index 44d6f1c..0770bca 100644 --- a/src/raftClerk/raftServerRpcUtil.cpp +++ b/src/raftClerk/raftServerRpcUtil.cpp @@ -3,34 +3,29 @@ // #include "raftServerRpcUtil.h" - - -//kvserver不同于raft节点之间,kvserver的rpc是用于clerk向kvserver调用,不会被调用,因此只用写caller功能,不用写callee功能 +// kvserver不同于raft节点之间,kvserver的rpc是用于clerk向kvserver调用,不会被调用,因此只用写caller功能,不用写callee功能 //先开启服务器,再尝试连接其他的节点,中间给一个间隔时间,等待其他的rpc服务器节点启动 -raftServerRpcUtil::raftServerRpcUtil(std::string ip,short port) -{ - //********************************************* */ - // 接收rpc设置 - //********************************************* */ - //发送rpc设置 - stub = new raftKVRpcProctoc::kvServerRpc_Stub(new MprpcChannel(ip, port,false)); +raftServerRpcUtil::raftServerRpcUtil(std::string ip, short port) { + //********************************************* */ + // 接收rpc设置 + //********************************************* */ + //发送rpc设置 + stub = new raftKVRpcProctoc::kvServerRpc_Stub(new MprpcChannel(ip, port, false)); } -raftServerRpcUtil::~raftServerRpcUtil() { - delete stub; -} 
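Note on clerk.h above: Uuid() concatenates four rand() calls, but nothing in this header seeds rand(), so separate clerk processes can walk the same sequence and collide on clientId, which then corrupts the per-client dedup described earlier. A suggested replacement (sketch):

```cpp
#include <cstdint>
#include <random>
#include <string>

std::string Uuid() {
  // Seeded once per process; random_device avoids the shared default rand() stream.
  static std::mt19937_64 gen{std::random_device{}()};
  std::uniform_int_distribution<uint64_t> dis;
  return std::to_string(dis(gen)) + "-" + std::to_string(dis(gen));
}
```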
+raftServerRpcUtil::~raftServerRpcUtil() { delete stub; } bool raftServerRpcUtil::Get(raftKVRpcProctoc::GetArgs *GetArgs, raftKVRpcProctoc::GetReply *reply) { - MprpcController controller; - stub->Get(&controller, GetArgs, reply, nullptr); - return !controller.Failed(); + MprpcController controller; + stub->Get(&controller, GetArgs, reply, nullptr); + return !controller.Failed(); } bool raftServerRpcUtil::PutAppend(raftKVRpcProctoc::PutAppendArgs *args, raftKVRpcProctoc::PutAppendReply *reply) { - MprpcController controller; - stub->PutAppend(&controller, args, reply, nullptr); - if(controller.Failed()){ - std::cout<PutAppend(&controller, args, reply, nullptr); + if (controller.Failed()) { + std::cout << controller.ErrorText() << endl; + } + return !controller.Failed(); } diff --git a/src/raftCore/Persister.cpp b/src/raftCore/Persister.cpp index 6fab117..3ece02a 100644 --- a/src/raftCore/Persister.cpp +++ b/src/raftCore/Persister.cpp @@ -3,80 +3,80 @@ // #include "Persister.h" #include "util.h" -//todo:如果文件出现问题会怎么办?? +// todo:如果文件出现问题会怎么办?? void Persister::Save(const std::string raftstate, const std::string snapshot) { - std::lock_guard lg(mtx); - // 将raftstate和snapshot写入本地文件 - std::ofstream outfile; - m_raftStateOutStream << raftstate; - m_snapshotOutStream << snapshot; + std::lock_guard lg(mtx); + // 将raftstate和snapshot写入本地文件 + std::ofstream outfile; + m_raftStateOutStream << raftstate; + m_snapshotOutStream << snapshot; } std::string Persister::ReadSnapshot() { - std::lock_guard lg(mtx); - if (m_snapshotOutStream.is_open()) { - m_snapshotOutStream.close(); - } -// Defer ec1([this]()-> void { -// this->m_snapshotOutStream.open(snapshotFile); -// }); //这个变量后生成,会先销毁 - DEFER { - m_snapshotOutStream.open(snapshotFile); - }; //这个变量后生成,会先销毁 - std::fstream ifs(snapshotFile, std::ios_base::in); - if (!ifs.good()) { - return ""; - } - std::string snapshot; - ifs >> snapshot; - ifs.close(); - return snapshot; + std::lock_guard lg(mtx); + if (m_snapshotOutStream.is_open()) { + m_snapshotOutStream.close(); + } + // Defer ec1([this]()-> void { + // this->m_snapshotOutStream.open(snapshotFile); + // }); //这个变量后生成,会先销毁 + DEFER { m_snapshotOutStream.open(snapshotFile); }; //这个变量后生成,会先销毁 + std::fstream ifs(snapshotFile, std::ios_base::in); + if (!ifs.good()) { + return ""; + } + std::string snapshot; + ifs >> snapshot; + ifs.close(); + return snapshot; } void Persister::SaveRaftState(const std::string &data) { - std::lock_guard lg(mtx); - // 将raftstate和snapshot写入本地文件 - m_raftStateOutStream << data; + std::lock_guard lg(mtx); + // 将raftstate和snapshot写入本地文件 + m_raftStateOutStream << data; } long long Persister::RaftStateSize() { - std::lock_guard lg(mtx); + std::lock_guard lg(mtx); - return m_raftStateSize; + return m_raftStateSize; } std::string Persister::ReadRaftState() { - std::lock_guard lg(mtx); + std::lock_guard lg(mtx); - std::fstream ifs(raftStateFile, std::ios_base::in); - if (!ifs.good()) { - return ""; - } - std::string snapshot; - ifs >> snapshot; - ifs.close(); - return snapshot; + std::fstream ifs(raftStateFile, std::ios_base::in); + if (!ifs.good()) { + return ""; + } + std::string snapshot; + ifs >> snapshot; + ifs.close(); + return snapshot; } -Persister::Persister(int me) : raftStateFile("raftstatePersist" + std::to_string(me) + ".txt"), - snapshotFile("snapshotPersist" + std::to_string(me) + ".txt"), m_raftStateSize(0) { - std::fstream file(raftStateFile, std::ios::out | std::ios::trunc); - if (file.is_open()) { - file.close(); - } - file = std::fstream(snapshotFile, std::ios::out | 
std::ios::trunc); - if (file.is_open()) { - file.close(); - } - m_raftStateOutStream.open(raftStateFile); - m_snapshotOutStream.open(snapshotFile); +Persister::Persister(int me) + : raftStateFile("raftstatePersist" + std::to_string(me) + ".txt"), + snapshotFile("snapshotPersist" + std::to_string(me) + ".txt"), + m_raftStateSize(0) { + std::fstream file(raftStateFile, std::ios::out | std::ios::trunc); + if (file.is_open()) { + file.close(); + } + file = std::fstream(snapshotFile, std::ios::out | std::ios::trunc); + if (file.is_open()) { + file.close(); + } + m_raftStateOutStream.open(raftStateFile); + m_snapshotOutStream.open(snapshotFile); } Persister::~Persister() { - if (m_raftStateOutStream.is_open()) { - m_raftStateOutStream.close(); - } - if (m_snapshotOutStream.is_open()) { - m_snapshotOutStream.close(); - } + if (m_raftStateOutStream.is_open()) { + m_raftStateOutStream.close(); + } + if (m_snapshotOutStream.is_open()) { + m_snapshotOutStream.close(); + } } diff --git a/src/raftCore/include/ApplyMsg.h b/src/raftCore/include/ApplyMsg.h index 610a3cc..197465a 100644 --- a/src/raftCore/include/ApplyMsg.h +++ b/src/raftCore/include/ApplyMsg.h @@ -1,23 +1,27 @@ #ifndef APPLYMSG_H #define APPLYMSG_H #include -class ApplyMsg -{ -public: - bool CommandValid; - std::string Command; - int CommandIndex; - bool SnapshotValid; - std::string Snapshot; - int SnapshotTerm; - int SnapshotIndex; +class ApplyMsg { + public: + bool CommandValid; + std::string Command; + int CommandIndex; + bool SnapshotValid; + std::string Snapshot; + int SnapshotTerm; + int SnapshotIndex; -public: - //两个valid最开始要赋予false!! - ApplyMsg():CommandValid(false),Command(),CommandIndex(-1),SnapshotValid(false),SnapshotTerm(-1),SnapshotIndex(-1){ + public: + //两个valid最开始要赋予false!! + ApplyMsg() + : CommandValid(false), + Command(), + CommandIndex(-1), + SnapshotValid(false), + SnapshotTerm(-1), + SnapshotIndex(-1){ - }; + }; }; - -#endif //APPLYMSG_H \ No newline at end of file +#endif // APPLYMSG_H \ No newline at end of file diff --git a/src/raftCore/include/Persister.h b/src/raftCore/include/Persister.h index 898115c..695f9b9 100644 --- a/src/raftCore/include/Persister.h +++ b/src/raftCore/include/Persister.h @@ -4,27 +4,26 @@ #ifndef SKIP_LIST_ON_RAFT_PERSISTER_H #define SKIP_LIST_ON_RAFT_PERSISTER_H -#include #include -class Persister{ -private: - std::mutex mtx; - std::string m_raftState; - std::string m_snapshot; - const std::string raftStateFile; - const std::string snapshotFile; - std::ofstream m_raftStateOutStream; - std::ofstream m_snapshotOutStream; - long long m_raftStateSize; //避免每次都读取文件的具体大小 -public: - void Save(std::string raftstate , std::string snapshot ); - std::string ReadSnapshot(); - void SaveRaftState(const std::string& data); - long long RaftStateSize(); - std::string ReadRaftState(); - explicit Persister(int me); - ~Persister(); +#include +class Persister { + private: + std::mutex mtx; + std::string m_raftState; + std::string m_snapshot; + const std::string raftStateFile; + const std::string snapshotFile; + std::ofstream m_raftStateOutStream; + std::ofstream m_snapshotOutStream; + long long m_raftStateSize; //避免每次都读取文件的具体大小 + public: + void Save(std::string raftstate, std::string snapshot); + std::string ReadSnapshot(); + void SaveRaftState(const std::string& data); + long long RaftStateSize(); + std::string ReadRaftState(); + explicit Persister(int me); + ~Persister(); }; - -#endif //SKIP_LIST_ON_RAFT_PERSISTER_H +#endif // SKIP_LIST_ON_RAFT_PERSISTER_H diff --git a/src/raftCore/include/kvServer.h 
b/src/raftCore/include/kvServer.h index 5c90884..5f7c765 100644 --- a/src/raftCore/include/kvServer.h +++ b/src/raftCore/include/kvServer.h @@ -5,137 +5,130 @@ #ifndef SKIP_LIST_ON_RAFT_KVSERVER_H #define SKIP_LIST_ON_RAFT_KVSERVER_H +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include -#include "raft.h" #include #include "kvServerRPC.pb.h" +#include "raft.h" #include "skipList.h" -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - class KvServer : raftKVRpcProctoc::kvServerRpc { -private: - std::mutex m_mtx; - int m_me; - std::shared_ptr m_raftNode; - std::shared_ptr > applyChan; //kvServer和raft节点的通信管道 - int m_maxRaftState; // snapshot if log grows this big + private: + std::mutex m_mtx; + int m_me; + std::shared_ptr m_raftNode; + std::shared_ptr > applyChan; // kvServer和raft节点的通信管道 + int m_maxRaftState; // snapshot if log grows this big - // Your definitions here. - std::string m_serializedKVData; // todo : 序列化后的kv数据,理论上可以不用,但是目前没有找到特别好的替代方法 - SkipList m_skipList; - std::unordered_map m_kvDB; + // Your definitions here. + std::string m_serializedKVData; // todo : 序列化后的kv数据,理论上可以不用,但是目前没有找到特别好的替代方法 + SkipList m_skipList; + std::unordered_map m_kvDB; - std::unordered_map *> waitApplyCh; - // index(raft) -> chan //???字段含义 waitApplyCh是一个map,键是int,值是Op类型的管道 + std::unordered_map *> waitApplyCh; + // index(raft) -> chan //???字段含义 waitApplyCh是一个map,键是int,值是Op类型的管道 - std::unordered_map m_lastRequestId; // clientid -> requestID //一个kV服务器可能连接多个client + std::unordered_map m_lastRequestId; // clientid -> requestID //一个kV服务器可能连接多个client - // last SnapShot point , raftIndex - int m_lastSnapShotRaftLogIndex; + // last SnapShot point , raftIndex + int m_lastSnapShotRaftLogIndex; -public: - KvServer() = delete; + public: + KvServer() = delete; - KvServer(int me, int maxraftstate, std::string nodeInforFileName, short port); + KvServer(int me, int maxraftstate, std::string nodeInforFileName, short port); - void StartKVServer(); + void StartKVServer(); - void DprintfKVDB(); + void DprintfKVDB(); - void ExecuteAppendOpOnKVDB(Op op); + void ExecuteAppendOpOnKVDB(Op op); - void ExecuteGetOpOnKVDB(Op op, std::string *value, bool *exist); + void ExecuteGetOpOnKVDB(Op op, std::string *value, bool *exist); - void ExecutePutOpOnKVDB(Op op); + void ExecutePutOpOnKVDB(Op op); - void Get(const raftKVRpcProctoc::GetArgs *args, raftKVRpcProctoc::GetReply *reply); //将 GetArgs 改为rpc调用的,因为是远程客户端,即服务器宕机对客户端来说是无感的 - /** - * 從raft節點中獲取消息 (不要誤以爲是執行【GET】命令) - * @param message - */ - void GetCommandFromRaft(ApplyMsg message); + void Get(const raftKVRpcProctoc::GetArgs *args, + raftKVRpcProctoc::GetReply + *reply); //将 GetArgs 改为rpc调用的,因为是远程客户端,即服务器宕机对客户端来说是无感的 + /** + * 從raft節點中獲取消息 (不要誤以爲是執行【GET】命令) + * @param message + */ + void GetCommandFromRaft(ApplyMsg message); - bool ifRequestDuplicate(std::string ClientId, int RequestId); + bool ifRequestDuplicate(std::string ClientId, int RequestId); - // clerk 使用RPC远程调用 - void PutAppend(const raftKVRpcProctoc::PutAppendArgs *args, raftKVRpcProctoc::PutAppendReply *reply); + // clerk 使用RPC远程调用 + void PutAppend(const raftKVRpcProctoc::PutAppendArgs *args, raftKVRpcProctoc::PutAppendReply *reply); - ////一直等待raft传来的applyCh - void ReadRaftApplyCommandLoop(); + ////一直等待raft传来的applyCh + void ReadRaftApplyCommandLoop(); - void ReadSnapShotToInstall(std::string snapshot); + void ReadSnapShotToInstall(std::string snapshot); - bool SendMessageToWaitChan(const Op 
&op, int raftIndex); + bool SendMessageToWaitChan(const Op &op, int raftIndex); - // 检查是否需要制作快照,需要的话就向raft之下制作快照 - void IfNeedToSendSnapShotCommand(int raftIndex, int proportion); + // 检查是否需要制作快照,需要的话就向raft之下制作快照 + void IfNeedToSendSnapShotCommand(int raftIndex, int proportion); - // Handler the SnapShot from kv.rf.applyCh - void GetSnapShotFromRaft(ApplyMsg message); + // Handler the SnapShot from kv.rf.applyCh + void GetSnapShotFromRaft(ApplyMsg message); + std::string MakeSnapShot(); - std::string MakeSnapShot(); + public: // for rpc + void PutAppend(google::protobuf::RpcController *controller, const ::raftKVRpcProctoc::PutAppendArgs *request, + ::raftKVRpcProctoc::PutAppendReply *response, ::google::protobuf::Closure *done) override; -public: //for rpc - void PutAppend(google::protobuf::RpcController *controller, - const ::raftKVRpcProctoc::PutAppendArgs *request, - ::raftKVRpcProctoc::PutAppendReply *response, - ::google::protobuf::Closure *done) override; + void Get(google::protobuf::RpcController *controller, const ::raftKVRpcProctoc::GetArgs *request, + ::raftKVRpcProctoc::GetReply *response, ::google::protobuf::Closure *done) override; - void Get(google::protobuf::RpcController *controller, - const ::raftKVRpcProctoc::GetArgs *request, - ::raftKVRpcProctoc::GetReply *response, - ::google::protobuf::Closure *done) override; + /////////////////serialiazation start /////////////////////////////// + // notice : func serialize + private: + friend class boost::serialization::access; + // When the class Archive corresponds to an output archive, the + // & operator is defined similar to <<. Likewise, when the class Archive + // is a type of input archive the & operator is defined similar to >>. + template + void serialize(Archive &ar, const unsigned int version) //这里面写需要序列话和反序列化的字段 + { + ar &m_serializedKVData; - /////////////////serialiazation start /////////////////////////////// - //notice : func serialize -private: - friend class boost::serialization::access; + // ar & m_kvDB; + ar &m_lastRequestId; + } - // When the class Archive corresponds to an output archive, the - // & operator is defined similar to <<. Likewise, when the class Archive - // is a type of input archive the & operator is defined similar to >>. 
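Note: the serialize() above plus getSnapshotData()/parseFromString() just below implement snapshots as a boost text archive, with the skip list smuggled through m_serializedKVData as a pre-dumped string. The same round-trip on a self-contained toy type, for anyone new to boost::serialization:

```cpp
#include <boost/archive/text_iarchive.hpp>
#include <boost/archive/text_oarchive.hpp>
#include <iostream>
#include <sstream>
#include <string>

struct Snapshot {
  std::string kvData;  // stands in for m_serializedKVData
  int lastIndex = 0;
  template <class Archive>
  void serialize(Archive &ar, const unsigned int /*version*/) {
    ar &kvData;
    ar &lastIndex;
  }
};

int main() {
  Snapshot out{"k1:v1|k2:v2", 42}, in;
  std::stringstream ss;
  {
    boost::archive::text_oarchive oa(ss);
    oa << out;  // serialize, as getSnapshotData() does
  }
  {
    boost::archive::text_iarchive ia(ss);
    ia >> in;  // deserialize, as parseFromString() does
  }
  std::cout << in.kvData << " @ index " << in.lastIndex << std::endl;
  return 0;
}
```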
- template - void serialize(Archive &ar, const unsigned int version) //这里面写需要序列话和反序列化的字段 - { - ar & m_serializedKVData; + std::string getSnapshotData() { + m_serializedKVData = m_skipList.dump_file(); + std::stringstream ss; + boost::archive::text_oarchive oa(ss); + oa << *this; + m_serializedKVData.clear(); + return ss.str(); + } - // ar & m_kvDB; - ar & m_lastRequestId; - } + void parseFromString(const std::string &str) { + std::stringstream ss(str); + boost::archive::text_iarchive ia(ss); + ia >> *this; + m_skipList.load_file(m_serializedKVData); + m_serializedKVData.clear(); + } - std::string getSnapshotData() { - m_serializedKVData = m_skipList.dump_file(); - std::stringstream ss; - boost::archive::text_oarchive oa(ss); - oa << *this; - m_serializedKVData.clear(); - return ss.str(); - } - - void parseFromString(const std::string &str) { - std::stringstream ss(str); - boost::archive::text_iarchive ia(ss); - ia >> *this; - m_skipList.load_file(m_serializedKVData); - m_serializedKVData.clear(); - } - - /////////////////serialiazation end /////////////////////////////// + /////////////////serialiazation end /////////////////////////////// }; - -#endif //SKIP_LIST_ON_RAFT_KVSERVER_H +#endif // SKIP_LIST_ON_RAFT_KVSERVER_H diff --git a/src/raftCore/include/raft.h b/src/raftCore/include/raft.h index 1825813..839e805 100644 --- a/src/raftCore/include/raft.h +++ b/src/raftCore/include/raft.h @@ -1,177 +1,161 @@ #ifndef RAFT_H -#define RAFT_H +#define RAFT_H -#include "raftRpcUtil.h" -#include -#include +#include +#include #include +#include +#include +#include +#include +#include #include -#include "ApplyMsg.h" -#include "util.h" #include -#include "config.h" -#include -#include -#include -#include "boost/serialization/serialization.hpp" -#include "boost/any.hpp" -#include +#include "ApplyMsg.h" #include "Persister.h" -#include +#include "boost/any.hpp" +#include "boost/serialization/serialization.hpp" +#include "config.h" +#include "raftRpcUtil.h" +#include "util.h" /// @brief //////////// 网络状态表示 todo:可以在rpc中删除该字段,实际生产中是用不到的. 
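Note: config.h earlier pins HeartBeatTimeout at 25 ms against a 300-500 ms randomized election window, the order-of-magnitude gap its comment asks for. The electionTimeOutTicker() declared below implements the scheme its comment sketches: sleep until last-reset plus the randomized timeout, and only start an election if no heartbeat moved the reset point during the sleep. A condensed, self-contained sketch of that loop (single-threaded toy, locking elided):

```cpp
#include <chrono>
#include <random>
#include <thread>

using Clock = std::chrono::system_clock;

std::chrono::milliseconds randomizedElectionTimeout() {
  static std::mt19937 rng{std::random_device{}()};
  std::uniform_int_distribution<int> dist(300, 500);  // min/maxRandomizedElectionTime
  return std::chrono::milliseconds(dist(rng));
}

void electionTicker(Clock::time_point &lastResetElectionTime) {
  while (true) {
    auto wokeFrom = lastResetElectionTime;
    std::this_thread::sleep_until(wokeFrom + randomizedElectionTimeout());
    if (lastResetElectionTime > wokeFrom) continue;  // heartbeat reset the timer: no timeout
    // doElection() would run here: become candidate, bump term, request votes.
    break;  // sketch only: stop instead of electing
  }
}

int main() {
  auto lastReset = Clock::now();
  electionTicker(lastReset);  // with no resets, this times out once and returns
  return 0;
}
```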
-constexpr int Disconnected = 0; // 方便网络分区的时候debug,网络异常的时候为disconnected,只要网络正常就为AppNormal,防止matchIndex[]数组异常减小 +constexpr int Disconnected = + 0; // 方便网络分区的时候debug,网络异常的时候为disconnected,只要网络正常就为AppNormal,防止matchIndex[]数组异常减小 constexpr int AppNormal = 1; ///////////////投票状态 constexpr int Killed = 0; -constexpr int Voted = 1;//本轮已经投过票了 -constexpr int Expire = 2; //投票(消息、竞选者)过期 +constexpr int Voted = 1; //本轮已经投过票了 +constexpr int Expire = 2; //投票(消息、竞选者)过期 constexpr int Normal = 3; -class Raft : public raftRpcProctoc::raftRpc -{ - -private: - std::mutex m_mtx; - std::vector> m_peers; - std::shared_ptr m_persister; - int m_me; +class Raft : public raftRpcProctoc::raftRpc { + private: + std::mutex m_mtx; + std::vector> m_peers; + std::shared_ptr m_persister; + int m_me; + int m_currentTerm; + int m_votedFor; + std::vector m_logs; //// 日志条目数组,包含了状态机要执行的指令集,以及收到领导时的任期号 + // 这两个状态所有结点都在维护,易失 + int m_commitIndex; + int m_lastApplied; // 已经汇报给状态机(上层应用)的log 的index + + // 这两个状态是由服务器来维护,易失 + std::vector + m_nextIndex; // 这两个状态的下标1开始,因为通常commitIndex和lastApplied从0开始,应该是一个无效的index,因此下标从1开始 + std::vector m_matchIndex; + enum Status { Follower, Candidate, Leader }; + // 身份 + Status m_status; + + std::shared_ptr> applyChan; // client从这里取日志(2B),client与raft通信的接口 + // ApplyMsgQueue chan ApplyMsg // raft内部使用的chan,applyChan是用于和服务层交互,最后好像没用上 + + // 选举超时 + + std::chrono::_V2::system_clock::time_point m_lastResetElectionTime; + // 心跳超时,用于leader + std::chrono::_V2::system_clock::time_point m_lastResetHearBeatTime; + + // 2D中用于传入快照点 + // 储存了快照中的最后一个日志的Index和Term + int m_lastSnapshotIncludeIndex; + int m_lastSnapshotIncludeTerm; + + public: + void AppendEntries1(const raftRpcProctoc::AppendEntriesArgs *args, raftRpcProctoc::AppendEntriesReply *reply); + void applierTicker(); + bool CondInstallSnapshot(int lastIncludedTerm, int lastIncludedIndex, std::string snapshot); + void doElection(); + void doHeartBeat(); + // 每隔一段时间检查睡眠时间内有没有重置定时器,没有则说明超时了 + // 如果有则设置合适睡眠时间:睡眠到重置时间+超时时间 + void electionTimeOutTicker(); + std::vector getApplyLogs(); + int getNewCommandIndex(); + void getPrevLogInfo(int server, int *preIndex, int *preTerm); + void GetState(int *term, bool *isLeader); + void InstallSnapshot(const raftRpcProctoc::InstallSnapshotRequest *args, + raftRpcProctoc::InstallSnapshotResponse *reply); + void leaderHearBeatTicker(); + void leaderSendSnapShot(int server); + void leaderUpdateCommitIndex(); + bool matchLog(int logIndex, int logTerm); + void persist(); + void RequestVote(const raftRpcProctoc::RequestVoteArgs *args, raftRpcProctoc::RequestVoteReply *reply); + bool UpToDate(int index, int term); + int getLastLogIndex(); + void getLastLogIndexAndTerm(int *lastLogIndex, int *lastLogTerm); + int getLogTermFromLogIndex(int logIndex); + int GetRaftStateSize(); + int getSlicesIndexFromLogIndex(int logIndex); + + bool sendRequestVote(int server, std::shared_ptr args, + std::shared_ptr reply, std::shared_ptr votedNum); + bool sendAppendEntries(int server, std::shared_ptr args, + std::shared_ptr reply, std::shared_ptr appendNums); + + // rf.applyChan <- msg //不拿锁执行 可以单独创建一个线程执行,但是为了同意使用std:thread + // ,避免使用pthread_create,因此专门写一个函数来执行 + void pushMsgToKvServer(ApplyMsg msg); + void readPersist(std::string data); + std::string persistData(); + + void Start(Op command, int *newLogIndex, int *newLogTerm, bool *isLeader); + + // Snapshot the service says it has created a snapshot that has + // all info up to and including index. this means the + // service no longer needs the log through (and including) + // that index. 
Raft should now trim its log as much as possible. + // index代表是快照apply应用的index,而snapshot代表的是上层service传来的快照字节流,包括了Index之前的数据 + // 这个函数的目的是把安装到快照里的日志抛弃,并安装快照数据,同时更新快照下标,属于peers自身主动更新,与leader发送快照不冲突 + // 即服务层主动发起请求raft保存snapshot里面的数据,index是用来表示snapshot快照执行到了哪条命令 + void Snapshot(int index, std::string snapshot); + + public: + // 重写基类方法,因为rpc远程调用真正调用的是这个方法 + //序列化,反序列化等操作rpc框架都已经做完了,因此这里只需要获取值然后真正调用本地方法即可。 + void AppendEntries(google::protobuf::RpcController *controller, const ::raftRpcProctoc::AppendEntriesArgs *request, + ::raftRpcProctoc::AppendEntriesReply *response, ::google::protobuf::Closure *done) override; + void InstallSnapshot(google::protobuf::RpcController *controller, + const ::raftRpcProctoc::InstallSnapshotRequest *request, + ::raftRpcProctoc::InstallSnapshotResponse *response, ::google::protobuf::Closure *done) override; + void RequestVote(google::protobuf::RpcController *controller, const ::raftRpcProctoc::RequestVoteArgs *request, + ::raftRpcProctoc::RequestVoteReply *response, ::google::protobuf::Closure *done) override; + + public: + void init(std::vector> peers, int me, std::shared_ptr persister, + std::shared_ptr> applyCh); + + private: + // for persist + + class BoostPersistRaftNode { + public: + friend class boost::serialization::access; + // When the class Archive corresponds to an output archive, the + // & operator is defined similar to <<. Likewise, when the class Archive + // is a type of input archive the & operator is defined similar to >>. + template + void serialize(Archive &ar, const unsigned int version) { + ar &m_currentTerm; + ar &m_votedFor; + ar &m_lastSnapshotIncludeIndex; + ar &m_lastSnapshotIncludeTerm; + ar &m_logs; + } int m_currentTerm; int m_votedFor; - std::vector m_logs; //// 日志条目数组,包含了状态机要执行的指令集,以及收到领导时的任期号 - // 这两个状态所有结点都在维护,易失 - int m_commitIndex; - int m_lastApplied; // 已经汇报给状态机(上层应用)的log 的index - - // 这两个状态是由服务器来维护,易失 - std::vector m_nextIndex; // 这两个状态的下标1开始,因为通常commitIndex和lastApplied从0开始,应该是一个无效的index,因此下标从1开始 - std::vector m_matchIndex; - enum Status - { - Follower, - Candidate, - Leader - }; - // 身份 - Status m_status; - - std::shared_ptr> applyChan ; // client从这里取日志(2B),client与raft通信的接口 - // ApplyMsgQueue chan ApplyMsg // raft内部使用的chan,applyChan是用于和服务层交互,最后好像没用上 - - // 选举超时 - - std::chrono::_V2::system_clock::time_point m_lastResetElectionTime; - // 心跳超时,用于leader - std::chrono::_V2::system_clock::time_point m_lastResetHearBeatTime; - - // 2D中用于传入快照点 - // 储存了快照中的最后一个日志的Index和Term int m_lastSnapshotIncludeIndex; int m_lastSnapshotIncludeTerm; + std::vector m_logs; + std::unordered_map umap; -public: - void AppendEntries1(const raftRpcProctoc::AppendEntriesArgs *args, raftRpcProctoc::AppendEntriesReply *reply); - void applierTicker(); - bool CondInstallSnapshot(int lastIncludedTerm, int lastIncludedIndex, std::string snapshot); - void doElection(); - void doHeartBeat(); - // 每隔一段时间检查睡眠时间内有没有重置定时器,没有则说明超时了 -// 如果有则设置合适睡眠时间:睡眠到重置时间+超时时间 - void electionTimeOutTicker(); - std::vector getApplyLogs(); - int getNewCommandIndex(); - void getPrevLogInfo(int server, int *preIndex, int *preTerm); - void GetState(int *term, bool *isLeader); - void InstallSnapshot( const raftRpcProctoc::InstallSnapshotRequest *args, raftRpcProctoc::InstallSnapshotResponse *reply); - void leaderHearBeatTicker(); - void leaderSendSnapShot(int server); - void leaderUpdateCommitIndex(); - bool matchLog(int logIndex, int logTerm); - void persist(); - void RequestVote(const raftRpcProctoc::RequestVoteArgs *args, raftRpcProctoc::RequestVoteReply *reply); - bool UpToDate(int 
index, int term); - int getLastLogIndex(); - void getLastLogIndexAndTerm(int *lastLogIndex, int *lastLogTerm); - int getLogTermFromLogIndex(int logIndex); - int GetRaftStateSize(); - int getSlicesIndexFromLogIndex(int logIndex); - - - bool sendRequestVote(int server , std::shared_ptr args , std::shared_ptr reply, std::shared_ptr votedNum) ; - bool sendAppendEntries(int server ,std::shared_ptr args , std::shared_ptr reply , std::shared_ptr appendNums ) ; - - - //rf.applyChan <- msg //不拿锁执行 可以单独创建一个线程执行,但是为了同意使用std:thread ,避免使用pthread_create,因此专门写一个函数来执行 - void pushMsgToKvServer(ApplyMsg msg); - void readPersist(std::string data); - std::string persistData(); - - - void Start(Op command,int* newLogIndex,int* newLogTerm,bool* isLeader ) ; - -// Snapshot the service says it has created a snapshot that has -// all info up to and including index. this means the -// service no longer needs the log through (and including) -// that index. Raft should now trim its log as much as possible. -// index代表是快照apply应用的index,而snapshot代表的是上层service传来的快照字节流,包括了Index之前的数据 -// 这个函数的目的是把安装到快照里的日志抛弃,并安装快照数据,同时更新快照下标,属于peers自身主动更新,与leader发送快照不冲突 -// 即服务层主动发起请求raft保存snapshot里面的数据,index是用来表示snapshot快照执行到了哪条命令 - void Snapshot(int index , std::string snapshot ); -public: - // 重写基类方法,因为rpc远程调用真正调用的是这个方法 - //序列化,反序列化等操作rpc框架都已经做完了,因此这里只需要获取值然后真正调用本地方法即可。 - void AppendEntries(google::protobuf::RpcController *controller, - const ::raftRpcProctoc::AppendEntriesArgs *request, - ::raftRpcProctoc::AppendEntriesReply *response, - ::google::protobuf::Closure *done) override; - void InstallSnapshot(google::protobuf::RpcController *controller, - const ::raftRpcProctoc::InstallSnapshotRequest *request, - ::raftRpcProctoc::InstallSnapshotResponse *response, - ::google::protobuf::Closure *done) override; - void RequestVote(google::protobuf::RpcController *controller, - const ::raftRpcProctoc::RequestVoteArgs *request, - ::raftRpcProctoc::RequestVoteReply *response, - ::google::protobuf::Closure *done) override; - - -public: - void init(std::vector> peers,int me,std::shared_ptr persister,std::shared_ptr> applyCh); - - - - - - -private: - //for persist - - class BoostPersistRaftNode - { - public: - friend class boost::serialization::access; - // When the class Archive corresponds to an output archive, the - // & operator is defined similar to <<. Likewise, when the class Archive - // is a type of input archive the & operator is defined similar to >>. 
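// To make the operator& behavior described above concrete, here is a self-contained round-trip
// sketch in the same style as persistData()/readPersist(); the struct and function names below
// are illustrative only and are not part of this project:
//
//   #include <sstream>
//   #include <boost/archive/text_iarchive.hpp>
//   #include <boost/archive/text_oarchive.hpp>
//   #include <boost/serialization/string.hpp>
//
//   struct Demo {
//     int term = 0;
//     std::string owner;
//     friend class boost::serialization::access;
//     template <class Archive>
//     void serialize(Archive &ar, const unsigned int /*version*/) {
//       ar &term;   // behaves like << on a text_oarchive, like >> on a text_iarchive
//       ar &owner;
//     }
//   };
//
//   std::string save(const Demo &d) {
//     std::stringstream ss;
//     boost::archive::text_oarchive oa(ss);
//     oa << d;      // same pattern as getSnapshotData()/persistData()
//     return ss.str();
//   }
//
//   Demo load(const std::string &s) {
//     std::stringstream ss(s);
//     boost::archive::text_iarchive ia(ss);
//     Demo d;
//     ia >> d;      // mirrors parseFromString()/readPersist()
//     return d;
//   }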
- template - void serialize(Archive & ar, const unsigned int version) - { - ar & m_currentTerm; - ar & m_votedFor; - ar & m_lastSnapshotIncludeIndex; - ar & m_lastSnapshotIncludeTerm; - ar & m_logs; - } - int m_currentTerm; - int m_votedFor; - int m_lastSnapshotIncludeIndex; - int m_lastSnapshotIncludeTerm; - std::vector m_logs; - std::unordered_map umap; - public: - - }; + public: + }; }; - -#endif //RAFT_H \ No newline at end of file +#endif // RAFT_H \ No newline at end of file diff --git a/src/raftCore/include/raftRpcUtil.h b/src/raftCore/include/raftRpcUtil.h index 80616db..a88488e 100644 --- a/src/raftCore/include/raftRpcUtil.h +++ b/src/raftCore/include/raftRpcUtil.h @@ -7,29 +7,25 @@ #include "raftRPC.pb.h" - /// @brief 维护当前节点对其他某一个结点的所有rpc发送通信的功能 // 对于一个raft节点来说,对于任意其他的节点都要维护一个rpc连接,即MprpcChannel -class RaftRpcUtil -{ -private: - raftRpcProctoc::raftRpc_Stub *stub_; -public: - //主动调用其他节点的三个方法,可以按照mit6824来调用,但是别的节点调用自己的好像就不行了,要继承protoc提供的service类才行 - bool AppendEntries(raftRpcProctoc::AppendEntriesArgs* args,raftRpcProctoc::AppendEntriesReply* response); - bool InstallSnapshot(raftRpcProctoc::InstallSnapshotRequest *args, raftRpcProctoc::InstallSnapshotResponse *response); - bool RequestVote(raftRpcProctoc::RequestVoteArgs *args, raftRpcProctoc::RequestVoteReply * response); - //响应其他节点的方法 - /** - * - * @param ip 远端ip - * @param port 远端端口 - */ - RaftRpcUtil(std::string ip, short port); - ~RaftRpcUtil(); +class RaftRpcUtil { + private: + raftRpcProctoc::raftRpc_Stub *stub_; + + public: + //主动调用其他节点的三个方法,可以按照mit6824来调用,但是别的节点调用自己的好像就不行了,要继承protoc提供的service类才行 + bool AppendEntries(raftRpcProctoc::AppendEntriesArgs *args, raftRpcProctoc::AppendEntriesReply *response); + bool InstallSnapshot(raftRpcProctoc::InstallSnapshotRequest *args, raftRpcProctoc::InstallSnapshotResponse *response); + bool RequestVote(raftRpcProctoc::RequestVoteArgs *args, raftRpcProctoc::RequestVoteReply *response); + //响应其他节点的方法 + /** + * + * @param ip 远端ip + * @param port 远端端口 + */ + RaftRpcUtil(std::string ip, short port); + ~RaftRpcUtil(); }; - - - -#endif //RAFTRPC_H +#endif // RAFTRPC_H diff --git a/src/raftCore/kvServer.cpp b/src/raftCore/kvServer.cpp index 0edb3c8..709ccb3 100644 --- a/src/raftCore/kvServer.cpp +++ b/src/raftCore/kvServer.cpp @@ -5,439 +5,446 @@ #include "mprpcconfig.h" void KvServer::DprintfKVDB() { - if (!Debug) { - return; - } - std::lock_guard lg(m_mtx); - DEFER { - // for (const auto &item: m_kvDB) { - // DPrintf("[DBInfo ----]Key : %s, Value : %s", &item.first, &item.second); - // } - m_skipList.display_list(); - }; - + if (!Debug) { + return; + } + std::lock_guard lg(m_mtx); + DEFER { + // for (const auto &item: m_kvDB) { + // DPrintf("[DBInfo ----]Key : %s, Value : %s", &item.first, &item.second); + // } + m_skipList.display_list(); + }; } void KvServer::ExecuteAppendOpOnKVDB(Op op) { - //if op.IfDuplicate { //get请求是可重复执行的,因此可以不用判复 - // return - //} - m_mtx.lock(); - - m_skipList.insert_set_element(op.Key,op.Value); - - // if (m_kvDB.find(op.Key) != m_kvDB.end()) { - // m_kvDB[op.Key] = m_kvDB[op.Key] + op.Value; - // } else { - // m_kvDB.insert(std::make_pair(op.Key, op.Value)); - // } - m_lastRequestId[op.ClientId] = op.RequestId; - m_mtx.unlock(); - - - // DPrintf("[KVServerExeAPPEND-----]ClientId :%d ,RequestID :%d ,Key : %v, value : %v", op.ClientId, op.RequestId, op.Key, op.Value) - DprintfKVDB(); + // if op.IfDuplicate { //get请求是可重复执行的,因此可以不用判复 + // return + // } + m_mtx.lock(); + + m_skipList.insert_set_element(op.Key, op.Value); + + // if (m_kvDB.find(op.Key) != 
m_kvDB.end()) { + // m_kvDB[op.Key] = m_kvDB[op.Key] + op.Value; + // } else { + // m_kvDB.insert(std::make_pair(op.Key, op.Value)); + // } + m_lastRequestId[op.ClientId] = op.RequestId; + m_mtx.unlock(); + + // DPrintf("[KVServerExeAPPEND-----]ClientId :%d ,RequestID :%d ,Key : %v, value : %v", op.ClientId, op.RequestId, + // op.Key, op.Value) + DprintfKVDB(); } void KvServer::ExecuteGetOpOnKVDB(Op op, std::string *value, bool *exist) { - m_mtx.lock(); - *value = ""; - *exist = false; - if(m_skipList.search_element(op.Key, *value)) { - *exist = true; - // *value = m_skipList.se //value已经完成赋值了 - } - // if (m_kvDB.find(op.Key) != m_kvDB.end()) { - // *exist = true; - // *value = m_kvDB[op.Key]; - // } - m_lastRequestId[op.ClientId] = op.RequestId; - m_mtx.unlock(); - - - if (*exist) { - // DPrintf("[KVServerExeGET----]ClientId :%d ,RequestID :%d ,Key : %v, value :%v", op.ClientId, op.RequestId, op.Key, value) - } else { - // DPrintf("[KVServerExeGET----]ClientId :%d ,RequestID :%d ,Key : %v, But No KEY!!!!", op.ClientId, op.RequestId, op.Key) - } - DprintfKVDB(); + m_mtx.lock(); + *value = ""; + *exist = false; + if (m_skipList.search_element(op.Key, *value)) { + *exist = true; + // *value = m_skipList.se //value已经完成赋值了 + } + // if (m_kvDB.find(op.Key) != m_kvDB.end()) { + // *exist = true; + // *value = m_kvDB[op.Key]; + // } + m_lastRequestId[op.ClientId] = op.RequestId; + m_mtx.unlock(); + + if (*exist) { + // DPrintf("[KVServerExeGET----]ClientId :%d ,RequestID :%d ,Key : %v, value :%v", op.ClientId, + // op.RequestId, op.Key, value) + } else { + // DPrintf("[KVServerExeGET----]ClientId :%d ,RequestID :%d ,Key : %v, But No KEY!!!!", op.ClientId, + // op.RequestId, op.Key) + } + DprintfKVDB(); } void KvServer::ExecutePutOpOnKVDB(Op op) { - m_mtx.lock(); - m_skipList.insert_set_element(op.Key,op.Value); - // m_kvDB[op.Key] = op.Value; - m_lastRequestId[op.ClientId] = op.RequestId; - m_mtx.unlock(); - - - // DPrintf("[KVServerExePUT----]ClientId :%d ,RequestID :%d ,Key : %v, value : %v", op.ClientId, op.RequestId, op.Key, op.Value) - DprintfKVDB(); + m_mtx.lock(); + m_skipList.insert_set_element(op.Key, op.Value); + // m_kvDB[op.Key] = op.Value; + m_lastRequestId[op.ClientId] = op.RequestId; + m_mtx.unlock(); + + // DPrintf("[KVServerExePUT----]ClientId :%d ,RequestID :%d ,Key : %v, value : %v", op.ClientId, op.RequestId, + // op.Key, op.Value) + DprintfKVDB(); } // 处理来自clerk的Get RPC void KvServer::Get(const raftKVRpcProctoc::GetArgs *args, raftKVRpcProctoc::GetReply *reply) { - Op op; - op.Operation = "Get"; - op.Key = args->key(); - op.Value = ""; - op.ClientId = args->clientid(); - op.RequestId = args->requestid(); - - - int raftIndex = -1; + Op op; + op.Operation = "Get"; + op.Key = args->key(); + op.Value = ""; + op.ClientId = args->clientid(); + op.RequestId = args->requestid(); + + int raftIndex = -1; + int _ = -1; + bool isLeader = false; + m_raftNode->Start(op, &raftIndex, &_, + &isLeader); // raftIndex:raft预计的logIndex + // ,虽然是预计,但是正确情况下是准确的,op的具体内容对raft来说 是隔离的 + + if (!isLeader) { + reply->set_err(ErrWrongLeader); + return; + } + + // create waitForCh + m_mtx.lock(); + + if (waitApplyCh.find(raftIndex) == waitApplyCh.end()) { + waitApplyCh.insert(std::make_pair(raftIndex, new LockQueue())); + } + auto chForRaftIndex = waitApplyCh[raftIndex]; + + m_mtx.unlock(); //直接解锁,等待任务执行完成,不能一直拿锁等待 + + // timeout + Op raftCommitOp; + + if (!chForRaftIndex->timeOutPop(CONSENSUS_TIMEOUT, &raftCommitOp)) { + // DPrintf("[GET TIMEOUT!!!]From Client %d (Request %d) To Server %d, key %v, raftIndex 
%d", args.ClientId, + // args.RequestId, kv.me, op.Key, raftIndex) + // todo 2023年06月01日 int _ = -1; bool isLeader = false; - m_raftNode->Start(op, &raftIndex, &_, &isLeader); //raftIndex:raft预计的logIndex ,虽然是预计,但是正确情况下是准确的,op的具体内容对raft来说 是隔离的 - - if (!isLeader) { - reply->set_err(ErrWrongLeader); - return; - } - - - // create waitForCh - m_mtx.lock(); - - if (waitApplyCh.find(raftIndex) == waitApplyCh.end()) { - waitApplyCh.insert(std::make_pair(raftIndex, new LockQueue())); + m_raftNode->GetState(&_, &isLeader); + + if (ifRequestDuplicate(op.ClientId, op.RequestId) && isLeader) { + //如果超时,代表raft集群不保证已经commitIndex该日志,但是如果是已经提交过的get请求,是可以再执行的。 + // 不会违反线性一致性 + std::string value; + bool exist = false; + ExecuteGetOpOnKVDB(op, &value, &exist); + if (exist) { + reply->set_err(OK); + reply->set_value(value); + } else { + reply->set_err(ErrNoKey); + reply->set_value(""); + } + } else { + reply->set_err(ErrWrongLeader); //返回这个,其实就是让clerk换一个节点重试 } - auto chForRaftIndex = waitApplyCh[raftIndex]; - - m_mtx.unlock(); //直接解锁,等待任务执行完成,不能一直拿锁等待 - - - // timeout - Op raftCommitOp; - - if (!chForRaftIndex->timeOutPop(CONSENSUS_TIMEOUT, &raftCommitOp)) { - // DPrintf("[GET TIMEOUT!!!]From Client %d (Request %d) To Server %d, key %v, raftIndex %d", args.ClientId, args.RequestId, kv.me, op.Key, raftIndex) - // todo 2023年06月01日 - int _ = -1; - bool isLeader = false; - m_raftNode->GetState(&_, &isLeader); - - if (ifRequestDuplicate(op.ClientId, op.RequestId) && isLeader) { - //如果超时,代表raft集群不保证已经commitIndex该日志,但是如果是已经提交过的get请求,是可以再执行的。 - // 不会违反线性一致性 - std::string value; - bool exist = false; - ExecuteGetOpOnKVDB(op, &value, &exist); - if (exist) { - reply->set_err(OK); - reply->set_value(value); - } else { - reply->set_err(ErrNoKey); - reply->set_value(""); - } - } else { - reply->set_err(ErrWrongLeader); //返回这个,其实就是让clerk换一个节点重试 - } + } else { + // raft已经提交了该command(op),可以正式开始执行了 + // DPrintf("[WaitChanGetRaftApplyMessage<--]Server %d , get Command <-- Index:%d , ClientId %d, RequestId + // %d, Opreation %v, Key :%v, Value :%v", kv.me, raftIndex, op.ClientId, op.RequestId, op.Operation, op.Key, + // op.Value) + // todo 这里还要再次检验的原因:感觉不用检验,因为leader只要正确的提交了,那么这些肯定是符合的 + if (raftCommitOp.ClientId == op.ClientId && raftCommitOp.RequestId == op.RequestId) { + std::string value; + bool exist = false; + ExecuteGetOpOnKVDB(op, &value, &exist); + if (exist) { + reply->set_err(OK); + reply->set_value(value); + } else { + reply->set_err(ErrNoKey); + reply->set_value(""); + } } else { - //raft已经提交了该command(op),可以正式开始执行了 - // DPrintf("[WaitChanGetRaftApplyMessage<--]Server %d , get Command <-- Index:%d , ClientId %d, RequestId %d, Opreation %v, Key :%v, Value :%v", kv.me, raftIndex, op.ClientId, op.RequestId, op.Operation, op.Key, op.Value) - //todo 这里还要再次检验的原因:感觉不用检验,因为leader只要正确的提交了,那么这些肯定是符合的 - if (raftCommitOp.ClientId == op.ClientId && raftCommitOp.RequestId == op.RequestId) { - std::string value; - bool exist = false; - ExecuteGetOpOnKVDB(op, &value, &exist); - if (exist) { - reply->set_err(OK); - reply->set_value(value); - } else { - reply->set_err(ErrNoKey); - reply->set_value(""); - } - } else { - reply->set_err(ErrWrongLeader); - // DPrintf("[GET ] 不满足:raftCommitOp.ClientId{%v} == op.ClientId{%v} && raftCommitOp.RequestId{%v} == op.RequestId{%v}", raftCommitOp.ClientId, op.ClientId, raftCommitOp.RequestId, op.RequestId) - } + reply->set_err(ErrWrongLeader); + // DPrintf("[GET ] 不满足:raftCommitOp.ClientId{%v} == op.ClientId{%v} && raftCommitOp.RequestId{%v} + // == op.RequestId{%v}", raftCommitOp.ClientId, 
op.ClientId, raftCommitOp.RequestId, op.RequestId) } - m_mtx.lock(); //todo 這個可以先弄一個defer,因爲刪除優先級並不高,先把rpc發回去更加重要 - auto tmp = waitApplyCh[raftIndex]; - waitApplyCh.erase(raftIndex); - delete tmp; - m_mtx.unlock(); + } + m_mtx.lock(); // todo 這個可以先弄一個defer,因爲刪除優先級並不高,先把rpc發回去更加重要 + auto tmp = waitApplyCh[raftIndex]; + waitApplyCh.erase(raftIndex); + delete tmp; + m_mtx.unlock(); } void KvServer::GetCommandFromRaft(ApplyMsg message) { - Op op; - op.parseFromString(message.Command); - - - DPrintf( - "[KvServer::GetCommandFromRaft-kvserver{%d}] , Got Command --> Index:{%d} , ClientId {%s}, RequestId {%d}, Opreation {%s}, Key :{%s}, Value :{%s}", - m_me, message.CommandIndex, &op.ClientId, op.RequestId, &op.Operation, &op.Key, &op.Value); - if (message.CommandIndex <= m_lastSnapShotRaftLogIndex) { - return; - } - - // State Machine (KVServer solute the duplicate problem) - // duplicate command will not be exed - if (!ifRequestDuplicate(op.ClientId, op.RequestId)) { - // execute command - if (op.Operation == "Put") { - ExecutePutOpOnKVDB(op); - } - if (op.Operation == "Append") { - ExecuteAppendOpOnKVDB(op); - } - // kv.lastRequestId[op.ClientId] = op.RequestId 在Executexxx函数里面更新的 + Op op; + op.parseFromString(message.Command); + + DPrintf( + "[KvServer::GetCommandFromRaft-kvserver{%d}] , Got Command --> Index:{%d} , ClientId {%s}, RequestId {%d}, " + "Opreation {%s}, Key :{%s}, Value :{%s}", + m_me, message.CommandIndex, &op.ClientId, op.RequestId, &op.Operation, &op.Key, &op.Value); + if (message.CommandIndex <= m_lastSnapShotRaftLogIndex) { + return; + } + + // State Machine (KVServer solute the duplicate problem) + // duplicate command will not be exed + if (!ifRequestDuplicate(op.ClientId, op.RequestId)) { + // execute command + if (op.Operation == "Put") { + ExecutePutOpOnKVDB(op); } - //到这里kvDB已经制作了快照 - if (m_maxRaftState != -1) { - IfNeedToSendSnapShotCommand(message.CommandIndex, 9); - //如果raft的log太大(大于指定的比例)就把制作快照 + if (op.Operation == "Append") { + ExecuteAppendOpOnKVDB(op); } - - // Send message to the chan of op.ClientId - SendMessageToWaitChan(op, message.CommandIndex); + // kv.lastRequestId[op.ClientId] = op.RequestId 在Executexxx函数里面更新的 + } + //到这里kvDB已经制作了快照 + if (m_maxRaftState != -1) { + IfNeedToSendSnapShotCommand(message.CommandIndex, 9); + //如果raft的log太大(大于指定的比例)就把制作快照 + } + + // Send message to the chan of op.ClientId + SendMessageToWaitChan(op, message.CommandIndex); } bool KvServer::ifRequestDuplicate(std::string ClientId, int RequestId) { - std::lock_guard lg(m_mtx); - if (m_lastRequestId.find(ClientId) == m_lastRequestId.end()) { - return false; - // todo :不存在这个client就创建 - } - return RequestId <= m_lastRequestId[ClientId]; + std::lock_guard lg(m_mtx); + if (m_lastRequestId.find(ClientId) == m_lastRequestId.end()) { + return false; + // todo :不存在这个client就创建 + } + return RequestId <= m_lastRequestId[ClientId]; } -//get和put//append執行的具體細節是不一樣的 -//PutAppend在收到raft消息之後執行,具體函數裏面只判斷冪等性(是否重複) -//get函數收到raft消息之後在,因爲get無論是否重複都可以再執行 +// get和put//append執行的具體細節是不一樣的 +// PutAppend在收到raft消息之後執行,具體函數裏面只判斷冪等性(是否重複) +// get函數收到raft消息之後在,因爲get無論是否重複都可以再執行 void KvServer::PutAppend(const raftKVRpcProctoc::PutAppendArgs *args, raftKVRpcProctoc::PutAppendReply *reply) { - Op op; - op.Operation = args->op(); - op.Key = args->key(); - op.Value = args->value(); - op.ClientId = args->clientid(); - op.RequestId = args->requestid(); - int raftIndex = -1; - int _ = -1; - bool isleader = false; - - m_raftNode->Start(op, &raftIndex, &_, &isleader); - - - if (!isleader) { - DPrintf( - "[func 
-KvServer::PutAppend -kvserver{%d}]From Client %s (Request %d) To Server %d, key %s, raftIndex %d , but not leader", - m_me, &args->clientid(), args->requestid(), m_me, &op.Key, raftIndex); - - reply->set_err(ErrWrongLeader); - return; - } + Op op; + op.Operation = args->op(); + op.Key = args->key(); + op.Value = args->value(); + op.ClientId = args->clientid(); + op.RequestId = args->requestid(); + int raftIndex = -1; + int _ = -1; + bool isleader = false; + + m_raftNode->Start(op, &raftIndex, &_, &isleader); + + if (!isleader) { DPrintf( - "[func -KvServer::PutAppend -kvserver{%d}]From Client %s (Request %d) To Server %d, key %s, raftIndex %d , is leader ", + "[func -KvServer::PutAppend -kvserver{%d}]From Client %s (Request %d) To Server %d, key %s, raftIndex %d , but " + "not leader", m_me, &args->clientid(), args->requestid(), m_me, &op.Key, raftIndex); - m_mtx.lock(); - if (waitApplyCh.find(raftIndex) == waitApplyCh.end()) { - waitApplyCh.insert(std::make_pair(raftIndex, new LockQueue())); - } - auto chForRaftIndex = waitApplyCh[raftIndex]; - - m_mtx.unlock(); //直接解锁,等待任务执行完成,不能一直拿锁等待 - - // timeout - Op raftCommitOp; - - if (!chForRaftIndex->timeOutPop(CONSENSUS_TIMEOUT, &raftCommitOp)) { - DPrintf( - "[func -KvServer::PutAppend -kvserver{%d}]TIMEOUT PUTAPPEND !!!! Server %d , get Command <-- Index:%d , ClientId %s, RequestId %s, Opreation %s Key :%s, Value :%s" - , m_me, m_me, raftIndex, &op.ClientId, op.RequestId, &op.Operation, &op.Key, &op.Value); + reply->set_err(ErrWrongLeader); + return; + } + DPrintf( + "[func -KvServer::PutAppend -kvserver{%d}]From Client %s (Request %d) To Server %d, key %s, raftIndex %d , is " + "leader ", + m_me, &args->clientid(), args->requestid(), m_me, &op.Key, raftIndex); + m_mtx.lock(); + if (waitApplyCh.find(raftIndex) == waitApplyCh.end()) { + waitApplyCh.insert(std::make_pair(raftIndex, new LockQueue())); + } + auto chForRaftIndex = waitApplyCh[raftIndex]; + + m_mtx.unlock(); //直接解锁,等待任务执行完成,不能一直拿锁等待 + + // timeout + Op raftCommitOp; + + if (!chForRaftIndex->timeOutPop(CONSENSUS_TIMEOUT, &raftCommitOp)) { + DPrintf( + "[func -KvServer::PutAppend -kvserver{%d}]TIMEOUT PUTAPPEND !!!! 
Server %d , get Command <-- Index:%d , " + "ClientId %s, RequestId %s, Opreation %s Key :%s, Value :%s", + m_me, m_me, raftIndex, &op.ClientId, op.RequestId, &op.Operation, &op.Key, &op.Value); - if (ifRequestDuplicate(op.ClientId, op.RequestId)) { - reply->set_err(OK); // 超时了,但因为是重复的请求,返回ok,实际上就算没有超时,在真正执行的时候也要判断是否重复 - } else { - reply->set_err(ErrWrongLeader); ///这里返回这个的目的让clerk重新尝试 - } + if (ifRequestDuplicate(op.ClientId, op.RequestId)) { + reply->set_err(OK); // 超时了,但因为是重复的请求,返回ok,实际上就算没有超时,在真正执行的时候也要判断是否重复 } else { - DPrintf( - "[func -KvServer::PutAppend -kvserver{%d}]WaitChanGetRaftApplyMessage<--Server %d , get Command <-- Index:%d , ClientId %s, RequestId %d, Opreation %s, Key :%s, Value :%s" - , m_me, m_me, raftIndex, &op.ClientId, op.RequestId, &op.Operation, &op.Key, &op.Value); - if (raftCommitOp.ClientId == op.ClientId && - raftCommitOp.RequestId == op.RequestId) { - //可能发生leader的变更导致日志被覆盖,因此必须检查 - reply->set_err(OK); - } else { - reply->set_err(ErrWrongLeader); - } + reply->set_err(ErrWrongLeader); ///这里返回这个的目的让clerk重新尝试 } + } else { + DPrintf( + "[func -KvServer::PutAppend -kvserver{%d}]WaitChanGetRaftApplyMessage<--Server %d , get Command <-- Index:%d , " + "ClientId %s, RequestId %d, Opreation %s, Key :%s, Value :%s", + m_me, m_me, raftIndex, &op.ClientId, op.RequestId, &op.Operation, &op.Key, &op.Value); + if (raftCommitOp.ClientId == op.ClientId && raftCommitOp.RequestId == op.RequestId) { + //可能发生leader的变更导致日志被覆盖,因此必须检查 + reply->set_err(OK); + } else { + reply->set_err(ErrWrongLeader); + } + } - m_mtx.lock(); + m_mtx.lock(); - auto tmp = waitApplyCh[raftIndex]; - waitApplyCh.erase(raftIndex); - delete tmp; - m_mtx.unlock(); + auto tmp = waitApplyCh[raftIndex]; + waitApplyCh.erase(raftIndex); + delete tmp; + m_mtx.unlock(); } void KvServer::ReadRaftApplyCommandLoop() { - while (true) { - //如果只操作applyChan不用拿锁,因为applyChan自己带锁 - auto message = applyChan->Pop(); //阻塞弹出 - DPrintf("---------------tmp-------------[func-KvServer::ReadRaftApplyCommandLoop()-kvserver{%d}] 收到了下raft的消息", - m_me); - // listen to every command applied by its raft ,delivery to relative RPC Handler - - if (message.CommandValid) { - GetCommandFromRaft(message); - } - if (message.SnapshotValid) { - GetSnapShotFromRaft(message); - } + while (true) { + //如果只操作applyChan不用拿锁,因为applyChan自己带锁 + auto message = applyChan->Pop(); //阻塞弹出 + DPrintf( + "---------------tmp-------------[func-KvServer::ReadRaftApplyCommandLoop()-kvserver{%d}] 收到了下raft的消息", + m_me); + // listen to every command applied by its raft ,delivery to relative RPC Handler + + if (message.CommandValid) { + GetCommandFromRaft(message); + } + if (message.SnapshotValid) { + GetSnapShotFromRaft(message); } + } } -//raft会与persist层交互,kvserver层也会,因为kvserver层开始的时候需要恢复kvdb的状态 -// 关于快照raft层与persist的交互:保存kvserver传来的snapshot;生成leaderInstallSnapshot RPC的时候也需要读取snapshot; -// 因此snapshot的具体格式是由kvserver层来定的,raft只负责传递这个东西 -// snapShot里面包含kvserver需要维护的persist_lastRequestId 以及kvDB真正保存的数据persist_kvdb +// raft会与persist层交互,kvserver层也会,因为kvserver层开始的时候需要恢复kvdb的状态 +// 关于快照raft层与persist的交互:保存kvserver传来的snapshot;生成leaderInstallSnapshot RPC的时候也需要读取snapshot; +// 因此snapshot的具体格式是由kvserver层来定的,raft只负责传递这个东西 +// snapShot里面包含kvserver需要维护的persist_lastRequestId 以及kvDB真正保存的数据persist_kvdb void KvServer::ReadSnapShotToInstall(std::string snapshot) { - if (snapshot.empty()) { - // bootstrap without any state? 
- return; - } - parseFromString(snapshot); - - // r := bytes.NewBuffer(snapshot) - // d := labgob.NewDecoder(r) - // - // var persist_kvdb map[string]string //理应快照 - // var persist_lastRequestId map[int64]int //快照这个为了维护线性一致性 - // - // if d.Decode(&persist_kvdb) != nil || d.Decode(&persist_lastRequestId) != nil { - // DPrintf("KVSERVER %d read persister got a problem!!!!!!!!!!",kv.me) - // } else { - // kv.kvDB = persist_kvdb - // kv.lastRequestId = persist_lastRequestId - // } + if (snapshot.empty()) { + // bootstrap without any state? + return; + } + parseFromString(snapshot); + + // r := bytes.NewBuffer(snapshot) + // d := labgob.NewDecoder(r) + // + // var persist_kvdb map[string]string //理应快照 + // var persist_lastRequestId map[int64]int //快照这个为了维护线性一致性 + // + // if d.Decode(&persist_kvdb) != nil || d.Decode(&persist_lastRequestId) != nil { + // DPrintf("KVSERVER %d read persister got a problem!!!!!!!!!!",kv.me) + // } else { + // kv.kvDB = persist_kvdb + // kv.lastRequestId = persist_lastRequestId + // } } bool KvServer::SendMessageToWaitChan(const Op &op, int raftIndex) { - std::lock_guard lg(m_mtx); - DPrintf( - "[RaftApplyMessageSendToWaitChan--> raftserver{%d}] , Send Command --> Index:{%d} , ClientId {%d}, RequestId {%d}, Opreation {%v}, Key :{%v}, Value :{%v}", - m_me, raftIndex, &op.ClientId, op.RequestId, &op.Operation, &op.Key, &op.Value); - - if (waitApplyCh.find(raftIndex) == waitApplyCh.end()) { - return false; - } - waitApplyCh[raftIndex]->Push(op); - DPrintf( - "[RaftApplyMessageSendToWaitChan--> raftserver{%d}] , Send Command --> Index:{%d} , ClientId {%d}, RequestId {%d}, Opreation {%v}, Key :{%v}, Value :{%v}", - m_me, raftIndex, &op.ClientId, op.RequestId, &op.Operation, &op.Key, &op.Value); - return true; + std::lock_guard lg(m_mtx); + DPrintf( + "[RaftApplyMessageSendToWaitChan--> raftserver{%d}] , Send Command --> Index:{%d} , ClientId {%d}, RequestId " + "{%d}, Opreation {%v}, Key :{%v}, Value :{%v}", + m_me, raftIndex, &op.ClientId, op.RequestId, &op.Operation, &op.Key, &op.Value); + + if (waitApplyCh.find(raftIndex) == waitApplyCh.end()) { + return false; + } + waitApplyCh[raftIndex]->Push(op); + DPrintf( + "[RaftApplyMessageSendToWaitChan--> raftserver{%d}] , Send Command --> Index:{%d} , ClientId {%d}, RequestId " + "{%d}, Opreation {%v}, Key :{%v}, Value :{%v}", + m_me, raftIndex, &op.ClientId, op.RequestId, &op.Operation, &op.Key, &op.Value); + return true; } void KvServer::IfNeedToSendSnapShotCommand(int raftIndex, int proportion) { - if (m_raftNode->GetRaftStateSize() > m_maxRaftState / 10.0) { - // Send SnapShot Command - auto snapshot = MakeSnapShot(); - m_raftNode->Snapshot(raftIndex, snapshot); - } + if (m_raftNode->GetRaftStateSize() > m_maxRaftState / 10.0) { + // Send SnapShot Command + auto snapshot = MakeSnapShot(); + m_raftNode->Snapshot(raftIndex, snapshot); + } } void KvServer::GetSnapShotFromRaft(ApplyMsg message) { - std::lock_guard lg(m_mtx); + std::lock_guard lg(m_mtx); - if (m_raftNode->CondInstallSnapshot(message.SnapshotTerm, message.SnapshotIndex, message.Snapshot)) { - ReadSnapShotToInstall(message.Snapshot); - m_lastSnapShotRaftLogIndex = message.SnapshotIndex; - } + if (m_raftNode->CondInstallSnapshot(message.SnapshotTerm, message.SnapshotIndex, message.Snapshot)) { + ReadSnapShotToInstall(message.Snapshot); + m_lastSnapShotRaftLogIndex = message.SnapshotIndex; + } } std::string KvServer::MakeSnapShot() { - std::lock_guard lg(m_mtx); - std::string snapshotData = getSnapshotData(); - return snapshotData; + std::lock_guard lg(m_mtx); + 
std::string snapshotData = getSnapshotData(); + return snapshotData; } void KvServer::PutAppend(google::protobuf::RpcController *controller, const ::raftKVRpcProctoc::PutAppendArgs *request, ::raftKVRpcProctoc::PutAppendReply *response, ::google::protobuf::Closure *done) { - KvServer::PutAppend(request, response); - done->Run(); + KvServer::PutAppend(request, response); + done->Run(); } void KvServer::Get(google::protobuf::RpcController *controller, const ::raftKVRpcProctoc::GetArgs *request, ::raftKVRpcProctoc::GetReply *response, ::google::protobuf::Closure *done) { - KvServer::Get(request, response); - done->Run(); + KvServer::Get(request, response); + done->Run(); } -KvServer::KvServer(int me, int maxraftstate, std::string nodeInforFileName, short port): -m_skipList(6){ - std::shared_ptr persister = std::make_shared(me); - - m_me = me; - m_maxRaftState = maxraftstate; - - applyChan = std::make_shared >(); - - m_raftNode = std::make_shared(); - ////////////////clerk层面 kvserver开启rpc接受功能 - // 同时raft与raft节点之间也要开启rpc功能,因此有两个注册 - std::thread t([this, port]()-> void { - // provider是一个rpc网络服务对象。把UserService对象发布到rpc节点上 - RpcProvider provider; - provider.NotifyService(this); - provider.NotifyService(this->m_raftNode.get()); //todo:这里获取了原始指针,后面检查一下有没有泄露的问题 或者 shareptr释放的问题 - // 启动一个rpc服务发布节点 Run以后,进程进入阻塞状态,等待远程的rpc调用请求 - provider.Run(m_me, port); - }); - t.detach(); - - ////开启rpc远程调用能力,需要注意必须要保证所有节点都开启rpc接受功能之后才能开启rpc远程调用能力 - ////这里使用睡眠来保证 - std::cout << "raftServer node:" << m_me << " start to sleep to wait all ohter raftnode start!!!!" << std::endl; - sleep(6); - std::cout << "raftServer node:" << m_me << " wake up!!!! start to connect other raftnode" << std::endl; - //获取所有raft节点ip、port ,并进行连接 ,要排除自己 - MprpcConfig config; - config.LoadConfigFile(nodeInforFileName.c_str()); - std::vector > ipPortVt; - for (int i = 0; i < INT_MAX - 1; ++i) { - std::string node = "node" + std::to_string(i); - - std::string nodeIp = config.Load(node + "ip"); - std::string nodePortStr = config.Load(node + "port"); - if (nodeIp.empty()) { - break; - } - ipPortVt.emplace_back(nodeIp, atoi(nodePortStr.c_str())); //沒有atos方法,可以考慮自己实现 - } - std::vector > servers; - //进行连接 - for (int i = 0; i < ipPortVt.size(); ++i) { - if (i == m_me) { - servers.push_back(nullptr); - continue; - } - std::string otherNodeIp = ipPortVt[i].first; - short otherNodePort = ipPortVt[i].second; - auto *rpc = new RaftRpcUtil(otherNodeIp, otherNodePort); - servers.push_back(std::shared_ptr(rpc)); - - std::cout << "node" << m_me << " 连接node" << i << "success!" << std::endl; +KvServer::KvServer(int me, int maxraftstate, std::string nodeInforFileName, short port) : m_skipList(6) { + std::shared_ptr persister = std::make_shared(me); + + m_me = me; + m_maxRaftState = maxraftstate; + + applyChan = std::make_shared >(); + + m_raftNode = std::make_shared(); + ////////////////clerk层面 kvserver开启rpc接受功能 + // 同时raft与raft节点之间也要开启rpc功能,因此有两个注册 + std::thread t([this, port]() -> void { + // provider是一个rpc网络服务对象。把UserService对象发布到rpc节点上 + RpcProvider provider; + provider.NotifyService(this); + provider.NotifyService( + this->m_raftNode.get()); // todo:这里获取了原始指针,后面检查一下有没有泄露的问题 或者 shareptr释放的问题 + // 启动一个rpc服务发布节点 Run以后,进程进入阻塞状态,等待远程的rpc调用请求 + provider.Run(m_me, port); + }); + t.detach(); + + ////开启rpc远程调用能力,需要注意必须要保证所有节点都开启rpc接受功能之后才能开启rpc远程调用能力 + ////这里使用睡眠来保证 + std::cout << "raftServer node:" << m_me << " start to sleep to wait all ohter raftnode start!!!!" << std::endl; + sleep(6); + std::cout << "raftServer node:" << m_me << " wake up!!!! 
start to connect other raftnode" << std::endl; + //获取所有raft节点ip、port ,并进行连接 ,要排除自己 + MprpcConfig config; + config.LoadConfigFile(nodeInforFileName.c_str()); + std::vector > ipPortVt; + for (int i = 0; i < INT_MAX - 1; ++i) { + std::string node = "node" + std::to_string(i); + + std::string nodeIp = config.Load(node + "ip"); + std::string nodePortStr = config.Load(node + "port"); + if (nodeIp.empty()) { + break; } - sleep(ipPortVt.size() - me); //等待所有节点相互连接成功,再启动raft - m_raftNode->init(servers, m_me, persister, applyChan); - //kv的server直接与raft通信,但kv不直接与raft通信,所以需要把ApplyMsg的chan传递下去用于通信,两者的persist也是共用的 - - ////////////////////////////////// - - // You may need initialization code here. - // m_kvDB; //kvdb初始化 - m_skipList; - waitApplyCh; - m_lastRequestId; - m_lastSnapShotRaftLogIndex = 0; //todo:感覺這個函數沒什麼用,不如直接調用raft節點中的snapshot值??? - auto snapshot = persister->ReadSnapshot(); - if (!snapshot.empty()) { - ReadSnapShotToInstall(snapshot); + ipPortVt.emplace_back(nodeIp, atoi(nodePortStr.c_str())); //沒有atos方法,可以考慮自己实现 + } + std::vector > servers; + //进行连接 + for (int i = 0; i < ipPortVt.size(); ++i) { + if (i == m_me) { + servers.push_back(nullptr); + continue; } - std::thread t2(&KvServer::ReadRaftApplyCommandLoop, this); //马上向其他节点宣告自己就是leader - t2.join(); //由於ReadRaftApplyCommandLoop一直不會結束,相當於一直卡死在這裏了 + std::string otherNodeIp = ipPortVt[i].first; + short otherNodePort = ipPortVt[i].second; + auto *rpc = new RaftRpcUtil(otherNodeIp, otherNodePort); + servers.push_back(std::shared_ptr(rpc)); + + std::cout << "node" << m_me << " 连接node" << i << "success!" << std::endl; + } + sleep(ipPortVt.size() - me); //等待所有节点相互连接成功,再启动raft + m_raftNode->init(servers, m_me, persister, applyChan); + // kv的server直接与raft通信,但kv不直接与raft通信,所以需要把ApplyMsg的chan传递下去用于通信,两者的persist也是共用的 + + ////////////////////////////////// + + // You may need initialization code here. + // m_kvDB; //kvdb初始化 + m_skipList; + waitApplyCh; + m_lastRequestId; + m_lastSnapShotRaftLogIndex = 0; // todo:感覺這個函數沒什麼用,不如直接調用raft節點中的snapshot值??? + auto snapshot = persister->ReadSnapshot(); + if (!snapshot.empty()) { + ReadSnapShotToInstall(snapshot); + } + std::thread t2(&KvServer::ReadRaftApplyCommandLoop, this); //马上向其他节点宣告自己就是leader + t2.join(); //由於ReadRaftApplyCommandLoop一直不會結束,相當於一直卡死在這裏了 } diff --git a/src/raftCore/raft.cpp b/src/raftCore/raft.cpp index 0b43e95..629bd1e 100644 --- a/src/raftCore/raft.cpp +++ b/src/raftCore/raft.cpp @@ -1,897 +1,904 @@ -#include -#include #include "raft.h" +#include +#include #include "util.h" void Raft::AppendEntries1(const raftRpcProctoc::AppendEntriesArgs *args, raftRpcProctoc::AppendEntriesReply *reply) { - std::lock_guard locker(m_mtx); - reply->set_appstate(AppNormal); // 能接收到代表网络是正常的 - // Your code here (2A, 2B). - // 不同的人收到AppendEntries的反应是不同的,要注意无论什么时候收到rpc请求和响应都要检查term - - - if (args->term() < m_currentTerm) { - reply->set_success(false); - reply->set_term(m_currentTerm); - reply->set_updatenextindex(-100); // 论文中:让领导人可以及时更新自己 - DPrintf("[func-AppendEntries-rf{%d}] 拒绝了 因为Leader{%d}的term{%v}< rf{%d}.term{%d}\n", m_me, args->leaderid(), - args->term(), m_me, m_currentTerm); - return; // 注意从过期的领导人收到消息不要重设定时器 - } -// Defer ec1([this]() -> void { this->persist(); }); //由于这个局部变量创建在锁之后,因此执行persist的时候应该也是拿到锁的. - DEFER { persist(); }; //由于这个局部变量创建在锁之后,因此执行persist的时候应该也是拿到锁的. 
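// Both the removed version around this point and the reformatted version further below apply
// the same Raft rule here: an RPC carrying a term newer than m_currentTerm demotes this node,
// whatever state it was in. Distilled, the "three changes" the comments keep referring to are:
//
//   if (args->term() > m_currentTerm) {
//     m_status = Follower;            // step down, whatever we were
//     m_currentTerm = args->term();   // adopt the newer term
//     m_votedFor = -1;                // no vote cast yet in this new term
//   }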
- if (args->term() > m_currentTerm) { - // 三变 ,防止遗漏,无论什么时候都是三变 - // DPrintf("[func-AppendEntries-rf{%v} ] 变成follower且更新term 因为Leader{%v}的term{%v}> rf{%v}.term{%v}\n", rf.me, args.LeaderId, args.Term, rf.me, rf.currentTerm) - m_status = Follower; - m_currentTerm = args->term(); - m_votedFor = -1; // 这里设置成-1有意义,如果突然宕机然后上线理论上是可以投票的 - // 这里可不返回,应该改成让改节点尝试接收日志 - // 如果是领导人和candidate突然转到Follower好像也不用其他操作 - // 如果本来就是Follower,那么其term变化,相当于“不言自明”的换了追随的对象,因为原来的leader的term更小,是不会再接收其消息了 - } - myAssert(args->term() == m_currentTerm, format("assert {args.Term == rf.currentTerm} fail")); - // 如果发生网络分区,那么candidate可能会收到同一个term的leader的消息,要转变为Follower,为了和上面,因此直接写 - m_status = Follower; // 这里是有必要的,因为如果candidate收到同一个term的leader的AE,需要变成follower - // term相等 - m_lastResetElectionTime = now(); - // DPrintf("[ AppendEntries-func-rf(%v) ] 重置了选举超时定时器\n", rf.me); - - // 不能无脑的从prevlogIndex开始阶段日志,因为rpc可能会延迟,导致发过来的log是很久之前的 - - // 那么就比较日志,日志有3种情况 - if (args->prevlogindex() > getLastLogIndex()) { - reply->set_success(false); - reply->set_term(m_currentTerm); - reply->set_updatenextindex(getLastLogIndex() + 1); - // DPrintf("[func-AppendEntries-rf{%v}] 拒绝了节点{%v},因为日志太新,args.PrevLogIndex{%v} > lastLogIndex{%v},返回值:{%v}\n", rf.me, args.LeaderId, args.PrevLogIndex, rf.getLastLogIndex(), reply) - return; - } else if (args->prevlogindex() < m_lastSnapshotIncludeIndex) { - // 如果prevlogIndex还没有更上快照 - reply->set_success(false); - reply->set_term(m_currentTerm); - reply->set_updatenextindex(m_lastSnapshotIncludeIndex + 1); // todo 如果想直接弄到最新好像不对,因为是从后慢慢往前匹配的,这里不匹配说明后面的都不匹配 - // DPrintf("[func-AppendEntries-rf{%v}] 拒绝了节点{%v},因为log太老,返回值:{%v}\n", rf.me, args.LeaderId, reply) return - } - // 本机日志有那么长,冲突(same index,different term),截断日志 - // 注意:这里目前当args.PrevLogIndex == rf.lastSnapshotIncludeIndex与不等的时候要分开考虑,可以看看能不能优化这块 - if (matchLog(args->prevlogindex(), args->prevlogterm())) { - // todo: 整理logs ,不能直接截断,必须一个一个检查,因为发送来的log可能是之前的,直接截断可能导致“取回”已经在follower日志中的条目 - // 那意思是不是可能会有一段发来的AE中的logs中前半是匹配的,后半是不匹配的,这种应该:1.follower如何处理? 2.如何给leader回复 - // 3. leader如何处理 - - for (int i = 0; i < args->entries_size(); i++) { - auto log = args->entries(i); - if (log.logindex() > getLastLogIndex()) { - //超过就直接添加日志 - m_logs.push_back(log); - } else { - //没超过就比较是否匹配,不匹配再更新,而不是直接截断 - // todo : 这里可以改进为比较对应logIndex位置的term是否相等,term相等就代表匹配 - // todo:这个地方放出来会出问题,按理说index相同,term相同,log也应该相同才对 - // rf.logs[entry.Index-firstIndex].Term ?= entry.Term - - if (m_logs[getSlicesIndexFromLogIndex(log.logindex())].logterm() == log.logterm() && - m_logs[getSlicesIndexFromLogIndex(log.logindex())].command() != log.command() - ) { - //相同位置的log ,其logTerm相等,但是命令却不相同,不符合raft的前向匹配,异常了! - myAssert(false, - format( - "[func-AppendEntries-rf{%d}] 两节点logIndex{%d}和term{%d}相同,但是其command{%d:%d} {%d:%d}却不同!!\n", + std::lock_guard locker(m_mtx); + reply->set_appstate(AppNormal); // 能接收到代表网络是正常的 + // Your code here (2A, 2B). + // 不同的人收到AppendEntries的反应是不同的,要注意无论什么时候收到rpc请求和响应都要检查term + + if (args->term() < m_currentTerm) { + reply->set_success(false); + reply->set_term(m_currentTerm); + reply->set_updatenextindex(-100); // 论文中:让领导人可以及时更新自己 + DPrintf("[func-AppendEntries-rf{%d}] 拒绝了 因为Leader{%d}的term{%v}< rf{%d}.term{%d}\n", m_me, args->leaderid(), + args->term(), m_me, m_currentTerm); + return; // 注意从过期的领导人收到消息不要重设定时器 + } + // Defer ec1([this]() -> void { this->persist(); }); + // //由于这个局部变量创建在锁之后,因此执行persist的时候应该也是拿到锁的. + DEFER { persist(); }; //由于这个局部变量创建在锁之后,因此执行persist的时候应该也是拿到锁的. 
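// DEFER itself is not defined anywhere in this diff; one plausible scope-guard definition that
// matches the `DEFER { persist(); };` syntax used here (run the block on every exit path while
// the lock is still held) is sketched below. The helper names are illustrative, not the
// project's own:
//
//   #include <utility>  // std::move
//
//   template <class F>
//   struct DeferOp {
//     F fn;
//     ~DeferOp() { fn(); }  // fires when the enclosing scope unwinds
//   };
//   struct DeferTag {};
//   template <class F>
//   DeferOp<F> operator+(DeferTag, F fn) {
//     return {std::move(fn)};
//   }
//   #define DEFER_CAT2(a, b) a##b
//   #define DEFER_CAT(a, b) DEFER_CAT2(a, b)
//   #define DEFER auto DEFER_CAT(defer_, __LINE__) = DeferTag{} + [&]()
//
// With such a definition, `DEFER { persist(); };` expands to an anonymous guard whose
// destructor calls persist() just before m_mtx is released, including on the early-return
// paths in this handler.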
+ if (args->term() > m_currentTerm) { + // 三变 ,防止遗漏,无论什么时候都是三变 + // DPrintf("[func-AppendEntries-rf{%v} ] 变成follower且更新term 因为Leader{%v}的term{%v}> rf{%v}.term{%v}\n", rf.me, + // args.LeaderId, args.Term, rf.me, rf.currentTerm) + m_status = Follower; + m_currentTerm = args->term(); + m_votedFor = -1; // 这里设置成-1有意义,如果突然宕机然后上线理论上是可以投票的 + // 这里可不返回,应该改成让改节点尝试接收日志 + // 如果是领导人和candidate突然转到Follower好像也不用其他操作 + // 如果本来就是Follower,那么其term变化,相当于“不言自明”的换了追随的对象,因为原来的leader的term更小,是不会再接收其消息了 + } + myAssert(args->term() == m_currentTerm, format("assert {args.Term == rf.currentTerm} fail")); + // 如果发生网络分区,那么candidate可能会收到同一个term的leader的消息,要转变为Follower,为了和上面,因此直接写 + m_status = Follower; // 这里是有必要的,因为如果candidate收到同一个term的leader的AE,需要变成follower + // term相等 + m_lastResetElectionTime = now(); + // DPrintf("[ AppendEntries-func-rf(%v) ] 重置了选举超时定时器\n", rf.me); + + // 不能无脑的从prevlogIndex开始阶段日志,因为rpc可能会延迟,导致发过来的log是很久之前的 + + // 那么就比较日志,日志有3种情况 + if (args->prevlogindex() > getLastLogIndex()) { + reply->set_success(false); + reply->set_term(m_currentTerm); + reply->set_updatenextindex(getLastLogIndex() + 1); + // DPrintf("[func-AppendEntries-rf{%v}] 拒绝了节点{%v},因为日志太新,args.PrevLogIndex{%v} > + // lastLogIndex{%v},返回值:{%v}\n", rf.me, args.LeaderId, args.PrevLogIndex, rf.getLastLogIndex(), reply) + return; + } else if (args->prevlogindex() < m_lastSnapshotIncludeIndex) { + // 如果prevlogIndex还没有更上快照 + reply->set_success(false); + reply->set_term(m_currentTerm); + reply->set_updatenextindex( + m_lastSnapshotIncludeIndex + + 1); // todo 如果想直接弄到最新好像不对,因为是从后慢慢往前匹配的,这里不匹配说明后面的都不匹配 + // DPrintf("[func-AppendEntries-rf{%v}] 拒绝了节点{%v},因为log太老,返回值:{%v}\n", rf.me, args.LeaderId, reply) + // return + } + // 本机日志有那么长,冲突(same index,different term),截断日志 + // 注意:这里目前当args.PrevLogIndex == rf.lastSnapshotIncludeIndex与不等的时候要分开考虑,可以看看能不能优化这块 + if (matchLog(args->prevlogindex(), args->prevlogterm())) { + // todo: 整理logs + //,不能直接截断,必须一个一个检查,因为发送来的log可能是之前的,直接截断可能导致“取回”已经在follower日志中的条目 + // 那意思是不是可能会有一段发来的AE中的logs中前半是匹配的,后半是不匹配的,这种应该:1.follower如何处理? 2.如何给leader回复 + // 3. leader如何处理 + + for (int i = 0; i < args->entries_size(); i++) { + auto log = args->entries(i); + if (log.logindex() > getLastLogIndex()) { + //超过就直接添加日志 + m_logs.push_back(log); + } else { + //没超过就比较是否匹配,不匹配再更新,而不是直接截断 + // todo : 这里可以改进为比较对应logIndex位置的term是否相等,term相等就代表匹配 + // todo:这个地方放出来会出问题,按理说index相同,term相同,log也应该相同才对 + // rf.logs[entry.Index-firstIndex].Term ?= entry.Term + + if (m_logs[getSlicesIndexFromLogIndex(log.logindex())].logterm() == log.logterm() && + m_logs[getSlicesIndexFromLogIndex(log.logindex())].command() != log.command()) { + //相同位置的log ,其logTerm相等,但是命令却不相同,不符合raft的前向匹配,异常了! + myAssert(false, format("[func-AppendEntries-rf{%d}] 两节点logIndex{%d}和term{%d}相同,但是其command{%d:%d} " + " {%d:%d}却不同!!\n", m_me, log.logindex(), log.logterm(), m_me, m_logs[getSlicesIndexFromLogIndex(log.logindex())].command(), args->leaderid(), log.command())); - } - if (m_logs[getSlicesIndexFromLogIndex(log.logindex())].logterm() != log.logterm()) { - //不匹配就更新 - m_logs[getSlicesIndexFromLogIndex(log.logindex())] = log; - } - } } - - - // 错误写法like: rf.shrinkLogsToIndex(args.PrevLogIndex) - // rf.logs = append(rf.logs, args.Entries...) - // 因为可能会收到过期的log!!! 
因此这里是大于等于 - myAssert(getLastLogIndex() >= args->prevlogindex() + args->entries_size(), - format( - "[func-AppendEntries1-rf{%d}]rf.getLastLogIndex(){%d} != args.PrevLogIndex{%d}+len(args.Entries){%d}", - m_me, getLastLogIndex(), args->prevlogindex(), args->entries_size())); - // if len(args.Entries) > 0 { - // fmt.Printf("[func-AppendEntries rf:{%v}] ] : args.term:%v, rf.term:%v ,rf.logs的长度:%v\n", rf.me, args.Term, rf.currentTerm, len(rf.logs)) - // } - if (args->leadercommit() > m_commitIndex) { - m_commitIndex = std::min(args->leadercommit(), getLastLogIndex()); - // 这个地方不能无脑跟上getLastLogIndex(),因为可能存在args->leadercommit()落后于 getLastLogIndex()的情况 + if (m_logs[getSlicesIndexFromLogIndex(log.logindex())].logterm() != log.logterm()) { + //不匹配就更新 + m_logs[getSlicesIndexFromLogIndex(log.logindex())] = log; } + } + } + // 错误写法like: rf.shrinkLogsToIndex(args.PrevLogIndex) + // rf.logs = append(rf.logs, args.Entries...) + // 因为可能会收到过期的log!!! 因此这里是大于等于 + myAssert( + getLastLogIndex() >= args->prevlogindex() + args->entries_size(), + format("[func-AppendEntries1-rf{%d}]rf.getLastLogIndex(){%d} != args.PrevLogIndex{%d}+len(args.Entries){%d}", + m_me, getLastLogIndex(), args->prevlogindex(), args->entries_size())); + // if len(args.Entries) > 0 { + // fmt.Printf("[func-AppendEntries rf:{%v}] ] : args.term:%v, rf.term:%v ,rf.logs的长度:%v\n", rf.me, args.Term, + //rf.currentTerm, len(rf.logs)) + // } + if (args->leadercommit() > m_commitIndex) { + m_commitIndex = std::min(args->leadercommit(), getLastLogIndex()); + // 这个地方不能无脑跟上getLastLogIndex(),因为可能存在args->leadercommit()落后于 getLastLogIndex()的情况 + } - // 领导会一次发送完所有的日志 - myAssert(getLastLogIndex() >= m_commitIndex, - format("[func-AppendEntries1-rf{%d}] rf.getLastLogIndex{%d} < rf.commitIndex{%d}", m_me, - getLastLogIndex(), m_commitIndex)); - reply->set_success(true); - reply->set_term(m_currentTerm); - - // DPrintf("[func-AppendEntries-rf{%v}] 接收了来自节点{%v}的log,当前lastLogIndex{%v},返回值:{%v}\n", rf.me, - // args.LeaderId, rf.getLastLogIndex(), reply) + // 领导会一次发送完所有的日志 + myAssert(getLastLogIndex() >= m_commitIndex, + format("[func-AppendEntries1-rf{%d}] rf.getLastLogIndex{%d} < rf.commitIndex{%d}", m_me, + getLastLogIndex(), m_commitIndex)); + reply->set_success(true); + reply->set_term(m_currentTerm); - return; - } else { - // 优化 - // PrevLogIndex 长度合适,但是不匹配,因此往前寻找 矛盾的term的第一个元素 - // 为什么该term的日志都是矛盾的呢?也不一定都是矛盾的,只是这么优化减少rpc而已 - // ?什么时候term会矛盾呢?很多情况,比如leader接收了日志之后马上就崩溃等等 - reply->set_updatenextindex(args->prevlogindex()); - - for (int index = args->prevlogindex(); index >= m_lastSnapshotIncludeIndex; --index) { - if (getLogTermFromLogIndex(index) != getLogTermFromLogIndex(args->prevlogindex())) { - reply->set_updatenextindex(index + 1); - break; - } - } - reply->set_success(false); - reply->set_term(m_currentTerm); - // 对UpdateNextIndex待优化 todo 找到符合的term的最后一个 - // DPrintf("[func-AppendEntries-rf{%v}] 拒绝了节点{%v},因为prevLodIndex{%v}的args.term{%v}不匹配当前节点的logterm{%v},返回值:{%v}\n", - // rf.me, args.LeaderId, args.PrevLogIndex, args.PrevLogTerm, - // rf.logs[rf.getSlicesIndexFromLogIndex(args.PrevLogIndex)].LogTerm, reply) - // DPrintf("[func-AppendEntries-rf{%v}] 返回值: reply.UpdateNextIndex从{%v}优化到{%v},优化了{%v}\n", rf.me, - // args.PrevLogIndex, reply.UpdateNextIndex, args.PrevLogIndex - reply.UpdateNextIndex) // 很多都是优化了0 - return; + // DPrintf("[func-AppendEntries-rf{%v}] 接收了来自节点{%v}的log,当前lastLogIndex{%v},返回值:{%v}\n", + // rf.me, + // args.LeaderId, rf.getLastLogIndex(), reply) + + return; + } else { + // 优化 + // PrevLogIndex 长度合适,但是不匹配,因此往前寻找 矛盾的term的第一个元素 + // 
为什么该term的日志都是矛盾的呢?也不一定都是矛盾的,只是这么优化减少rpc而已 + // ?什么时候term会矛盾呢?很多情况,比如leader接收了日志之后马上就崩溃等等 + reply->set_updatenextindex(args->prevlogindex()); + + for (int index = args->prevlogindex(); index >= m_lastSnapshotIncludeIndex; --index) { + if (getLogTermFromLogIndex(index) != getLogTermFromLogIndex(args->prevlogindex())) { + reply->set_updatenextindex(index + 1); + break; + } } - - // fmt.Printf("[func-AppendEntries,rf{%v}]:len(rf.logs):%v, rf.commitIndex:%v\n", rf.me, len(rf.logs), rf.commitIndex) + reply->set_success(false); + reply->set_term(m_currentTerm); + // 对UpdateNextIndex待优化 todo 找到符合的term的最后一个 + // DPrintf("[func-AppendEntries-rf{%v}] + // 拒绝了节点{%v},因为prevLodIndex{%v}的args.term{%v}不匹配当前节点的logterm{%v},返回值:{%v}\n", + // rf.me, args.LeaderId, args.PrevLogIndex, args.PrevLogTerm, + // rf.logs[rf.getSlicesIndexFromLogIndex(args.PrevLogIndex)].LogTerm, reply) + // DPrintf("[func-AppendEntries-rf{%v}] 返回值: reply.UpdateNextIndex从{%v}优化到{%v},优化了{%v}\n", rf.me, + // args.PrevLogIndex, reply.UpdateNextIndex, args.PrevLogIndex - reply.UpdateNextIndex) // + // 很多都是优化了0 + return; + } + + // fmt.Printf("[func-AppendEntries,rf{%v}]:len(rf.logs):%v, rf.commitIndex:%v\n", rf.me, len(rf.logs), rf.commitIndex) } void Raft::applierTicker() { - while (true) { - m_mtx.lock(); - if (m_status == Leader) { - DPrintf("[Raft::applierTicker() - raft{%d}] m_lastApplied{%d} m_commitIndex{%d}", m_me, m_lastApplied, - m_commitIndex); - } - auto applyMsgs = getApplyLogs(); - m_mtx.unlock(); - //使用匿名函数是因为传递管道的时候不用拿锁 todo:好像必须拿锁,因为不拿锁的话如果调用多次applyLog函数,可能会导致应用的顺序不一样 - if (!applyMsgs.empty()) { - DPrintf("[func- Raft::applierTicker()-raft{%d}] 向kvserver報告的applyMsgs長度爲:{%d}", m_me, - applyMsgs.size()); - } - for (auto &message: applyMsgs) { - applyChan->Push(message); - } - sleepNMilliseconds(ApplyInterval); + while (true) { + m_mtx.lock(); + if (m_status == Leader) { + DPrintf("[Raft::applierTicker() - raft{%d}] m_lastApplied{%d} m_commitIndex{%d}", m_me, m_lastApplied, + m_commitIndex); + } + auto applyMsgs = getApplyLogs(); + m_mtx.unlock(); + //使用匿名函数是因为传递管道的时候不用拿锁 + //todo:好像必须拿锁,因为不拿锁的话如果调用多次applyLog函数,可能会导致应用的顺序不一样 + if (!applyMsgs.empty()) { + DPrintf("[func- Raft::applierTicker()-raft{%d}] 向kvserver報告的applyMsgs長度爲:{%d}", m_me, applyMsgs.size()); + } + for (auto &message : applyMsgs) { + applyChan->Push(message); } + sleepNMilliseconds(ApplyInterval); + } } bool Raft::CondInstallSnapshot(int lastIncludedTerm, int lastIncludedIndex, std::string snapshot) { - return true; - //// Your code here (2D). - //rf.mu.Lock() - //defer rf.mu.Unlock() - //DPrintf("{Node %v} service calls CondInstallSnapshot with lastIncludedTerm %v and lastIncludedIndex {%v} to check whether snapshot is still valid in term %v", rf.me, lastIncludedTerm, lastIncludedIndex, rf.currentTerm) - //// outdated snapshot - //if lastIncludedIndex <= rf.commitIndex { - // return false - //} - // - //lastLogIndex, _ := rf.getLastLogIndexAndTerm() - //if lastIncludedIndex > lastLogIndex { - // rf.logs = make([]LogEntry, 0) - //} else { - // rf.logs = rf.logs[rf.getSlicesIndexFromLogIndex(lastIncludedIndex)+1:] - //} - //// update dummy entry with lastIncludedTerm and lastIncludedIndex - //rf.lastApplied, rf.commitIndex = lastIncludedIndex, lastIncludedIndex - // - //rf.persister.Save(rf.persistData(), snapshot) - //return true + return true; + //// Your code here (2D). 
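// The Go reference kept in the comments below can be transliterated into this file's C++ using
// only helpers already declared in raft.h; a hedged sketch (assuming Persister exposes the same
// Save(state, snapshot) pair the Go version calls), in contrast to the current stub that always
// returns true without trimming anything:
//
//   std::lock_guard<std::mutex> lg(m_mtx);
//   if (lastIncludedIndex <= m_commitIndex) {
//     return false;                                  // outdated snapshot, ignore it
//   }
//   if (lastIncludedIndex > getLastLogIndex()) {
//     m_logs.clear();                                // the snapshot covers everything we have
//   } else {
//     m_logs.erase(m_logs.begin(),
//                  m_logs.begin() + getSlicesIndexFromLogIndex(lastIncludedIndex) + 1);
//   }
//   m_lastApplied = lastIncludedIndex;               // the dummy entry now stands at the snapshot
//   m_commitIndex = lastIncludedIndex;
//   m_persister->Save(persistData(), snapshot);
//   return true;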
+ // rf.mu.Lock() + // defer rf.mu.Unlock() + // DPrintf("{Node %v} service calls CondInstallSnapshot with lastIncludedTerm %v and lastIncludedIndex {%v} to check + // whether snapshot is still valid in term %v", rf.me, lastIncludedTerm, lastIncludedIndex, rf.currentTerm) + //// outdated snapshot + // if lastIncludedIndex <= rf.commitIndex { + // return false + // } + // + // lastLogIndex, _ := rf.getLastLogIndexAndTerm() + // if lastIncludedIndex > lastLogIndex { + // rf.logs = make([]LogEntry, 0) + // } else { + // rf.logs = rf.logs[rf.getSlicesIndexFromLogIndex(lastIncludedIndex)+1:] + // } + //// update dummy entry with lastIncludedTerm and lastIncludedIndex + // rf.lastApplied, rf.commitIndex = lastIncludedIndex, lastIncludedIndex + // + // rf.persister.Save(rf.persistData(), snapshot) + // return true } void Raft::doElection() { - std::lock_guard g(m_mtx); - - if (m_status == Leader) { - //fmt.Printf("[ ticker-func-rf(%v) ] is a Leader,wait the lock\n", rf.me) - } - //fmt.Printf("[ ticker-func-rf(%v) ] get the lock\n", rf.me) - - if (m_status != Leader) { - DPrintf("[ ticker-func-rf(%d) ] 选举定时器到期且不是leader,开始选举 \n", m_me); - //当选举的时候定时器超时就必须重新选举,不然没有选票就会一直卡主 - //重竞选超时,term也会增加的 - m_status = Candidate; - ///开始新一轮的选举 - m_currentTerm += 1; - m_votedFor = m_me; //即是自己给自己投,也避免candidate给同辈的candidate投 - persist(); - std::shared_ptr votedNum = std::make_shared(1); // 使用 make_shared 函数初始化 !! 亮点 - // 重新设置定时器 - m_lastResetElectionTime = now(); - // 发布RequestVote RPC - for (int i = 0; i < m_peers.size(); i++) { - if (i == m_me) { - continue; - } - int lastLogIndex = -1, lastLogTerm = -1; - getLastLogIndexAndTerm(&lastLogIndex, &lastLogTerm); //获取最后一个log的term和下标 - - std::shared_ptr requestVoteArgs = std::make_shared< - raftRpcProctoc::RequestVoteArgs>(); - requestVoteArgs->set_term(m_currentTerm); - requestVoteArgs->set_candidateid(m_me); - requestVoteArgs->set_lastlogindex(lastLogIndex); - requestVoteArgs->set_lastlogterm(lastLogTerm); - std::shared_ptr requestVoteReply = std::make_shared< - raftRpcProctoc::RequestVoteReply>(); - - //使用匿名函数执行避免其拿到锁 - - std::thread t(&Raft::sendRequestVote, this, i, requestVoteArgs, requestVoteReply, - votedNum); // 创建新线程并执行b函数,并传递参数 - t.detach(); - } + std::lock_guard g(m_mtx); + + if (m_status == Leader) { + // fmt.Printf("[ ticker-func-rf(%v) ] is a Leader,wait the lock\n", rf.me) + } + // fmt.Printf("[ ticker-func-rf(%v) ] get the lock\n", rf.me) + + if (m_status != Leader) { + DPrintf("[ ticker-func-rf(%d) ] 选举定时器到期且不是leader,开始选举 \n", m_me); + //当选举的时候定时器超时就必须重新选举,不然没有选票就会一直卡主 + //重竞选超时,term也会增加的 + m_status = Candidate; + ///开始新一轮的选举 + m_currentTerm += 1; + m_votedFor = m_me; //即是自己给自己投,也避免candidate给同辈的candidate投 + persist(); + std::shared_ptr votedNum = std::make_shared(1); // 使用 make_shared 函数初始化 !! 
亮点 + // 重新设置定时器 + m_lastResetElectionTime = now(); + // 发布RequestVote RPC + for (int i = 0; i < m_peers.size(); i++) { + if (i == m_me) { + continue; + } + int lastLogIndex = -1, lastLogTerm = -1; + getLastLogIndexAndTerm(&lastLogIndex, &lastLogTerm); //获取最后一个log的term和下标 + + std::shared_ptr requestVoteArgs = + std::make_shared(); + requestVoteArgs->set_term(m_currentTerm); + requestVoteArgs->set_candidateid(m_me); + requestVoteArgs->set_lastlogindex(lastLogIndex); + requestVoteArgs->set_lastlogterm(lastLogTerm); + std::shared_ptr requestVoteReply = + std::make_shared(); + + //使用匿名函数执行避免其拿到锁 + + std::thread t(&Raft::sendRequestVote, this, i, requestVoteArgs, requestVoteReply, + votedNum); // 创建新线程并执行b函数,并传递参数 + t.detach(); } + } } void Raft::doHeartBeat() { - std::lock_guard g(m_mtx); + std::lock_guard g(m_mtx); - if (m_status == Leader) { - DPrintf("[func-Raft::doHeartBeat()-Leader: {%d}] Leader的心跳定时器触发了\n", m_me); - auto appendNums = std::make_shared(1); //正确返回的节点的数量 - - //对Follower(除了自己外的所有节点发送AE) - //todo 这里肯定是要修改的,最好使用一个单独的goruntime来负责管理发送log,因为后面的log发送涉及优化之类的 - //最少要单独写一个函数来管理,而不是在这一坨 - for (int i = 0; i < m_peers.size(); i++) { - if (i == m_me) { - continue; - } - DPrintf("[func-Raft::doHeartBeat()-Leader: {%d}] Leader的心跳定时器触发了 index:{%d}\n", m_me, i); - myAssert(m_nextIndex[i] >= 1, format("rf.nextIndex[%d] = {%d}", i, m_nextIndex[i])); - //日志压缩加入后要判断是发送快照还是发送AE - if (m_nextIndex[i] <= m_lastSnapshotIncludeIndex) { - // DPrintf("[func-ticker()-rf{%v}]rf.nextIndex[%v] {%v} <= rf.lastSnapshotIncludeIndex{%v},so leaderSendSnapShot", rf.me, i, rf.nextIndex[i], rf.lastSnapshotIncludeIndex) - std::thread t(&Raft::leaderSendSnapShot, this, i); // 创建新线程并执行b函数,并传递参数 - t.detach(); - continue; - } - //构造发送值 - int preLogIndex = -1; - int PrevLogTerm = -1; - getPrevLogInfo(i, &preLogIndex, &PrevLogTerm); - std::shared_ptr appendEntriesArgs = std::make_shared< - raftRpcProctoc::AppendEntriesArgs>(); - appendEntriesArgs->set_term(m_currentTerm); - appendEntriesArgs->set_leaderid(m_me); - appendEntriesArgs->set_prevlogindex(preLogIndex); - appendEntriesArgs->set_prevlogterm(PrevLogTerm); - appendEntriesArgs->clear_entries(); - appendEntriesArgs->set_leadercommit(m_commitIndex); - if (preLogIndex != m_lastSnapshotIncludeIndex) { - for (int j = getSlicesIndexFromLogIndex(preLogIndex) + 1; j < m_logs.size(); ++j) { - raftRpcProctoc::LogEntry *sendEntryPtr = appendEntriesArgs->add_entries(); - *sendEntryPtr = m_logs[j]; //=是可以点进去的,可以点进去看下protobuf如何重写这个的 - } - } else { - for (const auto &item: m_logs) { - raftRpcProctoc::LogEntry *sendEntryPtr = appendEntriesArgs->add_entries(); - *sendEntryPtr = item; //=是可以点进去的,可以点进去看下protobuf如何重写这个的 - } - } - int lastLogIndex = getLastLogIndex(); - //leader对每个节点发送的日志长短不一,但是都保证从prevIndex发送直到最后 - myAssert(appendEntriesArgs->prevlogindex() + appendEntriesArgs->entries_size() == lastLogIndex, - format("appendEntriesArgs.PrevLogIndex{%d}+len(appendEntriesArgs.Entries){%d} != lastLogIndex{%d}", - appendEntriesArgs->prevlogindex(), appendEntriesArgs->entries_size(), lastLogIndex)); - //构造返回值 - const std::shared_ptr appendEntriesReply = std::make_shared< - raftRpcProctoc::AppendEntriesReply>(); - appendEntriesReply->set_appstate(Disconnected); - - std::thread t(&Raft::sendAppendEntries, this, i, appendEntriesArgs, appendEntriesReply, - appendNums); // 创建新线程并执行b函数,并传递参数 - t.detach(); + if (m_status == Leader) { + DPrintf("[func-Raft::doHeartBeat()-Leader: {%d}] Leader的心跳定时器触发了\n", m_me); + auto appendNums = std::make_shared(1); //正确返回的节点的数量 + + //对Follower(除了自己外的所有节点发送AE) + 
// todo 这里肯定是要修改的,最好使用一个单独的goruntime来负责管理发送log,因为后面的log发送涉及优化之类的 + //最少要单独写一个函数来管理,而不是在这一坨 + for (int i = 0; i < m_peers.size(); i++) { + if (i == m_me) { + continue; + } + DPrintf("[func-Raft::doHeartBeat()-Leader: {%d}] Leader的心跳定时器触发了 index:{%d}\n", m_me, i); + myAssert(m_nextIndex[i] >= 1, format("rf.nextIndex[%d] = {%d}", i, m_nextIndex[i])); + //日志压缩加入后要判断是发送快照还是发送AE + if (m_nextIndex[i] <= m_lastSnapshotIncludeIndex) { + // DPrintf("[func-ticker()-rf{%v}]rf.nextIndex[%v] {%v} <= + // rf.lastSnapshotIncludeIndex{%v},so leaderSendSnapShot", rf.me, i, rf.nextIndex[i], + // rf.lastSnapshotIncludeIndex) + std::thread t(&Raft::leaderSendSnapShot, this, i); // 创建新线程并执行b函数,并传递参数 + t.detach(); + continue; + } + //构造发送值 + int preLogIndex = -1; + int PrevLogTerm = -1; + getPrevLogInfo(i, &preLogIndex, &PrevLogTerm); + std::shared_ptr appendEntriesArgs = + std::make_shared(); + appendEntriesArgs->set_term(m_currentTerm); + appendEntriesArgs->set_leaderid(m_me); + appendEntriesArgs->set_prevlogindex(preLogIndex); + appendEntriesArgs->set_prevlogterm(PrevLogTerm); + appendEntriesArgs->clear_entries(); + appendEntriesArgs->set_leadercommit(m_commitIndex); + if (preLogIndex != m_lastSnapshotIncludeIndex) { + for (int j = getSlicesIndexFromLogIndex(preLogIndex) + 1; j < m_logs.size(); ++j) { + raftRpcProctoc::LogEntry *sendEntryPtr = appendEntriesArgs->add_entries(); + *sendEntryPtr = m_logs[j]; //=是可以点进去的,可以点进去看下protobuf如何重写这个的 + } + } else { + for (const auto &item : m_logs) { + raftRpcProctoc::LogEntry *sendEntryPtr = appendEntriesArgs->add_entries(); + *sendEntryPtr = item; //=是可以点进去的,可以点进去看下protobuf如何重写这个的 } - m_lastResetHearBeatTime = now(); //leader发送心跳,就不是随机时间了 + } + int lastLogIndex = getLastLogIndex(); + // leader对每个节点发送的日志长短不一,但是都保证从prevIndex发送直到最后 + myAssert(appendEntriesArgs->prevlogindex() + appendEntriesArgs->entries_size() == lastLogIndex, + format("appendEntriesArgs.PrevLogIndex{%d}+len(appendEntriesArgs.Entries){%d} != lastLogIndex{%d}", + appendEntriesArgs->prevlogindex(), appendEntriesArgs->entries_size(), lastLogIndex)); + //构造返回值 + const std::shared_ptr appendEntriesReply = + std::make_shared(); + appendEntriesReply->set_appstate(Disconnected); + + std::thread t(&Raft::sendAppendEntries, this, i, appendEntriesArgs, appendEntriesReply, + appendNums); // 创建新线程并执行b函数,并传递参数 + t.detach(); } + m_lastResetHearBeatTime = now(); // leader发送心跳,就不是随机时间了 + } } void Raft::electionTimeOutTicker() { - // Your code here (2A) - // Check if a Leader election should be started. - while (true) { - m_mtx.lock(); - auto nowTime = now(); - auto suitableSleepTime = getRandomizedElectionTimeout() + m_lastResetElectionTime - nowTime; - m_mtx.unlock(); - if (suitableSleepTime.count() > 1) { - std::this_thread::sleep_for(suitableSleepTime); - } + // Your code here (2A) + // Check if a Leader election should be started. 
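// The loop below sleeps for getRandomizedElectionTimeout() + m_lastResetElectionTime - now(),
// then checks whether m_lastResetElectionTime advanced while it slept (a heartbeat or vote
// reset the timer); only a nap with no reset falls through to doElection().
// getRandomizedElectionTimeout() is defined elsewhere in the project; a plausible sketch,
// assuming the conventional Raft band of roughly 300 to 500 ms (the project's actual constants
// may differ):
//
//   #include <chrono>
//   #include <random>
//
//   std::chrono::milliseconds getRandomizedElectionTimeout() {
//     static thread_local std::mt19937 gen{std::random_device{}()};
//     std::uniform_int_distribution<int> dist(300, 500);  // the spread breaks split votes
//     return std::chrono::milliseconds(dist(gen));
//   }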
void Raft::electionTimeOutTicker() {
-    // Your code here (2A)
-    // Check if a Leader election should be started.
-    while (true) {
-        m_mtx.lock();
-        auto nowTime = now();
-        auto suitableSleepTime = getRandomizedElectionTimeout() + m_lastResetElectionTime - nowTime;
-        m_mtx.unlock();
-        if (suitableSleepTime.count() > 1) {
-            std::this_thread::sleep_for(suitableSleepTime);
-        }
-
-        if ((m_lastResetElectionTime - nowTime).count() > 0) {
-            // the timer was reset while we slept, so no timeout occurred; sleep again
-            continue;
-        }
-        doElection();
+  // Your code here (2A)
+  // Check if a Leader election should be started.
+  while (true) {
+    m_mtx.lock();
+    auto nowTime = now();
+    auto suitableSleepTime = getRandomizedElectionTimeout() + m_lastResetElectionTime - nowTime;
+    m_mtx.unlock();
+    if (suitableSleepTime.count() > 1) {
+      std::this_thread::sleep_for(suitableSleepTime);
+    }

+    if ((m_lastResetElectionTime - nowTime).count() > 0) {
+      // the timer was reset while we slept, so no timeout occurred; sleep again
+      continue;
    }
+    doElection();
+  }
}

std::vector<ApplyMsg> Raft::getApplyLogs() {
-    std::vector<ApplyMsg> applyMsgs;
-    myAssert(m_commitIndex <= getLastLogIndex(),
-             format("[func-getApplyLogs-rf{%d}] commitIndex{%d} >getLastLogIndex{%d}", m_me, m_commitIndex,
-                    getLastLogIndex()));
-
-    while (m_lastApplied < m_commitIndex) {
-        m_lastApplied++;
-        myAssert(m_logs[getSlicesIndexFromLogIndex(m_lastApplied)].logindex() == m_lastApplied,
-                 format("rf.logs[rf.getSlicesIndexFromLogIndex(rf.lastApplied)].LogIndex{%d} != rf.lastApplied{%d} ",
-                        m_logs[getSlicesIndexFromLogIndex(m_lastApplied)].logindex(), m_lastApplied));
-        ApplyMsg applyMsg;
-        applyMsg.CommandValid = true;
-        applyMsg.SnapshotValid = false;
-        applyMsg.Command = m_logs[getSlicesIndexFromLogIndex(m_lastApplied)].command();
-        applyMsg.CommandIndex = m_lastApplied;
-        applyMsgs.emplace_back(applyMsg);
-        // DPrintf("[ applyLog func-rf{%v} ] apply Log,logIndex:%v ,logTerm:{%v},command:{%v}\n", rf.me, rf.lastApplied, rf.logs[rf.getSlicesIndexFromLogIndex(rf.lastApplied)].LogTerm, rf.logs[rf.getSlicesIndexFromLogIndex(rf.lastApplied)].Command)
-    }
-    return applyMsgs;
+  std::vector<ApplyMsg> applyMsgs;
+  myAssert(m_commitIndex <= getLastLogIndex(), format("[func-getApplyLogs-rf{%d}] commitIndex{%d} >getLastLogIndex{%d}",
+                                                      m_me, m_commitIndex, getLastLogIndex()));
+
+  while (m_lastApplied < m_commitIndex) {
+    m_lastApplied++;
+    myAssert(m_logs[getSlicesIndexFromLogIndex(m_lastApplied)].logindex() == m_lastApplied,
+             format("rf.logs[rf.getSlicesIndexFromLogIndex(rf.lastApplied)].LogIndex{%d} != rf.lastApplied{%d} ",
+                    m_logs[getSlicesIndexFromLogIndex(m_lastApplied)].logindex(), m_lastApplied));
+    ApplyMsg applyMsg;
+    applyMsg.CommandValid = true;
+    applyMsg.SnapshotValid = false;
+    applyMsg.Command = m_logs[getSlicesIndexFromLogIndex(m_lastApplied)].command();
+    applyMsg.CommandIndex = m_lastApplied;
+    applyMsgs.emplace_back(applyMsg);
+    // DPrintf("[ applyLog func-rf{%v} ] apply Log,logIndex:%v ,logTerm:{%v},command:{%v}\n", rf.me,
+    // rf.lastApplied, rf.logs[rf.getSlicesIndexFromLogIndex(rf.lastApplied)].LogTerm,
+    // rf.logs[rf.getSlicesIndexFromLogIndex(rf.lastApplied)].Command)
+  }
+  return applyMsgs;
}

// the index that a new command should be assigned
int Raft::getNewCommandIndex() {
-    // if len(logs)==0 it is the snapshot index + 1, otherwise the index of the last log entry + 1
-    auto lastLogIndex = getLastLogIndex();
-    return lastLogIndex + 1;
+  // if len(logs)==0 it is the snapshot index + 1, otherwise the index of the last log entry + 1
+  auto lastLogIndex = getLastLogIndex();
+  return lastLogIndex + 1;
}
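getApplyLogs above fills ApplyMsg objects whose definition lies outside this patch. Based only on the fields this file actually uses (CommandValid, Command, CommandIndex, SnapshotValid, Snapshot, SnapshotTerm, SnapshotIndex), the struct plausibly looks like the sketch below; the exact field types and defaults are assumptions:

#include <string>

// Plausible shape of ApplyMsg, reconstructed from its uses in raft.cpp;
// types and default values are assumptions, not the project's definition.
struct ApplyMsg {
  bool CommandValid = false;   // true when this message carries a log command
  std::string Command;         // serialized command for the state machine
  int CommandIndex = -1;       // log index of the command

  bool SnapshotValid = false;  // true when this message carries a snapshot
  std::string Snapshot;        // raw snapshot bytes
  int SnapshotTerm = -1;       // term of the last entry covered by the snapshot
  int SnapshotIndex = -1;      // index of the last entry covered by the snapshot
};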
// getPrevLogInfo
// called by the leader; input: a server index; output: the prevLogIndex and prevLogTerm for the AppendEntries to send
void Raft::getPrevLogInfo(int server, int *preIndex, int *preTerm) {
-    // if logs is empty return (0,0); otherwise derive the values from the nextIndex array
-    if (m_nextIndex[server] ==
-        m_lastSnapshotIncludeIndex + 1) {
-        // the entry to send is the very first one, so just return m_lastSnapshotIncludeIndex and m_lastSnapshotIncludeTerm
-        *preIndex = m_lastSnapshotIncludeIndex;
-        *preTerm = m_lastSnapshotIncludeTerm;
-        return;
-    }
-    auto nextIndex = m_nextIndex[server];
-    *preIndex = nextIndex - 1;
-    *preTerm = m_logs[getSlicesIndexFromLogIndex(*preIndex)].logterm();
+  // if logs is empty return (0,0); otherwise derive the values from the nextIndex array
+  if (m_nextIndex[server] == m_lastSnapshotIncludeIndex + 1) {
+    // the entry to send is the very first one, so just return m_lastSnapshotIncludeIndex and m_lastSnapshotIncludeTerm
+    *preIndex = m_lastSnapshotIncludeIndex;
+    *preTerm = m_lastSnapshotIncludeTerm;
+    return;
+  }
+  auto nextIndex = m_nextIndex[server];
+  *preIndex = nextIndex - 1;
+  *preTerm = m_logs[getSlicesIndexFromLogIndex(*preIndex)].logterm();
}

// GetState return currentTerm and whether this server
// believes it is the Leader.
void Raft::GetState(int *term, bool *isLeader) {
-    m_mtx.lock();
-    DEFER {
-        // todo: not yet sure whether this can deadlock
-        m_mtx.unlock();
-    };
-
-    // Your code here (2A).
-    *term = m_currentTerm;
-    *isLeader = (m_status == Leader);
+  m_mtx.lock();
+  DEFER {
+    // todo: not yet sure whether this can deadlock
+    m_mtx.unlock();
+  };
+
+  // Your code here (2A).
+  *term = m_currentTerm;
+  *isLeader = (m_status == Leader);
}
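GetState and several functions below rely on a DEFER macro defined elsewhere in the project. One common way to build such a Go-style scope guard in C++ is a lambda-holding RAII object; the following is an illustrative implementation, not necessarily the project's own:

#include <utility>

// Minimal scope guard: runs the stored callable when the guard leaves scope,
// mimicking Go's defer. Illustrative only -- the project's DEFER may differ.
template <typename F>
class ScopeGuard {
 public:
  explicit ScopeGuard(F f) : f_(std::move(f)) {}
  ~ScopeGuard() { f_(); }
  ScopeGuard(const ScopeGuard &) = delete;
  ScopeGuard &operator=(const ScopeGuard &) = delete;

 private:
  F f_;
};

struct DeferOp {};
template <typename F>
ScopeGuard<F> operator<<(DeferOp, F f) {
  return ScopeGuard<F>(std::move(f));
}
#define DEFER_CAT_(a, b) a##b
#define DEFER_CAT(a, b) DEFER_CAT_(a, b)
// With this machinery, `DEFER { m_mtx.unlock(); };` expands to a uniquely
// named guard holding the brace body as a lambda.
#define DEFER auto DEFER_CAT(defer_guard_, __LINE__) = DeferOp() << [&]()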
void Raft::InstallSnapshot(const raftRpcProctoc::InstallSnapshotRequest *args,
                           raftRpcProctoc::InstallSnapshotResponse *reply) {
-    m_mtx.lock();
-    DEFER {
-        m_mtx.unlock();
-    };
-    if (args->term() < m_currentTerm) {
-        reply->set_term(m_currentTerm);
-        // DPrintf("[func-InstallSnapshot-rf{%v}] leader{%v}.term{%v}<rf{%v}.term{%v}", rf.me, args.LeaderId, args.Term, rf.me, rf.currentTerm)
-        return;
-    }
-    if (args->term() > m_currentTerm) {
-        // in both remaining cases the snapshot has to be accepted
-        m_currentTerm = args->term();
-        m_votedFor = -1;
-        m_status = Follower;
-        persist();
-    }
-    m_status = Follower;
-    m_lastResetElectionTime = now();
-    // outdated snapshot
-    if (args->lastsnapshotincludeindex() <= m_lastSnapshotIncludeIndex) {
-        // DPrintf("[func-InstallSnapshot-rf{%v}] leader{%v}.LastSnapShotIncludeIndex{%v} <= rf{%v}.lastSnapshotIncludeIndex{%v} ", rf.me, args.LeaderId, args.LastSnapShotIncludeIndex, rf.me, rf.lastSnapshotIncludeIndex)
-        return;
-    }
-    // truncate the log and update commitIndex and lastApplied
-    // truncation covers two cases: the log is longer than the snapshot (cut a prefix) or shorter (clear it all) -- really the same case,
-    // but with the current getSlicesIndexFromLogIndex implementation a nonexistent logIndex must not be passed in or it panics
-    auto lastLogIndex = getLastLogIndex();
-
-    if (lastLogIndex > args->lastsnapshotincludeindex()) {
-        m_logs.erase(m_logs.begin(), m_logs.begin() + getSlicesIndexFromLogIndex(args->lastsnapshotincludeindex()) + 1);
-    } else {
-        m_logs.clear();
-    }
-    m_commitIndex = std::max(m_commitIndex, args->lastsnapshotincludeindex());
-    m_lastApplied = std::max(m_lastApplied, args->lastsnapshotincludeindex());
-    m_lastSnapshotIncludeIndex = args->lastsnapshotincludeindex();
-    m_lastSnapshotIncludeTerm = args->lastsnapshotincludeterm();
-
-    reply->set_term(m_currentTerm);
-    ApplyMsg msg;
-    msg.SnapshotValid = true;
-    msg.Snapshot = args->data();
-    msg.SnapshotTerm = args->lastsnapshotincludeterm();
-    msg.SnapshotIndex = args->lastsnapshotincludeindex();
-
-    applyChan->Push(msg);
-    std::thread t(&Raft::pushMsgToKvServer, this, msg);  // spawn a new thread and hand it the arguments
-    t.detach();
-    // see whether this can be optimized further
-    // DPrintf("[func-InstallSnapshot-rf{%v}] receive snapshot from {%v} ,LastSnapShotIncludeIndex ={%v} ", rf.me, args.LeaderId, args.LastSnapShotIncludeIndex)
-    // persist
-    m_persister->Save(persistData(), args->data());
-}
-
-void Raft::pushMsgToKvServer(ApplyMsg msg) {
-    applyChan->Push(msg);
+  m_mtx.lock();
+  DEFER { m_mtx.unlock(); };
+  if (args->term() < m_currentTerm) {
+    reply->set_term(m_currentTerm);
+    // DPrintf("[func-InstallSnapshot-rf{%v}] leader{%v}.term{%v}<rf{%v}.term{%v}", rf.me,
+    // args.LeaderId, args.Term, rf.me, rf.currentTerm)
+    return;
+  }
+  if (args->term() > m_currentTerm) {
+    // in both remaining cases the snapshot has to be accepted
+    m_currentTerm = args->term();
+    m_votedFor = -1;
+    m_status = Follower;
+    persist();
+  }
+  m_status = Follower;
+  m_lastResetElectionTime = now();
+  // outdated snapshot
+  if (args->lastsnapshotincludeindex() <= m_lastSnapshotIncludeIndex) {
+    // DPrintf("[func-InstallSnapshot-rf{%v}] leader{%v}.LastSnapShotIncludeIndex{%v} <=
+    // rf{%v}.lastSnapshotIncludeIndex{%v} ", rf.me, args.LeaderId, args.LastSnapShotIncludeIndex, rf.me,
+    // rf.lastSnapshotIncludeIndex)
+    return;
+  }
+  // truncate the log and update commitIndex and lastApplied
+  // truncation covers two cases: the log is longer than the snapshot (cut a prefix) or shorter (clear it all) --
+  // really the same case, but with the current getSlicesIndexFromLogIndex implementation a nonexistent logIndex must
+  // not be passed in or it panics
+  auto lastLogIndex = getLastLogIndex();
+
+  if (lastLogIndex > args->lastsnapshotincludeindex()) {
+    m_logs.erase(m_logs.begin(), m_logs.begin() + getSlicesIndexFromLogIndex(args->lastsnapshotincludeindex()) + 1);
+  } else {
+    m_logs.clear();
+  }
+  m_commitIndex = std::max(m_commitIndex, args->lastsnapshotincludeindex());
+  m_lastApplied = std::max(m_lastApplied, args->lastsnapshotincludeindex());
+  m_lastSnapshotIncludeIndex = args->lastsnapshotincludeindex();
+  m_lastSnapshotIncludeTerm = args->lastsnapshotincludeterm();
+
+  reply->set_term(m_currentTerm);
+  ApplyMsg msg;
+  msg.SnapshotValid = true;
+  msg.Snapshot = args->data();
+  msg.SnapshotTerm = args->lastsnapshotincludeterm();
+  msg.SnapshotIndex = args->lastsnapshotincludeindex();
+
+  applyChan->Push(msg);
+  std::thread t(&Raft::pushMsgToKvServer, this, msg);  // spawn a new thread and hand it the arguments
+  t.detach();
+  // see whether this can be optimized further
+  // DPrintf("[func-InstallSnapshot-rf{%v}] receive snapshot from {%v} ,LastSnapShotIncludeIndex ={%v} ", rf.me,
+  // args.LeaderId, args.LastSnapShotIncludeIndex)
+  // persist
+  m_persister->Save(persistData(), args->data());
}

+void Raft::pushMsgToKvServer(ApplyMsg msg) { applyChan->Push(msg); }

void Raft::leaderHearBeatTicker() {
-    while (true) {
-        // Your code here (2A)
-        auto nowTime = now();
-        m_mtx.lock();
-
-        auto suitableSleepTime = std::chrono::milliseconds(HeartBeatTimeout) + m_lastResetHearBeatTime - nowTime;
-        m_mtx.unlock();
-        if (suitableSleepTime.count() < 1) {
-            suitableSleepTime = std::chrono::milliseconds(1);
-        }
-        std::this_thread::sleep_for(suitableSleepTime);
-        if ((m_lastResetHearBeatTime - nowTime).count() > 0) {
-            // the timer was reset while we slept, so no timeout occurred; sleep again
-            continue;
-        }
-        doHeartBeat();
-    }
-}
-
-void Raft::leaderSendSnapShot(int server) {
-    m_mtx.lock();
-    raftRpcProctoc::InstallSnapshotRequest args;
-    args.set_leaderid(m_me);
-    args.set_term(m_currentTerm);
-    args.set_lastsnapshotincludeindex(m_lastSnapshotIncludeIndex);
-    args.set_lastsnapshotincludeterm(m_lastSnapshotIncludeTerm);
-    args.set_data(m_persister->ReadSnapshot());
-
-    raftRpcProctoc::InstallSnapshotResponse reply;
-    m_mtx.unlock();
-    bool ok = m_peers[server]->InstallSnapshot(&args, &reply);
-    m_mtx.lock();
-    DEFER {
-        m_mtx.unlock();
-    };
-    if (!ok) { return; }
-    if (m_status != Leader || m_currentTerm != args.term()) {
-        return;  // the lock was released in between, so the state may already have changed
-    }
-    // the term must be checked at all times
-    if (reply.term() > m_currentTerm) {
-        // the three-way change: status, term, and vote
-        m_currentTerm = reply.term();
-        m_votedFor = -1;
-        m_status = Follower;
-        persist();
-        m_lastResetElectionTime = now();
-        return;
-    }
-    m_matchIndex[server] = args.lastsnapshotincludeindex();
-    m_nextIndex[server] = m_matchIndex[server] + 1;
+  while (true) {
+    // Your code here (2A)
+    auto nowTime = now();
+    m_mtx.lock();
+
+    auto suitableSleepTime = std::chrono::milliseconds(HeartBeatTimeout) + m_lastResetHearBeatTime - nowTime;
+    m_mtx.unlock();
+    if (suitableSleepTime.count() < 1) {
+      suitableSleepTime = std::chrono::milliseconds(1);
+    }
+    std::this_thread::sleep_for(suitableSleepTime);
+    if ((m_lastResetHearBeatTime - nowTime).count() > 0) {
+      // the timer was reset while we slept, so no timeout occurred; sleep again
+      continue;
+    }
+    doHeartBeat();
+  }
+}
+
+void Raft::leaderSendSnapShot(int server) {
+  m_mtx.lock();
+  raftRpcProctoc::InstallSnapshotRequest args;
+  args.set_leaderid(m_me);
+  args.set_term(m_currentTerm);
+  args.set_lastsnapshotincludeindex(m_lastSnapshotIncludeIndex);
+
args.set_lastsnapshotincludeterm(m_lastSnapshotIncludeTerm); + args.set_data(m_persister->ReadSnapshot()); + + raftRpcProctoc::InstallSnapshotResponse reply; + m_mtx.unlock(); + bool ok = m_peers[server]->InstallSnapshot(&args, &reply); + m_mtx.lock(); + DEFER { m_mtx.unlock(); }; + if (!ok) { + return; + } + if (m_status != Leader || m_currentTerm != args.term()) { + return; //中间释放过锁,可能状态已经改变了 + } + // 无论什么时候都要判断term + if (reply.term() > m_currentTerm) { + //三变 + m_currentTerm = reply.term(); + m_votedFor = -1; + m_status = Follower; + persist(); + m_lastResetElectionTime = now(); + return; + } + m_matchIndex[server] = args.lastsnapshotincludeindex(); + m_nextIndex[server] = m_matchIndex[server] + 1; } void Raft::leaderUpdateCommitIndex() { - m_commitIndex = m_lastSnapshotIncludeIndex; - //for index := rf.commitIndex+1;index < len(rf.log);index++ { - //for index := rf.getLastIndex();index>=rf.commitIndex+1;index--{ - for (int index = getLastLogIndex(); index >= m_lastSnapshotIncludeIndex + 1; index--) { - int sum = 0; - for (int i = 0; i < m_peers.size(); i++) { - if (i == m_me) { - sum += 1; - continue; - } - if (m_matchIndex[i] >= index) { - sum += 1; - } - } + m_commitIndex = m_lastSnapshotIncludeIndex; + // for index := rf.commitIndex+1;index < len(rf.log);index++ { + // for index := rf.getLastIndex();index>=rf.commitIndex+1;index--{ + for (int index = getLastLogIndex(); index >= m_lastSnapshotIncludeIndex + 1; index--) { + int sum = 0; + for (int i = 0; i < m_peers.size(); i++) { + if (i == m_me) { + sum += 1; + continue; + } + if (m_matchIndex[i] >= index) { + sum += 1; + } + } - // !!!只有当前term有新提交的,才会更新commitIndex!!!! - //log.Printf("lastSSP:%d, index: %d, commitIndex: %d, lastIndex: %d",rf.lastSSPointIndex, index, rf.commitIndex, rf.getLastIndex()) - if (sum >= m_peers.size() / 2 + 1 && getLogTermFromLogIndex(index) == m_currentTerm) { - m_commitIndex = index; - break; - } + // !!!只有当前term有新提交的,才会更新commitIndex!!!! 
+    // log.Printf("lastSSP:%d, index: %d, commitIndex: %d, lastIndex: %d",rf.lastSSPointIndex, index, rf.commitIndex,
+    // rf.getLastIndex())
+    if (sum >= m_peers.size() / 2 + 1 && getLogTermFromLogIndex(index) == m_currentTerm) {
+      m_commitIndex = index;
+      break;
    }
-    // DPrintf("[func-leaderUpdateCommitIndex()-rf{%v}] Leader %d(term%d) commitIndex %d",rf.me,rf.me,rf.currentTerm,rf.commitIndex)
+  }
+  // DPrintf("[func-leaderUpdateCommitIndex()-rf{%v}] Leader %d(term%d) commitIndex
+  // %d",rf.me,rf.me,rf.currentTerm,rf.commitIndex)
}

// the caller must guarantee that logIndex exists, i.e. >= rf.lastSnapshotIncludeIndex and <= rf.getLastLogIndex()
bool Raft::matchLog(int logIndex, int logTerm) {
-    myAssert(logIndex >= m_lastSnapshotIncludeIndex && logIndex <= getLastLogIndex(),
-             format("不满足:logIndex{%d}>=rf.lastSnapshotIncludeIndex{%d}&&logIndex{%d}<=rf.getLastLogIndex{%d}",
-                    logIndex, m_lastSnapshotIncludeIndex, logIndex, getLastLogIndex()));
-    return logTerm == getLogTermFromLogIndex(logIndex);
-    // if logIndex == rf.lastSnapshotIncludeIndex {
-    //     return logTerm == rf.lastSnapshotIncludeTerm
-    // } else {
-    //     return logTerm == rf.logs[rf.getSlicesIndexFromLogIndex(logIndex)].LogTerm
-    // }
+  myAssert(logIndex >= m_lastSnapshotIncludeIndex && logIndex <= getLastLogIndex(),
+           format("不满足:logIndex{%d}>=rf.lastSnapshotIncludeIndex{%d}&&logIndex{%d}<=rf.getLastLogIndex{%d}",
+                  logIndex, m_lastSnapshotIncludeIndex, logIndex, getLastLogIndex()));
+  return logTerm == getLogTermFromLogIndex(logIndex);
+  // if logIndex == rf.lastSnapshotIncludeIndex {
+  //   return logTerm == rf.lastSnapshotIncludeTerm
+  // } else {
+  //   return logTerm == rf.logs[rf.getSlicesIndexFromLogIndex(logIndex)].LogTerm
+  // }
}

void Raft::persist() {
-    // Your code here (2C).
-    auto data = persistData();
-    m_persister->SaveRaftState(data);
-    // fmt.Printf("RaftNode[%d] persist starts, currentTerm[%d] voteFor[%d] log[%v]\n", rf.me, rf.currentTerm, rf.votedFor, rf.logs)
-    // fmt.Printf("%v\n", string(data))
+  // Your code here (2C).
+  auto data = persistData();
+  m_persister->SaveRaftState(data);
+  // fmt.Printf("RaftNode[%d] persist starts, currentTerm[%d] voteFor[%d] log[%v]\n", rf.me, rf.currentTerm,
+  // rf.votedFor, rf.logs) fmt.Printf("%v\n", string(data))
}

void Raft::RequestVote(const raftRpcProctoc::RequestVoteArgs *args, raftRpcProctoc::RequestVoteReply *reply) {
-    std::lock_guard<std::mutex> lg(m_mtx);
-
-    // Your code here (2A, 2B).
-    DEFER {
-        // persist first, then release the lock
-        persist();
-    };
-    // handle the three cases of args' term separately: greater than, less than, and equal to our own term
-    // reason: a network partition happened and this candidate is already OutOfDate
-    if (args->term() < m_currentTerm) {
-        reply->set_term(m_currentTerm);
-        reply->set_votestate(Expire);
-        reply->set_votegranted(false);
-        return;
-    }
-    // fig2 bottom right: whenever an rpc request or response carries a term larger than our own, update the term and become a follower
-    if (args->term() > m_currentTerm) {
-        // DPrintf("[ func-RequestVote-rf(%v) ] : 变成follower且更新term 因为candidate{%v}的term{%v}> rf{%v}.term{%v}\n ", rf.me, args.CandidateId, args.Term, rf.me, rf.currentTerm)
-        m_status = Follower;
-        m_currentTerm = args->term();
-        m_votedFor = -1;
-
-        // timer reset happens on receiving the leader's AE, on starting an election, and on granting a vote
-        // once the term is updated here, votedFor must be reset to -1 as well
-    }
-    myAssert(args->term() == m_currentTerm,
-             format("[func--rf{%d}] 前面校验过args.Term==rf.currentTerm,这里却不等", m_me));
-    // all terms are equal now (the smaller one has already caught up to args' term),
-    // so check whether the log term and index match
-    int lastLogTerm = getLastLogIndex();
-    // grant the vote only if we have not voted yet and the candidate's log is at least as up-to-date as ours
-    if (!UpToDate(args->lastlogindex(), args->lastlogterm())) {
-        // args.LastLogTerm < lastLogTerm || (args.LastLogTerm == lastLogTerm && args.LastLogIndex < lastLogIndex) {
-        // the log is too old
-        if (args->lastlogterm() < lastLogTerm) {
-            // DPrintf("[ func-RequestVote-rf(%v) ] : refuse voted rf[%v] ,because candidate_lastlog_term{%v} < lastlog_term{%v}\n", rf.me, args.CandidateId, args.LastLogTerm, lastLogTerm)
-        } else {
-            // DPrintf("[ func-RequestVote-rf(%v) ] : refuse voted rf[%v] ,because candidate_log_index{%v} < log_index{%v}\n", rf.me, args.CandidateId, args.LastLogIndex, rf.getLastLogIndex())
-        }
-        reply->set_term(m_currentTerm);
-        reply->set_votestate(Voted);
-        reply->set_votegranted(false);
-
-        return;
-    }
-    // todo: when can rf.votedFor == args.CandidateId happen? even if the candidate times out and runs again, its term would differ
-    // it can happen when a request is lost and resent due to poor network quality!!!!
-    if (m_votedFor != -1 && m_votedFor != args->candidateid()) {
-        // DPrintf("[ func-RequestVote-rf(%v) ] : refuse voted rf[%v] ,because has voted\n", rf.me, args.CandidateId)
-        reply->set_term(m_currentTerm);
-        reply->set_votestate(Voted);
-        reply->set_votegranted(false);
-
-        return;
-    } else {
-        m_votedFor = args->candidateid();
-        m_lastResetElectionTime = now();  // the timer should be reset only when the vote is actually granted
-        // DPrintf("[ func-RequestVote-rf(%v) ] : voted rf[%v]\n", rf.me, rf.votedFor)
-        reply->set_term(m_currentTerm);
-        reply->set_votestate(Normal);
-        reply->set_votegranted(true);
-
-        return;
-    }
+  std::lock_guard<std::mutex> lg(m_mtx);
+  // Your code here (2A, 2B).
+  DEFER {
+    // persist first, then release the lock
+    persist();
+  };
+  // handle the three cases of args' term separately: greater than, less than, and equal to our own term
+  // reason: a network partition happened and this candidate is already OutOfDate
+  if (args->term() < m_currentTerm) {
+    reply->set_term(m_currentTerm);
+    reply->set_votestate(Expire);
+    reply->set_votegranted(false);
+    return;
+  }
+  // fig2 bottom right: whenever an rpc request or response carries a term larger than our own, update the term and
+  // become a follower
+  if (args->term() > m_currentTerm) {
+    // DPrintf("[ func-RequestVote-rf(%v) ] : 变成follower且更新term 因为candidate{%v}的term{%v}>
+    // rf{%v}.term{%v}\n ", rf.me, args.CandidateId, args.Term, rf.me, rf.currentTerm)
+    m_status = Follower;
+    m_currentTerm = args->term();
+    m_votedFor = -1;

+    // timer reset happens on receiving the leader's AE, on starting an election, and on granting a vote
+    // once the term is updated here, votedFor must be reset to -1 as well
+  }
+  myAssert(args->term() == m_currentTerm,
+           format("[func--rf{%d}] 前面校验过args.Term==rf.currentTerm,这里却不等", m_me));
+  // all terms are equal now (the smaller one has already caught up to args' term),
+  // so check whether the log term and index match
+  int lastLogTerm = getLastLogIndex();
+  // grant the vote only if we have not voted yet and the candidate's log is at least as up-to-date as ours
+  if (!UpToDate(args->lastlogindex(), args->lastlogterm())) {
+    // args.LastLogTerm < lastLogTerm || (args.LastLogTerm == lastLogTerm && args.LastLogIndex < lastLogIndex) {
+    // the log is too old
+    if (args->lastlogterm() < lastLogTerm) {
+      // DPrintf("[ func-RequestVote-rf(%v) ] : refuse voted rf[%v] ,because
+      // candidate_lastlog_term{%v} < lastlog_term{%v}\n", rf.me, args.CandidateId, args.LastLogTerm,
+      // lastLogTerm)
    } else {
+      // DPrintf("[ func-RequestVote-rf(%v) ] : refuse voted rf[%v] ,because
+      // candidate_log_index{%v} < log_index{%v}\n", rf.me, args.CandidateId, args.LastLogIndex,
+      // rf.getLastLogIndex())
    }
+    reply->set_term(m_currentTerm);
+    reply->set_votestate(Voted);
+    reply->set_votegranted(false);
+
+    return;
+  }
+  // todo: when can rf.votedFor == args.CandidateId happen? even if the candidate times out and runs again, its term
+  // would differ
+  // it can happen when a request is lost and resent due to poor network quality!!!!
+  if (m_votedFor != -1 && m_votedFor != args->candidateid()) {
+    // DPrintf("[ func-RequestVote-rf(%v) ] : refuse voted rf[%v] ,because has voted\n", rf.me,
+    // args.CandidateId)
+    reply->set_term(m_currentTerm);
+    reply->set_votestate(Voted);
+    reply->set_votegranted(false);
+
+    return;
+  } else {
+    m_votedFor = args->candidateid();
+    m_lastResetElectionTime = now();  // the timer should be reset only when the vote is actually granted
+    // DPrintf("[ func-RequestVote-rf(%v) ] : voted rf[%v]\n", rf.me, rf.votedFor)
+    reply->set_term(m_currentTerm);
+    reply->set_votestate(Normal);
+    reply->set_votegranted(true);
+
+    return;
+  }
}

bool Raft::UpToDate(int index, int term) {
-    // lastEntry := rf.log[len(rf.log)-1]
-
-    int lastIndex = -1;
-    int lastTerm = -1;
-    getLastLogIndexAndTerm(&lastIndex, &lastTerm);
-    return term > lastTerm || (term == lastTerm && index >= lastIndex);
+  // lastEntry := rf.log[len(rf.log)-1]

+  int lastIndex = -1;
+  int lastTerm = -1;
+  getLastLogIndexAndTerm(&lastIndex, &lastTerm);
+  return term > lastTerm || (term == lastTerm && index >= lastIndex);
}

void Raft::getLastLogIndexAndTerm(int *lastLogIndex, int *lastLogTerm) {
-    if (m_logs.empty()) {
-        *lastLogIndex = m_lastSnapshotIncludeIndex;
-        *lastLogTerm = m_lastSnapshotIncludeTerm;
-        return;
-    } else {
-        *lastLogIndex = m_logs[m_logs.size() - 1].logindex();
-        *lastLogTerm = m_logs[m_logs.size() - 1].logterm();
-        return;
-    }
+  if (m_logs.empty()) {
+    *lastLogIndex = m_lastSnapshotIncludeIndex;
+    *lastLogTerm = m_lastSnapshotIncludeTerm;
+    return;
+  } else {
+    *lastLogIndex = m_logs[m_logs.size() - 1].logindex();
+    *lastLogTerm = m_logs[m_logs.size() - 1].logterm();
+    return;
+  }
}

int Raft::getLastLogIndex() {
-    int lastLogIndex = -1;
-    int _ = -1;
-    getLastLogIndexAndTerm(&lastLogIndex, &_);
-    return lastLogIndex;
+  int lastLogIndex = -1;
+  int _ = -1;
+  getLastLogIndexAndTerm(&lastLogIndex, &_);
+  return lastLogIndex;
}

int Raft::getLogTermFromLogIndex(int logIndex) {
-    myAssert(logIndex >= m_lastSnapshotIncludeIndex,
-             format("[func-getSlicesIndexFromLogIndex-rf{%d}] index{%d} < rf.lastSnapshotIncludeIndex{%d}", m_me,
-                    logIndex, m_lastSnapshotIncludeIndex));
-
-    int lastLogIndex = getLastLogIndex();
+  myAssert(logIndex >= m_lastSnapshotIncludeIndex,
+           format("[func-getSlicesIndexFromLogIndex-rf{%d}] index{%d} < rf.lastSnapshotIncludeIndex{%d}", m_me,
+                  logIndex, m_lastSnapshotIncludeIndex));
+  int lastLogIndex = getLastLogIndex();

-    myAssert(logIndex <= lastLogIndex,
-             format("[func-getSlicesIndexFromLogIndex-rf{%d}] logIndex{%d} > lastLogIndex{%d}", m_me, logIndex,
-                    lastLogIndex));
+  myAssert(logIndex <= lastLogIndex, format("[func-getSlicesIndexFromLogIndex-rf{%d}] logIndex{%d} > lastLogIndex{%d}",
+                                            m_me, logIndex, lastLogIndex));

-    if (logIndex == m_lastSnapshotIncludeIndex) {
-        return m_lastSnapshotIncludeTerm;
-    } else {
-        return m_logs[getSlicesIndexFromLogIndex(logIndex)].logterm();
-    }
+  if (logIndex == m_lastSnapshotIncludeIndex) {
+    return m_lastSnapshotIncludeTerm;
+  } else {
+    return m_logs[getSlicesIndexFromLogIndex(logIndex)].logterm();
+  }
}

-int Raft::GetRaftStateSize() {
-    return m_persister->RaftStateSize();
-}
+int Raft::GetRaftStateSize() { return m_persister->RaftStateSize(); }

// map a logical log index to its actual slot in m_logs
// constraint: the given logIndex must currently live in m_logs (i.e. it is not covered by the snapshot)
int Raft::getSlicesIndexFromLogIndex(int logIndex) {
-    myAssert(logIndex > m_lastSnapshotIncludeIndex,
-             format("[func-getSlicesIndexFromLogIndex-rf{%d}] index{%d} <= rf.lastSnapshotIncludeIndex{%d}", m_me,
-                    logIndex, m_lastSnapshotIncludeIndex));
-    int lastLogIndex = getLastLogIndex();
-    myAssert(logIndex <= lastLogIndex,
-             format("[func-getSlicesIndexFromLogIndex-rf{%d}] logIndex{%d} > lastLogIndex{%d}", m_me, logIndex,
-                    lastLogIndex));
-    int SliceIndex = logIndex - m_lastSnapshotIncludeIndex - 1;
-    return SliceIndex;
+  myAssert(logIndex > m_lastSnapshotIncludeIndex,
+           format("[func-getSlicesIndexFromLogIndex-rf{%d}] index{%d} <= rf.lastSnapshotIncludeIndex{%d}", m_me,
+                  logIndex, m_lastSnapshotIncludeIndex));
+  int lastLogIndex = getLastLogIndex();
+  myAssert(logIndex <= lastLogIndex, format("[func-getSlicesIndexFromLogIndex-rf{%d}] logIndex{%d} > lastLogIndex{%d}",
+                                            m_me, logIndex, lastLogIndex));
+  int SliceIndex = logIndex - m_lastSnapshotIncludeIndex - 1;
+  return SliceIndex;
}
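The mapping in getSlicesIndexFromLogIndex is the usual snapshot-offset translation: with m_lastSnapshotIncludeIndex = 5, logical log index 6 lives at vector slot 0. A tiny self-contained check of that arithmetic:

#include <cassert>

// Standalone check of the logIndex -> vector-slot arithmetic used above.
int sliceIndexFor(int logIndex, int lastSnapshotIncludeIndex) {
  return logIndex - lastSnapshotIncludeIndex - 1;
}

int main() {
  // With a snapshot covering indices 1..5, the first retained entry is
  // logical index 6 and sits at position 0 of m_logs.
  assert(sliceIndexFor(6, 5) == 0);
  assert(sliceIndexFor(10, 5) == 4);
  return 0;
}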
bool Raft::sendRequestVote(int server, std::shared_ptr<raftRpcProctoc::RequestVoteArgs> args,
-                           std::shared_ptr<raftRpcProctoc::RequestVoteReply> reply,
-                           std::shared_ptr<int> votedNum) {
-    // this ok means the network call went through, not that the RequestVote rpc granted a vote
-    // ok := rf.peers[server].Call("Raft.RequestVote", args, reply)
-    // todo
-    auto start = now();
-    DPrintf("[func-sendRequestVote rf{%d}] 向server{%d} 發送 RequestVote 開始", m_me, m_currentTerm, getLastLogIndex());
-    bool ok = m_peers[server]->RequestVote(args.get(), reply.get());
-    DPrintf("[func-sendRequestVote rf{%d}] 向server{%d} 發送 RequestVote 完畢,耗時:{%d} ms", m_me, m_currentTerm,
-            getLastLogIndex(), now() - start);
-
-    if (!ok) {
-        return ok;  // unclear why, but without this the tests fail when a server crashes -- 2B does not pass. todo
-    }
-    // for !ok {
-    //
-    //     // ok := rf.peers[server].Call("Raft.RequestVote", args, reply)
-    //     // if ok {
-    //     //     break
-    //     // }
-    // }  // the request went out, but there is no guarantee it arrived
-    // process the response; remember to check the term whenever a reply comes in
-    std::lock_guard<std::mutex> lg(m_mtx);
-    if (reply->term() > m_currentTerm) {
-        m_status = Follower;  // the three-way change: status, term, and vote
-        m_currentTerm = reply->term();
-        m_votedFor = -1;
-        persist();
-        return true;
-    } else if (reply->term() < m_currentTerm) {
-        return true;
-    }
-    myAssert(reply->term() == m_currentTerm,
-             format("assert {reply.Term==rf.currentTerm} fail"));
-
-    // todo: this part does not follow the blog post
-    if (!reply->votegranted()) {
-        return true;
-    }
-
-    *votedNum = *votedNum + 1;
-    if (*votedNum >= m_peers.size() / 2 + 1) {
-        // become leader
-        *votedNum = 0;
-        if (m_status == Leader) {
-            // already the leader: there is nothing further to do
-            myAssert(false,
-                     format("[func-sendRequestVote-rf{%d}] term:{%d} 同一个term当两次领导,error", m_me, m_currentTerm));
-        }
-        // first time becoming leader: initialize the state plus nextIndex and matchIndex
-        m_status = Leader;
-
-        DPrintf("[func-sendRequestVote rf{%d}] elect success ,current term:{%d} ,lastLogIndex:{%d}\n", m_me,
-                m_currentTerm, getLastLogIndex());
-
-        int lastLogIndex = getLastLogIndex();
-        for (int i = 0; i < m_nextIndex.size(); i++) {
-            m_nextIndex[i] = lastLogIndex + 1;  // valid indices start at 1, hence the +1
-            m_matchIndex[i] = 0;  // every new leader starts these at 0, see fig2
-        }
-        std::thread t(&Raft::doHeartBeat, this);  // announce leadership to the other nodes right away
-        t.detach();
-
-        persist();
-    }
-    return true;
-}
+                           std::shared_ptr<raftRpcProctoc::RequestVoteReply> reply, std::shared_ptr<int> votedNum) {
+  // this ok means the network call went through, not that the RequestVote rpc granted a vote
+  // ok := rf.peers[server].Call("Raft.RequestVote", args, reply)
+  // todo
+  auto start = now();
+  DPrintf("[func-sendRequestVote rf{%d}] 向server{%d} 發送 RequestVote 開始", m_me, m_currentTerm, getLastLogIndex());
+  bool ok = m_peers[server]->RequestVote(args.get(), reply.get());
+  DPrintf("[func-sendRequestVote rf{%d}] 向server{%d} 發送 RequestVote 完畢,耗時:{%d} ms", m_me, m_currentTerm,
+          getLastLogIndex(), now() - start);
+
+  if (!ok) {
+    return ok;  // unclear why, but without this the tests fail when a server crashes -- 2B does not pass. todo
+  }
+  // for !ok {
+  //
+  //   // ok := rf.peers[server].Call("Raft.RequestVote", args, reply)
+  //   // if ok {
+  //   //   break
+  //   // }
+  // }  // the request went out, but there is no guarantee it arrived
+  // process the response; remember to check the term whenever a reply comes in
+  std::lock_guard<std::mutex> lg(m_mtx);
+  if (reply->term() > m_currentTerm) {
+    m_status = Follower;  // the three-way change: status, term, and vote
+    m_currentTerm = reply->term();
+    m_votedFor = -1;
+    persist();
+    return true;
+  } else if (reply->term() < m_currentTerm) {
+    return true;
+  }
+  myAssert(reply->term() == m_currentTerm, format("assert {reply.Term==rf.currentTerm} fail"));

+  // todo: this part does not follow the blog post
+  if (!reply->votegranted()) {
+    return true;
+  }

+  *votedNum = *votedNum + 1;
+  if (*votedNum >= m_peers.size() / 2 + 1) {
+    // become leader
+    *votedNum = 0;
+    if (m_status == Leader) {
+      // already the leader: there is nothing further to do
+      myAssert(false,
+               format("[func-sendRequestVote-rf{%d}] term:{%d} 同一个term当两次领导,error", m_me, m_currentTerm));
+    }
+    // first time becoming leader: initialize the state plus nextIndex and matchIndex
+    m_status = Leader;
+    DPrintf("[func-sendRequestVote rf{%d}] elect success ,current term:{%d} ,lastLogIndex:{%d}\n", m_me, m_currentTerm,
+            getLastLogIndex());

+    int lastLogIndex = getLastLogIndex();
+    for (int i = 0; i < m_nextIndex.size(); i++) {
+      m_nextIndex[i] = lastLogIndex + 1;  // valid indices start at 1, hence the +1
+      m_matchIndex[i] = 0;                // every new leader starts these at 0, see fig2
+    }
+    std::thread t(&Raft::doHeartBeat, this);  // announce leadership to the other nodes right away
+    t.detach();
+    persist();
+  }
+  return true;
+}
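The promotion above fires once *votedNum reaches m_peers.size() / 2 + 1 -- the same quorum rule sendAppendEntries uses for commit. Integer division makes this a strict majority for both odd and even cluster sizes; a small standalone check:

#include <cassert>

// The quorum rule used for both vote counting and AppendEntries acks.
bool isMajority(int count, int clusterSize) { return count >= clusterSize / 2 + 1; }

int main() {
  assert(isMajority(2, 3));   // 2 of 3 is a majority
  assert(!isMajority(2, 4));  // 2 of 4 is only half, not a majority
  assert(isMajority(3, 4));   // 3 of 4
  assert(isMajority(3, 5));   // 3 of 5
  return 0;
}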
DPrintf("[func-Raft::sendAppendEntries-raft{%d}] leader 向节点{%d}发送AE rpc成功", m_me, server); + if (reply->appstate() == Disconnected) { + return ok; + } + std::lock_guard lg1(m_mtx); - persist(); - } - return true; -} + //对reply进行处理 + // 对于rpc通信,无论什么时候都要检查term + if (reply->term() > m_currentTerm) { + m_status = Follower; + m_currentTerm = reply->term(); + m_votedFor = -1; + return ok; + } else if (reply->term() < m_currentTerm) { + DPrintf("[func -sendAppendEntries rf{%d}] 节点:{%d}的term{%d}term(), + m_me, m_currentTerm); + return ok; + } -bool -Raft::sendAppendEntries(int server, std::shared_ptr args, - std::shared_ptr reply, - std::shared_ptr appendNums) { - //这个ok是网络是否正常通信的ok,而不是requestVote rpc是否投票的rpc - // 如果网络不通的话肯定是没有返回的,不用一直重试 - // todo: paper中5.3节第一段末尾提到,如果append失败应该不断的retries ,直到这个log成功的被store - DPrintf("[func-Raft::sendAppendEntries-raft{%d}] leader 向节点{%d}发送AE rpc開始 , args->entries_size():{%d}", m_me, - server, args->entries_size()); - bool ok = m_peers[server]->AppendEntries(args.get(), reply.get()); - - if (!ok) { - DPrintf("[func-Raft::sendAppendEntries-raft{%d}] leader 向节点{%d}发送AE rpc失敗", m_me, server); - return ok; - } - DPrintf("[func-Raft::sendAppendEntries-raft{%d}] leader 向节点{%d}发送AE rpc成功", m_me, server); - if (reply->appstate() == Disconnected) { - return ok; - } - std::lock_guard lg1(m_mtx); - - //对reply进行处理 - // 对于rpc通信,无论什么时候都要检查term - if (reply->term() > m_currentTerm) { - m_status = Follower; - m_currentTerm = reply->term(); - m_votedFor = -1; - return ok; - } else if (reply->term() < m_currentTerm) { - DPrintf("[func -sendAppendEntries rf{%d}] 节点:{%d}的term{%d}term(), - m_me, m_currentTerm); - return ok; + if (m_status != Leader) { + //如果不是leader,那么就不要对返回的情况进行处理了 + return ok; + } + // term相等 + + myAssert(reply->term() == m_currentTerm, + format("reply.Term{%d} != rf.currentTerm{%d} ", reply->term(), m_currentTerm)); + if (!reply->success()) { + //日志不匹配,正常来说就是index要往前-1,既然能到这里,第一个日志(idnex = + //1)发送后肯定是匹配的,因此不用考虑变成负数 因为真正的环境不会知道是服务器宕机还是发生网络分区了 + if (reply->updatenextindex() != -100) { + // todo:待总结,就算term匹配,失败的时候nextIndex也不是照单全收的,因为如果发生rpc延迟,leader的term可能从不符合term要求 + //变得符合term要求 + //但是不能直接赋值reply.UpdateNextIndex + DPrintf("[func -sendAppendEntries rf{%d}] 返回的日志term相等,但是不匹配,回缩nextIndex[%d]:{%d}\n", m_me, + server, reply->updatenextindex()); + m_nextIndex[server] = reply->updatenextindex(); //失败是不更新mathIndex的 } + // 怎么越写越感觉rf.nextIndex数组是冗余的呢,看下论文fig2,其实不是冗余的 + } else { + *appendNums = *appendNums + 1; + DPrintf("---------------------------tmp------------------------- 節點{%d}返回true,當前*appendNums{%d}", server, + *appendNums); + // rf.matchIndex[server] = len(args.Entries) //只要返回一个响应就对其matchIndex应该对其做出反应, + //但是这么修改是有问题的,如果对某个消息发送了多遍(心跳时就会再发送),那么一条消息会导致n次上涨 + m_matchIndex[server] = std::max(m_matchIndex[server], args->prevlogindex() + args->entries_size()); + m_nextIndex[server] = m_matchIndex[server] + 1; + int lastLogIndex = getLastLogIndex(); - if (m_status != Leader) { - //如果不是leader,那么就不要对返回的情况进行处理了 - return ok; - } - //term相等 - - myAssert(reply->term() == m_currentTerm, - format("reply.Term{%d} != rf.currentTerm{%d} ", reply->term(), m_currentTerm)); - if (!reply->success()) { - //日志不匹配,正常来说就是index要往前-1,既然能到这里,第一个日志(idnex = 1)发送后肯定是匹配的,因此不用考虑变成负数 - //因为真正的环境不会知道是服务器宕机还是发生网络分区了 - if (reply->updatenextindex() != -100) { - //todo:待总结,就算term匹配,失败的时候nextIndex也不是照单全收的,因为如果发生rpc延迟,leader的term可能从不符合term要求 - //变得符合term要求 - //但是不能直接赋值reply.UpdateNextIndex - DPrintf("[func -sendAppendEntries rf{%d}] 返回的日志term相等,但是不匹配,回缩nextIndex[%d]:{%d}\n", m_me, server, - 
reply->updatenextindex()); - m_nextIndex[server] = reply->updatenextindex(); //失败是不更新mathIndex的 - } - // 怎么越写越感觉rf.nextIndex数组是冗余的呢,看下论文fig2,其实不是冗余的 - } else { - *appendNums = *appendNums + 1; - DPrintf("---------------------------tmp------------------------- 節點{%d}返回true,當前*appendNums{%d}", server, - *appendNums); - //rf.matchIndex[server] = len(args.Entries) //只要返回一个响应就对其matchIndex应该对其做出反应, - //但是这么修改是有问题的,如果对某个消息发送了多遍(心跳时就会再发送),那么一条消息会导致n次上涨 - m_matchIndex[server] = std::max(m_matchIndex[server], args->prevlogindex() + args->entries_size()); - m_nextIndex[server] = m_matchIndex[server] + 1; - int lastLogIndex = getLastLogIndex(); - - myAssert(m_nextIndex[server] <= lastLogIndex + 1, - format("error msg:rf.nextIndex[%d] > lastLogIndex+1, len(rf.logs) = %d lastLogIndex{%d} = %d", - server, m_logs.size(), server, lastLogIndex)); - if (*appendNums >= 1 + m_peers.size() / 2) { - //可以commit了 - //两种方法保证幂等性,1.赋值为0 2.上面≥改为== - - *appendNums = 0; - // todo https://578223592-laughing-halibut-wxvpggvw69qh99q4.github.dev/ 不断遍历来统计rf.commitIndex - //改了好久!!!!! leader只有在当前term有日志提交的时候才更新commitIndex,因为raft无法保证之前term的Index是否提交 - //只有当前term有日志提交,之前term的log才可以被提交,只有这样才能保证“领导人完备性{当选领导人的节点拥有之前被提交的所有log,当然也可能有一些没有被提交的}” - // rf.leaderUpdateCommitIndex() - if (args->entries_size() > 0) { - DPrintf("args->entries(args->entries_size()-1).logterm(){%d} m_currentTerm{%d}", - args->entries(args->entries_size() - 1).logterm(), m_currentTerm); - } - if (args->entries_size() > 0 && args->entries(args->entries_size() - 1).logterm() == m_currentTerm) { - DPrintf( - "---------------------------tmp------------------------- 當前term有log成功提交,更新leader的m_commitIndex from{%d} to{%d}", - m_commitIndex, args->prevlogindex() + args->entries_size()); - - m_commitIndex = std::max(m_commitIndex, args->prevlogindex() + args->entries_size()); - } - myAssert(m_commitIndex <= lastLogIndex, - format("[func-sendAppendEntries,rf{%d}] lastLogIndex:%d rf.commitIndex:%d\n", m_me, lastLogIndex, - m_commitIndex)); - //fmt.Printf("[func-sendAppendEntries,rf{%v}] len(rf.logs):%v rf.commitIndex:%v\n", rf.me, len(rf.logs), rf.commitIndex) - } + myAssert(m_nextIndex[server] <= lastLogIndex + 1, + format("error msg:rf.nextIndex[%d] > lastLogIndex+1, len(rf.logs) = %d lastLogIndex{%d} = %d", server, + m_logs.size(), server, lastLogIndex)); + if (*appendNums >= 1 + m_peers.size() / 2) { + //可以commit了 + //两种方法保证幂等性,1.赋值为0 2.上面≥改为== + + *appendNums = 0; + // todo https://578223592-laughing-halibut-wxvpggvw69qh99q4.github.dev/ 不断遍历来统计rf.commitIndex + //改了好久!!!!! 
+      // the leader only advances commitIndex when a log entry from its CURRENT term has been replicated, because raft
+      // cannot tell whether an entry from a previous term is committed;
+      // only once a current-term entry commits may earlier-term entries be considered committed -- that is what
+      // guarantees Leader Completeness (an elected leader holds every previously committed entry, possibly plus some
+      // uncommitted ones)
+      // rf.leaderUpdateCommitIndex()
+      if (args->entries_size() > 0) {
+        DPrintf("args->entries(args->entries_size()-1).logterm(){%d} m_currentTerm{%d}",
+                args->entries(args->entries_size() - 1).logterm(), m_currentTerm);
+      }
+      if (args->entries_size() > 0 && args->entries(args->entries_size() - 1).logterm() == m_currentTerm) {
+        DPrintf(
+            "---------------------------tmp------------------------- 當前term有log成功提交,更新leader的m_commitIndex "
+            "from{%d} to{%d}",
+            m_commitIndex, args->prevlogindex() + args->entries_size());
+
+        m_commitIndex = std::max(m_commitIndex, args->prevlogindex() + args->entries_size());
+      }
+      myAssert(m_commitIndex <= lastLogIndex,
+               format("[func-sendAppendEntries,rf{%d}] lastLogIndex:%d rf.commitIndex:%d\n", m_me, lastLogIndex,
+                      m_commitIndex));
+      // fmt.Printf("[func-sendAppendEntries,rf{%v}] len(rf.logs):%v rf.commitIndex:%v\n", rf.me, len(rf.logs),
+      // rf.commitIndex)
    }
-    return ok;
+  }
+  return ok;
}

void Raft::AppendEntries(google::protobuf::RpcController *controller,
                         const ::raftRpcProctoc::AppendEntriesArgs *request,
                         ::raftRpcProctoc::AppendEntriesReply *response, ::google::protobuf::Closure *done) {
-    AppendEntries1(request, response);
-    done->Run();
+  AppendEntries1(request, response);
+  done->Run();
}

void Raft::InstallSnapshot(google::protobuf::RpcController *controller,
                           const ::raftRpcProctoc::InstallSnapshotRequest *request,
                           ::raftRpcProctoc::InstallSnapshotResponse *response, ::google::protobuf::Closure *done) {
-    InstallSnapshot(request, response);
+  InstallSnapshot(request, response);

-    done->Run();
+  done->Run();
}

void Raft::RequestVote(google::protobuf::RpcController *controller, const ::raftRpcProctoc::RequestVoteArgs *request,
                       ::raftRpcProctoc::RequestVoteReply *response, ::google::protobuf::Closure *done) {
-    RequestVote(request, response);
-    done->Run();
+  RequestVote(request, response);
+  done->Run();
}
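Start, defined just below, is the entry point the key-value layer uses to replicate a command. A hedged usage sketch -- the surrounding function name is hypothetical, and it assumes the project's Raft and Op declarations are visible through its raft header:

// Hypothetical caller of Raft::Start; everything except Start itself is
// illustrative, not code from this repository.
void proposeToRaft(Raft &raft, const Op &command) {
  int newLogIndex = -1;
  int newLogTerm = -1;
  bool isLeader = false;
  raft.Start(command, &newLogIndex, &newLogTerm, &isLeader);
  if (!isLeader) {
    // Only the leader accepts commands; redirect the client to another node.
    return;
  }
  // Elsewhere, wait for applyChan to deliver the entry at newLogIndex before
  // answering the client -- Start alone does not mean the command committed.
}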
void Raft::Start(Op command, int *newLogIndex, int *newLogTerm, bool *isLeader) {
-    std::lock_guard<std::mutex> lg1(m_mtx);
-    // m_mtx.lock();
-    // Defer ec1([this]()->void {
-    //     m_mtx.unlock();
-    // });
-    if (m_status != Leader) {
-        DPrintf("[func-Start-rf{%d}] is not leader");
-        *newLogIndex = -1;
-        *newLogTerm = -1;
-        *isLeader = false;
-        return;
-    }
-
-    raftRpcProctoc::LogEntry newLogEntry;
-    newLogEntry.set_command(command.asString());
-    newLogEntry.set_logterm(m_currentTerm);
-    newLogEntry.set_logindex(getNewCommandIndex());
-    m_logs.emplace_back(newLogEntry);
-
-
-    int lastLogIndex = getLastLogIndex();
-
-    // the leader keeps sending AE to every follower to maintain heartbeats and keep logs in sync; currently a new command is not replicated immediately but waits for the leader's heartbeat to trigger it
-    DPrintf("[func-Start-rf{%d}] lastLogIndex:%d,command:%s\n", m_me, lastLogIndex, &command);
-    // rf.timer.Reset(10)  // sending to followers right after receiving a command caused problems for unknown reasons; to be fixed. todo
-    persist();
-    *newLogIndex = newLogEntry.logindex();
-    *newLogTerm = newLogEntry.logterm();
-    *isLeader = true;
+  std::lock_guard<std::mutex> lg1(m_mtx);
+  // m_mtx.lock();
+  // Defer ec1([this]()->void {
+  //   m_mtx.unlock();
+  // });
+  if (m_status != Leader) {
+    DPrintf("[func-Start-rf{%d}] is not leader");
+    *newLogIndex = -1;
+    *newLogTerm = -1;
+    *isLeader = false;
+    return;
+  }
+
+  raftRpcProctoc::LogEntry newLogEntry;
+  newLogEntry.set_command(command.asString());
+  newLogEntry.set_logterm(m_currentTerm);
+  newLogEntry.set_logindex(getNewCommandIndex());
+  m_logs.emplace_back(newLogEntry);
+
+  int lastLogIndex = getLastLogIndex();
+
+  // the leader keeps sending AE to every follower to maintain heartbeats and keep logs in sync; currently a new
+  // command is not replicated immediately but waits for the leader's heartbeat to trigger it
+  DPrintf("[func-Start-rf{%d}] lastLogIndex:%d,command:%s\n", m_me, lastLogIndex, &command);
+  // rf.timer.Reset(10)  // sending to followers right after receiving a command caused problems for unknown reasons;
+  // to be fixed. todo
+  persist();
+  *newLogIndex = newLogEntry.logindex();
+  *newLogTerm = newLogEntry.logterm();
+  *isLeader = true;
}

// Make
@@ -906,126 +913,124 @@ void Raft::Start(Op command, int *newLogIndex, int *newLogTerm, bool *isLeader)
 // for any long-running work.
 void Raft::init(std::vector<std::shared_ptr<RaftRpcUtil>> peers, int me, std::shared_ptr<Persister> persister,
                 std::shared_ptr<LockQueue<ApplyMsg>> applyCh) {
-    m_peers = peers;
-    m_persister = persister;
-    m_me = me;
-    // Your initialization code here (2A, 2B, 2C).
-    m_mtx.lock();
-
-    // applier
-    this->applyChan = applyCh;
-    // rf.ApplyMsgQueue = make(chan ApplyMsg)
-    m_currentTerm = 0;
-    m_status = Follower;
-    m_commitIndex = 0;
-    m_lastApplied = 0;
-    m_logs.clear();
-    for (int i = 0; i < m_peers.size(); i++) {
-        m_matchIndex.push_back(0);
-        m_nextIndex.push_back(0);
-    }
-    m_votedFor = -1;
-
-    m_lastSnapshotIncludeIndex = 0;
-    m_lastSnapshotIncludeTerm = 0;
-    m_lastResetElectionTime = now();
-    m_lastResetHearBeatTime = now();
-
-
-    // initialize from state persisted before a crash
-    readPersist(m_persister->ReadRaftState());
-    if (m_lastSnapshotIncludeIndex > 0) {
-        m_lastApplied = m_lastSnapshotIncludeIndex;
-        // rf.commitIndex = rf.lastSnapshotIncludeIndex  todo: why can commitIndex not be restored on crash recovery?
-    }
-
-
-    DPrintf("[Init&ReInit] Sever %d, term %d, lastSnapshotIncludeIndex {%d} , lastSnapshotIncludeTerm {%d}", m_me,
-            m_currentTerm, m_lastSnapshotIncludeIndex, m_lastSnapshotIncludeTerm);
-
-    m_mtx.unlock();
-    // start ticker goroutine to start elections
-    std::thread t(&Raft::leaderHearBeatTicker, this);
-    t.detach();
-
-    std::thread t2(&Raft::electionTimeOutTicker, this);
-    t2.detach();
-
-    std::thread t3(&Raft::applierTicker, this);
-    t3.detach();
+  m_peers = peers;
+  m_persister = persister;
+  m_me = me;
+  // Your initialization code here (2A, 2B, 2C).
+ m_mtx.lock(); + + // applier + this->applyChan = applyCh; + // rf.ApplyMsgQueue = make(chan ApplyMsg) + m_currentTerm = 0; + m_status = Follower; + m_commitIndex = 0; + m_lastApplied = 0; + m_logs.clear(); + for (int i = 0; i < m_peers.size(); i++) { + m_matchIndex.push_back(0); + m_nextIndex.push_back(0); + } + m_votedFor = -1; + + m_lastSnapshotIncludeIndex = 0; + m_lastSnapshotIncludeTerm = 0; + m_lastResetElectionTime = now(); + m_lastResetHearBeatTime = now(); + + // initialize from state persisted before a crash + readPersist(m_persister->ReadRaftState()); + if (m_lastSnapshotIncludeIndex > 0) { + m_lastApplied = m_lastSnapshotIncludeIndex; + // rf.commitIndex = rf.lastSnapshotIncludeIndex todo :崩溃恢复为何不能读取commitIndex + } + + DPrintf("[Init&ReInit] Sever %d, term %d, lastSnapshotIncludeIndex {%d} , lastSnapshotIncludeTerm {%d}", m_me, + m_currentTerm, m_lastSnapshotIncludeIndex, m_lastSnapshotIncludeTerm); + + m_mtx.unlock(); + // start ticker goroutine to start elections + std::thread t(&Raft::leaderHearBeatTicker, this); + t.detach(); + + std::thread t2(&Raft::electionTimeOutTicker, this); + t2.detach(); + + std::thread t3(&Raft::applierTicker, this); + t3.detach(); } std::string Raft::persistData() { - BoostPersistRaftNode boostPersistRaftNode; - boostPersistRaftNode.m_currentTerm = m_currentTerm; - boostPersistRaftNode.m_votedFor = m_votedFor; - boostPersistRaftNode.m_lastSnapshotIncludeIndex = m_lastSnapshotIncludeIndex; - boostPersistRaftNode.m_lastSnapshotIncludeTerm = m_lastSnapshotIncludeTerm; - for (auto &item: m_logs) { - boostPersistRaftNode.m_logs.push_back(item.SerializeAsString()); - } - - std::stringstream ss; - boost::archive::text_oarchive oa(ss); - oa << boostPersistRaftNode; - return ss.str(); + BoostPersistRaftNode boostPersistRaftNode; + boostPersistRaftNode.m_currentTerm = m_currentTerm; + boostPersistRaftNode.m_votedFor = m_votedFor; + boostPersistRaftNode.m_lastSnapshotIncludeIndex = m_lastSnapshotIncludeIndex; + boostPersistRaftNode.m_lastSnapshotIncludeTerm = m_lastSnapshotIncludeTerm; + for (auto &item : m_logs) { + boostPersistRaftNode.m_logs.push_back(item.SerializeAsString()); + } + + std::stringstream ss; + boost::archive::text_oarchive oa(ss); + oa << boostPersistRaftNode; + return ss.str(); } void Raft::readPersist(std::string data) { - if (data.empty()) { return; } - std::stringstream iss(data); - boost::archive::text_iarchive ia(iss); - // read class state from archive - BoostPersistRaftNode boostPersistRaftNode; - ia >> boostPersistRaftNode; - - m_currentTerm = boostPersistRaftNode.m_currentTerm; - m_votedFor = boostPersistRaftNode.m_votedFor; - m_lastSnapshotIncludeIndex = boostPersistRaftNode.m_lastSnapshotIncludeIndex; - m_lastSnapshotIncludeTerm = boostPersistRaftNode.m_lastSnapshotIncludeTerm; - m_logs.clear(); - for (auto &item: boostPersistRaftNode.m_logs) { - raftRpcProctoc::LogEntry logEntry; - logEntry.ParseFromString(item); - m_logs.emplace_back(logEntry); - } + if (data.empty()) { + return; + } + std::stringstream iss(data); + boost::archive::text_iarchive ia(iss); + // read class state from archive + BoostPersistRaftNode boostPersistRaftNode; + ia >> boostPersistRaftNode; + + m_currentTerm = boostPersistRaftNode.m_currentTerm; + m_votedFor = boostPersistRaftNode.m_votedFor; + m_lastSnapshotIncludeIndex = boostPersistRaftNode.m_lastSnapshotIncludeIndex; + m_lastSnapshotIncludeTerm = boostPersistRaftNode.m_lastSnapshotIncludeTerm; + m_logs.clear(); + for (auto &item : boostPersistRaftNode.m_logs) { + raftRpcProctoc::LogEntry logEntry; 
+    logEntry.ParseFromString(item);
+    m_logs.emplace_back(logEntry);
+  }
}

void Raft::Snapshot(int index, std::string snapshot) {
-    std::lock_guard<std::mutex> lg(m_mtx);
-
-
-    if (m_lastSnapshotIncludeIndex >= index || index > m_commitIndex) {
-        DPrintf(
-            "[func-Snapshot-rf{%d}] rejects replacing log with snapshotIndex %d as current snapshotIndex %d is larger or smaller ",
-            m_me, index, m_lastSnapshotIncludeIndex);
-        return;
-    }
-    auto lastLogIndex = getLastLogIndex();  // to check that the log is consistent before and after snapshotting, guarding against truncating too much or too little
-
-    // all log entries that remain after taking this snapshot
-    int newLastSnapshotIncludeIndex = index;
-    int newLastSnapshotIncludeTerm = m_logs[getSlicesIndexFromLogIndex(index)].logterm();
-    std::vector<raftRpcProctoc::LogEntry> trunckedLogs;
-    // todo: this is clumsy and should be improved; it also risks leaking memory
-    for (int i = index + 1; i <= getLastLogIndex(); i++) {
-        // note the <=, because the last log entry must be included
-        trunckedLogs.push_back(m_logs[getSlicesIndexFromLogIndex(i)]);
-    }
-    m_lastSnapshotIncludeIndex = newLastSnapshotIncludeIndex;
-    m_lastSnapshotIncludeTerm = newLastSnapshotIncludeTerm;
-    m_logs = trunckedLogs;
-    m_commitIndex = std::max(m_commitIndex, index);
-    m_lastApplied = std::max(m_lastApplied, index);
-
-
-    // rf.lastApplied = index  // lastApplied 和 commit应不应该改变呢??? 为什么 不应该改变吧
-    m_persister->Save(persistData(), snapshot);
-
-
-    DPrintf("[SnapShot]Server %d snapshot snapshot index {%d}, term {%d}, loglen {%d}", m_me, index,
-            m_lastSnapshotIncludeTerm, m_logs.size());
-    myAssert(m_logs.size() + m_lastSnapshotIncludeIndex == lastLogIndex,
-             format("len(rf.logs){%d} + rf.lastSnapshotIncludeIndex{%d} != lastLogjInde{%d}", m_logs.size(),
-                    m_lastSnapshotIncludeIndex, lastLogIndex));
+  std::lock_guard<std::mutex> lg(m_mtx);
+
+  if (m_lastSnapshotIncludeIndex >= index || index > m_commitIndex) {
+    DPrintf(
+        "[func-Snapshot-rf{%d}] rejects replacing log with snapshotIndex %d as current snapshotIndex %d is larger or "
+        "smaller ",
+        m_me, index, m_lastSnapshotIncludeIndex);
+    return;
+  }
+  auto lastLogIndex = getLastLogIndex();  // to check that the log is consistent before and after snapshotting,
+                                          // guarding against truncating too much or too little
+
+  // all log entries that remain after taking this snapshot
+  int newLastSnapshotIncludeIndex = index;
+  int newLastSnapshotIncludeTerm = m_logs[getSlicesIndexFromLogIndex(index)].logterm();
+  std::vector<raftRpcProctoc::LogEntry> trunckedLogs;
+  // todo: this is clumsy and should be improved; it also risks leaking memory
+  for (int i = index + 1; i <= getLastLogIndex(); i++) {
+    // note the <=, because the last log entry must be included
+    trunckedLogs.push_back(m_logs[getSlicesIndexFromLogIndex(i)]);
+  }
+  m_lastSnapshotIncludeIndex = newLastSnapshotIncludeIndex;
+  m_lastSnapshotIncludeTerm = newLastSnapshotIncludeTerm;
+  m_logs = trunckedLogs;
+  m_commitIndex = std::max(m_commitIndex, index);
+  m_lastApplied = std::max(m_lastApplied, index);
+
+  // rf.lastApplied = index  // lastApplied 和 commit应不应该改变呢???
为什么 不应该改变吧 + m_persister->Save(persistData(), snapshot); + + DPrintf("[SnapShot]Server %d snapshot snapshot index {%d}, term {%d}, loglen {%d}", m_me, index, + m_lastSnapshotIncludeTerm, m_logs.size()); + myAssert(m_logs.size() + m_lastSnapshotIncludeIndex == lastLogIndex, + format("len(rf.logs){%d} + rf.lastSnapshotIncludeIndex{%d} != lastLogjInde{%d}", m_logs.size(), + m_lastSnapshotIncludeIndex, lastLogIndex)); } diff --git a/src/raftCore/raftRpcUtil.cpp b/src/raftCore/raftRpcUtil.cpp index f1415d4..ab9f8fb 100644 --- a/src/raftCore/raftRpcUtil.cpp +++ b/src/raftCore/raftRpcUtil.cpp @@ -7,34 +7,31 @@ #include #include - bool RaftRpcUtil::AppendEntries(raftRpcProctoc::AppendEntriesArgs *args, raftRpcProctoc::AppendEntriesReply *response) { - MprpcController controller; - stub_->AppendEntries(&controller, args, response, nullptr); - return !controller.Failed(); + MprpcController controller; + stub_->AppendEntries(&controller, args, response, nullptr); + return !controller.Failed(); } bool RaftRpcUtil::InstallSnapshot(raftRpcProctoc::InstallSnapshotRequest *args, - raftRpcProctoc::InstallSnapshotResponse *response) { - MprpcController controller; - stub_->InstallSnapshot(&controller, args, response, nullptr); - return !controller.Failed(); + raftRpcProctoc::InstallSnapshotResponse *response) { + MprpcController controller; + stub_->InstallSnapshot(&controller, args, response, nullptr); + return !controller.Failed(); } bool RaftRpcUtil::RequestVote(raftRpcProctoc::RequestVoteArgs *args, raftRpcProctoc::RequestVoteReply *response) { - MprpcController controller; - stub_->RequestVote(&controller, args, response, nullptr); - return !controller.Failed(); + MprpcController controller; + stub_->RequestVote(&controller, args, response, nullptr); + return !controller.Failed(); } //先开启服务器,再尝试连接其他的节点,中间给一个间隔时间,等待其他的rpc服务器节点启动 RaftRpcUtil::RaftRpcUtil(std::string ip, short port) { - //********************************************* */ - //发送rpc设置 - stub_ = new raftRpcProctoc::raftRpc_Stub(new MprpcChannel(ip, port, true)); + //********************************************* */ + //发送rpc设置 + stub_ = new raftRpcProctoc::raftRpc_Stub(new MprpcChannel(ip, port, true)); } -RaftRpcUtil::~RaftRpcUtil() { - delete stub_; -} +RaftRpcUtil::~RaftRpcUtil() { delete stub_; } diff --git a/src/rpc/include/mprpcchannel.h b/src/rpc/include/mprpcchannel.h index 616b9e7..cb84858 100644 --- a/src/rpc/include/mprpcchannel.h +++ b/src/rpc/include/mprpcchannel.h @@ -1,44 +1,39 @@ -#ifndef MPRPCCHANNEL_H +#ifndef MPRPCCHANNEL_H #define MPRPCCHANNEL_H -#include #include #include -#include -#include -#include +#include #include -#include +#include // 包含 std::generate_n() 和 std::generate() 函数的头文件 +#include +#include #include -#include // 包含 std::generate_n() 和 std::generate() 函数的头文件 -#include // 包含 std::uniform_int_distribution 类型的头文件 +#include // 包含 std::uniform_int_distribution 类型的头文件 +#include #include +#include using namespace std; // 真正负责发送和接受的前后处理工作 // 如消息的组织方式,向哪个节点发送等等 -class MprpcChannel : public google::protobuf::RpcChannel -{ - -public: - // 所有通过stub代理对象调用的rpc方法,都走到这里了,统一做rpc方法调用的数据数据序列化和网络发送 那一步 - void CallMethod(const google::protobuf::MethodDescriptor *method, - google::protobuf::RpcController *controller, - const google::protobuf::Message *request, - google::protobuf::Message *response, - google::protobuf::Closure *done) override; - MprpcChannel(string ip, short port,bool connectNow); +class MprpcChannel : public google::protobuf::RpcChannel { + public: + // 所有通过stub代理对象调用的rpc方法,都走到这里了,统一做rpc方法调用的数据数据序列化和网络发送 那一步 + 
void CallMethod(const google::protobuf::MethodDescriptor *method, google::protobuf::RpcController *controller, + const google::protobuf::Message *request, google::protobuf::Message *response, + google::protobuf::Closure *done) override; + MprpcChannel(string ip, short port, bool connectNow); -private: - int m_clientFd; - const std::string m_ip; //保存ip和端口,如果断了可以尝试重连 - const uint16_t m_port; - /// @brief 连接ip和端口,并设置m_clientFd - /// @param ip ip地址,本机字节序 - /// @param port 端口,本机字节序 - /// @return 成功返回空字符串,否则返回失败信息 - bool newConnect(const char *ip, uint16_t port, string *errMsg); + private: + int m_clientFd; + const std::string m_ip; //保存ip和端口,如果断了可以尝试重连 + const uint16_t m_port; + /// @brief 连接ip和端口,并设置m_clientFd + /// @param ip ip地址,本机字节序 + /// @param port 端口,本机字节序 + /// @return 成功返回空字符串,否则返回失败信息 + bool newConnect(const char *ip, uint16_t port, string *errMsg); }; - -#endif //MPRPCCHANNEL_H \ No newline at end of file +#endif // MPRPCCHANNEL_H \ No newline at end of file diff --git a/src/rpc/include/mprpcconfig.h b/src/rpc/include/mprpcconfig.h index 80e9750..69f4ab1 100644 --- a/src/rpc/include/mprpcconfig.h +++ b/src/rpc/include/mprpcconfig.h @@ -1,19 +1,19 @@ #pragma once -#include #include +#include // rpcserverip rpcserverport zookeeperip zookeeperport // 框架读取配置文件类 -class MprpcConfig -{ -public: - // 负责解析加载配置文件 - void LoadConfigFile(const char *config_file); - // 查询配置项信息 - std::string Load(const std::string &key); -private: - std::unordered_map m_configMap; - // 去掉字符串前后的空格 - void Trim(std::string &src_buf); +class MprpcConfig { + public: + // 负责解析加载配置文件 + void LoadConfigFile(const char *config_file); + // 查询配置项信息 + std::string Load(const std::string &key); + + private: + std::unordered_map m_configMap; + // 去掉字符串前后的空格 + void Trim(std::string &src_buf); }; \ No newline at end of file diff --git a/src/rpc/include/mprpccontroller.h b/src/rpc/include/mprpccontroller.h index e427013..b7ca73b 100644 --- a/src/rpc/include/mprpccontroller.h +++ b/src/rpc/include/mprpccontroller.h @@ -2,20 +2,20 @@ #include #include -class MprpcController : public google::protobuf::RpcController -{ -public: - MprpcController(); - void Reset(); - bool Failed() const; - std::string ErrorText() const; - void SetFailed(const std::string& reason); +class MprpcController : public google::protobuf::RpcController { + public: + MprpcController(); + void Reset(); + bool Failed() const; + std::string ErrorText() const; + void SetFailed(const std::string& reason); - // 目前未实现具体的功能 - void StartCancel(); - bool IsCanceled() const; - void NotifyOnCancel(google::protobuf::Closure* callback); -private: - bool m_failed; // RPC方法执行过程中的状态 - std::string m_errText; // RPC方法执行过程中的错误信息 + // 目前未实现具体的功能 + void StartCancel(); + bool IsCanceled() const; + void NotifyOnCancel(google::protobuf::Closure* callback); + + private: + bool m_failed; // RPC方法执行过程中的状态 + std::string m_errText; // RPC方法执行过程中的错误信息 }; \ No newline at end of file diff --git a/src/rpc/include/rpcprovider.h b/src/rpc/include/rpcprovider.h index 1bc1e37..3444865 100644 --- a/src/rpc/include/rpcprovider.h +++ b/src/rpc/include/rpcprovider.h @@ -1,47 +1,45 @@ #pragma once -#include "google/protobuf/service.h" -#include +#include #include #include #include -#include +#include #include -#include +#include #include +#include "google/protobuf/service.h" // 框架提供的专门发布rpc服务的网络对象类 // todo:现在rpc客户端变成了 长连接,因此rpc服务器这边最好提供一个定时器,用以断开很久没有请求的连接。 // todo:为了配合这个,那么rpc客户端那边每次发送之前也需要真正的 -class RpcProvider -{ -public: - // 这里是框架提供给外部使用的,可以发布rpc方法的函数接口 - void NotifyService(google::protobuf::Service 
*service); +class RpcProvider { + public: + // 这里是框架提供给外部使用的,可以发布rpc方法的函数接口 + void NotifyService(google::protobuf::Service *service); - // 启动rpc服务节点,开始提供rpc远程网络调用服务 - void Run(int nodeIndex,short port); + // 启动rpc服务节点,开始提供rpc远程网络调用服务 + void Run(int nodeIndex, short port); -private: - // 组合EventLoop - muduo::net::EventLoop m_eventLoop; - std::shared_ptr m_muduo_server; + private: + // 组合EventLoop + muduo::net::EventLoop m_eventLoop; + std::shared_ptr m_muduo_server; - // service服务类型信息 - struct ServiceInfo - { - google::protobuf::Service *m_service; // 保存服务对象 - std::unordered_map m_methodMap; // 保存服务方法 - }; - // 存储注册成功的服务对象和其服务方法的所有信息 - std::unordered_map m_serviceMap; + // service服务类型信息 + struct ServiceInfo { + google::protobuf::Service *m_service; // 保存服务对象 + std::unordered_map m_methodMap; // 保存服务方法 + }; + // 存储注册成功的服务对象和其服务方法的所有信息 + std::unordered_map m_serviceMap; - // 新的socket连接回调 - void OnConnection(const muduo::net::TcpConnectionPtr&); - // 已建立连接用户的读写事件回调 - void OnMessage(const muduo::net::TcpConnectionPtr&, muduo::net::Buffer*, muduo::Timestamp); - // Closure的回调操作,用于序列化rpc的响应和网络发送 - void SendRpcResponse(const muduo::net::TcpConnectionPtr&, google::protobuf::Message*); + // 新的socket连接回调 + void OnConnection(const muduo::net::TcpConnectionPtr &); + // 已建立连接用户的读写事件回调 + void OnMessage(const muduo::net::TcpConnectionPtr &, muduo::net::Buffer *, muduo::Timestamp); + // Closure的回调操作,用于序列化rpc的响应和网络发送 + void SendRpcResponse(const muduo::net::TcpConnectionPtr &, google::protobuf::Message *); -public: - ~RpcProvider(); + public: + ~RpcProvider(); }; \ No newline at end of file diff --git a/src/rpc/mprpcchannel.cpp b/src/rpc/mprpcchannel.cpp index 10a9e41..1ea486d 100644 --- a/src/rpc/mprpcchannel.cpp +++ b/src/rpc/mprpcchannel.cpp @@ -1,12 +1,12 @@ #include "mprpcchannel.h" -#include -#include "rpcheader.pb.h" -#include #include #include +#include #include #include +#include #include "mprpccontroller.h" +#include "rpcheader.pb.h" #include "util.h" /* header_size + service_name method_name args_size + args @@ -15,165 +15,150 @@ header_size + service_name method_name args_size + args // 统一通过rpcChannel来调用方法 // 统一做rpc方法调用的数据数据序列化和网络发送 void MprpcChannel::CallMethod(const google::protobuf::MethodDescriptor *method, - google::protobuf::RpcController *controller, - const google::protobuf::Message *request, - google::protobuf::Message *response, - google::protobuf::Closure *done) -{ - if(m_clientFd == -1){ - std::string errMsg; - bool rt = newConnect(m_ip.c_str(), m_port, &errMsg); - if(!rt){ - DPrintf("[func-MprpcChannel::CallMethod]重连接ip:{%s} port{%d}失败",m_ip.c_str(),m_port); - controller->SetFailed(errMsg); - return ; - }else{ - DPrintf("[func-MprpcChannel::CallMethod]连接ip:{%s} port{%d}成功",m_ip.c_str(),m_port); - } + google::protobuf::RpcController *controller, const google::protobuf::Message *request, + google::protobuf::Message *response, google::protobuf::Closure *done) { + if (m_clientFd == -1) { + std::string errMsg; + bool rt = newConnect(m_ip.c_str(), m_port, &errMsg); + if (!rt) { + DPrintf("[func-MprpcChannel::CallMethod]重连接ip:{%s} port{%d}失败", m_ip.c_str(), m_port); + controller->SetFailed(errMsg); + return; + } else { + DPrintf("[func-MprpcChannel::CallMethod]连接ip:{%s} port{%d}成功", m_ip.c_str(), m_port); } - - const google::protobuf::ServiceDescriptor *sd = method->service(); - std::string service_name = sd->name(); // service_name - std::string method_name = method->name(); // method_name + } - // 获取参数的序列化字符串长度 args_size - uint32_t args_size = 0; - std::string args_str; - if 
(request->SerializeToString(&args_str))
-    {
-        args_size = args_str.size();
-    }
-    else
-    {
-        controller->SetFailed("serialize request error!");
-        return;
-    }
+  const google::protobuf::ServiceDescriptor *sd = method->service();
+  std::string service_name = sd->name();     // service_name
+  std::string method_name = method->name();  // method_name
+
+  // Get the length of the serialized argument string (args_size)
+  uint32_t args_size = 0;
+  std::string args_str;
+  if (request->SerializeToString(&args_str)) {
+    args_size = args_str.size();
+  } else {
+    controller->SetFailed("serialize request error!");
+    return;
+  }

-    // Build the rpc request header
-    RPC::RpcHeader rpcHeader;
-    rpcHeader.set_service_name(service_name);
-    rpcHeader.set_method_name(method_name);
-    rpcHeader.set_args_size(args_size);
+  // Build the rpc request header
+  RPC::RpcHeader rpcHeader;
+  rpcHeader.set_service_name(service_name);
+  rpcHeader.set_method_name(method_name);
+  rpcHeader.set_args_size(args_size);

-    uint32_t header_size = 0;
-    std::string rpc_header_str;
-    if (rpcHeader.SerializeToString(&rpc_header_str))
-    {
-        header_size = rpc_header_str.size();
-    }
-    else
-    {
-        controller->SetFailed("serialize rpc header error!");
-        return;
-    }
+  uint32_t header_size = 0;
+  std::string rpc_header_str;
+  if (rpcHeader.SerializeToString(&rpc_header_str)) {
+    header_size = rpc_header_str.size();
+  } else {
+    controller->SetFailed("serialize rpc header error!");
+    return;
+  }

-    // Assemble the rpc request string to send
-    std::string send_rpc_str;
-    send_rpc_str.insert(0, std::string((char *)&header_size, 4)); // header_size
-    send_rpc_str += rpc_header_str; // rpcheader
-    send_rpc_str += args_str; // args
+  // Assemble the rpc request string to send
+  std::string send_rpc_str;
+  send_rpc_str.insert(0, std::string((char *)&header_size, 4));  // header_size
+  send_rpc_str += rpc_header_str;                                // rpcheader
+  send_rpc_str += args_str;                                      // args

-    // Print debug info
-    // std::cout << "============================================" << std::endl;
-    // std::cout << "header_size: " << header_size << std::endl;
-    // std::cout << "rpc_header_str: " << rpc_header_str << std::endl;
-    // std::cout << "service_name: " << service_name << std::endl;
-    // std::cout << "method_name: " << method_name << std::endl;
-    // std::cout << "args_str: " << args_str << std::endl;
-    // std::cout << "============================================" << std::endl;
+  // Print debug info
+  // std::cout << "============================================" << std::endl;
+  // std::cout << "header_size: " << header_size << std::endl;
+  // std::cout << "rpc_header_str: " << rpc_header_str << std::endl;
+  // std::cout << "service_name: " << service_name << std::endl;
+  // std::cout << "method_name: " << method_name << std::endl;
+  // std::cout << "args_str: " << args_str << std::endl;
+  // std::cout << "============================================" << std::endl;

-    // Send the rpc request
-    // on failure, reconnect and retry the send; if reconnecting also fails, return directly
-    while (-1 == send(m_clientFd, send_rpc_str.c_str(), send_rpc_str.size(), 0))
-    {
-        char errtxt[512] = {0};
-        sprintf(errtxt, "send error! errno:%d", errno);
-        std::cout<<"trying to reconnect, peer ip:"<<m_ip<<" peer port:"<<m_port<<std::endl;
-        close(m_clientFd); m_clientFd = -1;
-        std::string errMsg;
-        bool rt = newConnect(m_ip.c_str(), m_port, &errMsg);
-        if(!rt){
-            controller->SetFailed(errMsg);
-            return ;
-        }
-    }
-    /*
-    Timing-wise: once the request has been sent, the rpc provider starts processing it;
-    by the time recv returns below, the response has already come back.
-    */
+  // Send the rpc request
+  // on failure, reconnect and retry the send; if reconnecting also fails, return directly
+  while (-1 == send(m_clientFd, send_rpc_str.c_str(), send_rpc_str.size(), 0)) {
+    char errtxt[512] = {0};
+    sprintf(errtxt, "send error! errno:%d", errno);
+    std::cout << "trying to reconnect, peer ip:" << m_ip << " peer port:" << m_port << std::endl;
+    close(m_clientFd);
+    m_clientFd = -1;
+    std::string errMsg;
+    bool rt = newConnect(m_ip.c_str(), m_port, &errMsg);
+    if (!rt) {
+      controller->SetFailed(errMsg);
+      return;
+    }
+  }
+  /*
+  Timing-wise: once the request has been sent, the rpc provider starts processing it;
+  by the time recv returns below, the response has already come back.
+  */

-    // Receive the rpc response
-    char recv_buf[1024] = {0};
-    int recv_size = 0;
-    if (-1 == (recv_size = recv(m_clientFd, recv_buf, 1024, 0)))
-    {
-        close(m_clientFd); m_clientFd = -1;
-        char errtxt[512] = {0};
-        sprintf(errtxt, "recv error! errno:%d", errno);
-        controller->SetFailed(errtxt);
-        return;
-    }
+  // Receive the rpc response
+  char recv_buf[1024] = {0};
+  int recv_size = 0;
+  if (-1 == (recv_size = recv(m_clientFd, recv_buf, 1024, 0))) {
+    close(m_clientFd);
+    m_clientFd = -1;
+    char errtxt[512] = {0};
+    sprintf(errtxt, "recv error! errno:%d", errno);
+    controller->SetFailed(errtxt);
+    return;
+  }

-    // Deserialize the rpc response data
-    // std::string response_str(recv_buf, 0, recv_size); // bug: everything after the first \0 in recv_buf was lost, so deserialization failed
-    // if (!response->ParseFromString(response_str))
-    if (!response->ParseFromArray(recv_buf, recv_size))
-    {
-        char errtxt[1050] = {0};
-        sprintf(errtxt, "parse error! response_str:%s", recv_buf);
-        controller->SetFailed(errtxt);
-        return;
-    }
+  // Deserialize the rpc response data
+  // std::string response_str(recv_buf, 0, recv_size);
+  // bug: everything after the first \0 in recv_buf was lost, so deserialization failed
+  // if (!response->ParseFromString(response_str))
+  if (!response->ParseFromArray(recv_buf, recv_size)) {
+    char errtxt[1050] = {0};
+    sprintf(errtxt, "parse error! response_str:%s", recv_buf);
+    controller->SetFailed(errtxt);
+    return;
+  }
 }

-bool MprpcChannel::newConnect(const char *ip, uint16_t port,string* errMsg)
-{
-    int clientfd = socket(AF_INET, SOCK_STREAM, 0);
-    if (-1 == clientfd)
-    {
-        char errtxt[512] = {0};
-        sprintf(errtxt, "create socket error! errno:%d", errno);
-        m_clientFd = -1;
-        *errMsg = errtxt;
-        return false;
-    }
-
-    struct sockaddr_in server_addr;
-    server_addr.sin_family = AF_INET;
-    server_addr.sin_port = htons(port);
-    server_addr.sin_addr.s_addr = inet_addr(ip);
-    // Connect to the rpc service node
-    if (-1 == connect(clientfd, (struct sockaddr *)&server_addr, sizeof(server_addr)))
-    {
-        close(clientfd);
-        char errtxt[512] = {0};
-        sprintf(errtxt, "connect fail! errno:%d", errno);
-        m_clientFd = -1;
-        *errMsg = errtxt;
-        return false;
-    }
-    m_clientFd = clientfd;
-    return true;
-}
+bool MprpcChannel::newConnect(const char *ip, uint16_t port, string *errMsg) {
+  int clientfd = socket(AF_INET, SOCK_STREAM, 0);
+  if (-1 == clientfd) {
+    char errtxt[512] = {0};
+    sprintf(errtxt, "create socket error! errno:%d", errno);
+    m_clientFd = -1;
+    *errMsg = errtxt;
+    return false;
+  }
+
+  struct sockaddr_in server_addr;
+  server_addr.sin_family = AF_INET;
+  server_addr.sin_port = htons(port);
+  server_addr.sin_addr.s_addr = inet_addr(ip);
+  // Connect to the rpc service node
+  if (-1 == connect(clientfd, (struct sockaddr *)&server_addr, sizeof(server_addr))) {
+    close(clientfd);
+    char errtxt[512] = {0};
+    sprintf(errtxt, "connect fail! errno:%d", errno);
+    m_clientFd = -1;
+    *errMsg = errtxt;
+    return false;
+  }
+  m_clientFd = clientfd;
+  return true;
+}

-MprpcChannel::MprpcChannel(string ip, short port,bool connectNow):m_ip(ip),m_port(port) ,m_clientFd(-1){
-    // Plain TCP carries the remote rpc call. Connections are short-lived, so every call
-    // reconnects; switching to long-lived connections is still pending.
-    // If there is no connection, or it has dropped, reconnect (with retries).
-    // Read the rpcserver info from the config file
-    // std::string ip = MprpcApplication::GetInstance().GetConfig().Load("rpcserverip");
-    // uint16_t port = atoi(MprpcApplication::GetInstance().GetConfig().Load("rpcserverport").c_str());
-    // To call service_name's method_name, the caller would look up the host of that service on zk
-    // /UserServiceRpc/Login
-    if(!connectNow){return ;} // delayed connection is allowed
-    std::string errMsg;
-    auto rt = newConnect(ip.c_str(), port,&errMsg);
-    int tryCount = 3;
-    while (!rt && tryCount--)
-    {
-        std::cout<<errMsg<<std::endl;
-        rt = newConnect(ip.c_str(), port,&errMsg);
-    }
-}
+MprpcChannel::MprpcChannel(string ip, short port, bool connectNow) : m_ip(ip), m_port(port), m_clientFd(-1) {
+  // Plain TCP carries the remote rpc call. Connections are short-lived, so every call
+  // reconnects; switching to long-lived connections is still pending.
+  // If there is no connection, or it has dropped, reconnect (with retries).
+  // Read the rpcserver info from the config file
+  // std::string ip = MprpcApplication::GetInstance().GetConfig().Load("rpcserverip");
+  // uint16_t port = atoi(MprpcApplication::GetInstance().GetConfig().Load("rpcserverport").c_str());
+  // To call service_name's method_name, the caller would look up the host of that service on zk
+  // /UserServiceRpc/Login
+  if (!connectNow) {
+    return;
+  }  // delayed connection is allowed
+  std::string errMsg;
+  auto rt = newConnect(ip.c_str(), port, &errMsg);
+  int tryCount = 3;
+  while (!rt && tryCount--) {
+    std::cout << errMsg << std::endl;
+    rt = newConnect(ip.c_str(), port, &errMsg);
+  }
+}
diff --git a/src/rpc/mprpcconfig.cpp b/src/rpc/mprpcconfig.cpp
--- a/src/rpc/mprpcconfig.cpp
+++ b/src/rpc/mprpcconfig.cpp
 #include "mprpcconfig.h"
 #include
 #include

 // Parses and loads the config file
-void MprpcConfig::LoadConfigFile(const char *config_file)
-{
-    FILE *pf = fopen(config_file, "r");
-    if (nullptr == pf)
-    {
-        std::cout << config_file << " is note exist!" << std::endl;
-        exit(EXIT_FAILURE);
-    }
-
-    // handle 1. comments  2. valid key=value entries  3. extra leading whitespace
-    while(!feof(pf))
-    {
-        char buf[512] = {0};
-        fgets(buf, 512, pf);
-
-        // strip leading spaces
-        std::string read_buf(buf);
-        Trim(read_buf);
-
-        // skip # comments
-        if (read_buf[0] == '#' || read_buf.empty())
-        {
-            continue;
-        }
-
-        // parse the entry
-        int idx = read_buf.find('=');
-        if (idx == -1)
-        {
-            // invalid entry
-            continue;
-        }
-
-        std::string key;
-        std::string value;
-        key = read_buf.substr(0, idx);
-        Trim(key);
-        // rpcserverip=127.0.0.1\n
-        int endidx = read_buf.find('\n', idx);
-        value = read_buf.substr(idx+1, endidx-idx-1);
-        Trim(value);
-        m_configMap.insert({key, value});
-    }
-
-    fclose(pf);
-}
+void MprpcConfig::LoadConfigFile(const char *config_file) {
+  FILE *pf = fopen(config_file, "r");
+  if (nullptr == pf) {
+    std::cout << config_file << " does not exist!" << std::endl;
+    exit(EXIT_FAILURE);
+  }
+
+  // handle 1. comments  2. valid key=value entries  3. extra leading whitespace
+  while (!feof(pf)) {
+    char buf[512] = {0};
+    fgets(buf, 512, pf);
+
+    // strip leading spaces
+    std::string read_buf(buf);
+    Trim(read_buf);
+
+    // skip # comments
+    if (read_buf[0] == '#' || read_buf.empty()) {
+      continue;
+    }
+
+    // parse the entry
+    int idx = read_buf.find('=');
+    if (idx == -1) {
+      // invalid entry
+      continue;
+    }
+
+    std::string key;
+    std::string value;
+    key = read_buf.substr(0, idx);
+    Trim(key);
+    // rpcserverip=127.0.0.1\n
+    int endidx = read_buf.find('\n', idx);
+    value = read_buf.substr(idx + 1, endidx - idx - 1);
+    Trim(value);
+    m_configMap.insert({key, value});
+  }
+
+  fclose(pf);
+}

 // Look up a config entry
-std::string MprpcConfig::Load(const std::string &key)
-{
-    auto it = m_configMap.find(key);
-    if (it == m_configMap.end())
-    {
-        return "";
-    }
-    return it->second;
-}
+std::string MprpcConfig::Load(const std::string &key) {
+  auto it = m_configMap.find(key);
+  if (it == m_configMap.end()) {
+    return "";
+  }
+  return it->second;
+}

 // Trim leading and trailing spaces
-void MprpcConfig::Trim(std::string &src_buf)
-{
-    int idx = src_buf.find_first_not_of(' ');
-    if (idx != -1)
-    {
-        // the string has leading spaces
-        src_buf = src_buf.substr(idx, src_buf.size()-idx);
-    }
-    // strip trailing spaces
-    idx = src_buf.find_last_not_of(' ');
-    if (idx != -1)
-    {
-        // the string has trailing spaces
-        src_buf = src_buf.substr(0, idx+1);
-    }
-}
+void MprpcConfig::Trim(std::string &src_buf) {
+  int idx = src_buf.find_first_not_of(' ');
+  if (idx != -1) {
+    // the string has leading spaces
+    src_buf = src_buf.substr(idx, src_buf.size() - idx);
+  }
+  // strip trailing spaces
+  idx = src_buf.find_last_not_of(' ');
+  if (idx != -1) {
+    // the string has trailing spaces
+    src_buf = src_buf.substr(0, idx + 1);
+  }
+}
\ No newline at end of file
diff --git a/src/rpc/mprpccontroller.cpp
b/src/rpc/mprpccontroller.cpp index 8fbd325..a67317e 100644 --- a/src/rpc/mprpccontroller.cpp +++ b/src/rpc/mprpccontroller.cpp @@ -1,34 +1,25 @@ #include "mprpccontroller.h" -MprpcController::MprpcController() -{ - m_failed = false; - m_errText = ""; +MprpcController::MprpcController() { + m_failed = false; + m_errText = ""; } -void MprpcController::Reset() -{ - m_failed = false; - m_errText = ""; +void MprpcController::Reset() { + m_failed = false; + m_errText = ""; } -bool MprpcController::Failed() const -{ - return m_failed; -} +bool MprpcController::Failed() const { return m_failed; } -std::string MprpcController::ErrorText() const -{ - return m_errText; -} +std::string MprpcController::ErrorText() const { return m_errText; } -void MprpcController::SetFailed(const std::string& reason) -{ - m_failed = true; - m_errText = reason; +void MprpcController::SetFailed(const std::string& reason) { + m_failed = true; + m_errText = reason; } // 目前未实现具体的功能 -void MprpcController::StartCancel(){} -bool MprpcController::IsCanceled() const {return false;} +void MprpcController::StartCancel() {} +bool MprpcController::IsCanceled() const { return false; } void MprpcController::NotifyOnCancel(google::protobuf::Closure* callback) {} \ No newline at end of file diff --git a/src/rpc/rpcheader.pb.cpp b/src/rpc/rpcheader.pb.cpp index 8ef7de0..223667d 100644 --- a/src/rpc/rpcheader.pb.cpp +++ b/src/rpc/rpcheader.pb.cpp @@ -5,13 +5,13 @@ #include -#include -#include -#include #include +#include #include +#include #include #include +#include // @@protoc_insertion_point(includes) #include namespace RPC { @@ -25,85 +25,99 @@ static void InitDefaultsscc_info_RpcHeader_rpcheader_2eproto() { { void* ptr = &::RPC::_RpcHeader_default_instance_; - new (ptr) ::RPC::RpcHeader(); + new (ptr)::RPC::RpcHeader(); ::PROTOBUF_NAMESPACE_ID::internal::OnShutdownDestroyMessage(ptr); } ::RPC::RpcHeader::InitAsDefaultInstance(); } -::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<0> scc_info_RpcHeader_rpcheader_2eproto = - {{ATOMIC_VAR_INIT(::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase::kUninitialized), 0, 0, InitDefaultsscc_info_RpcHeader_rpcheader_2eproto}, {}}; +::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<0> scc_info_RpcHeader_rpcheader_2eproto = { + {ATOMIC_VAR_INIT(::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase::kUninitialized), 0, 0, + InitDefaultsscc_info_RpcHeader_rpcheader_2eproto}, + {}}; static ::PROTOBUF_NAMESPACE_ID::Metadata file_level_metadata_rpcheader_2eproto[1]; -static constexpr ::PROTOBUF_NAMESPACE_ID::EnumDescriptor const** file_level_enum_descriptors_rpcheader_2eproto = nullptr; -static constexpr ::PROTOBUF_NAMESPACE_ID::ServiceDescriptor const** file_level_service_descriptors_rpcheader_2eproto = nullptr; - -const ::PROTOBUF_NAMESPACE_ID::uint32 TableStruct_rpcheader_2eproto::offsets[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) = { - ~0u, // no _has_bits_ - PROTOBUF_FIELD_OFFSET(::RPC::RpcHeader, _internal_metadata_), - ~0u, // no _extensions_ - ~0u, // no _oneof_case_ - ~0u, // no _weak_field_map_ - PROTOBUF_FIELD_OFFSET(::RPC::RpcHeader, service_name_), - PROTOBUF_FIELD_OFFSET(::RPC::RpcHeader, method_name_), - PROTOBUF_FIELD_OFFSET(::RPC::RpcHeader, args_size_), +static constexpr ::PROTOBUF_NAMESPACE_ID::EnumDescriptor const** file_level_enum_descriptors_rpcheader_2eproto = + nullptr; +static constexpr ::PROTOBUF_NAMESPACE_ID::ServiceDescriptor const** file_level_service_descriptors_rpcheader_2eproto = + nullptr; + +const ::PROTOBUF_NAMESPACE_ID::uint32 TableStruct_rpcheader_2eproto::offsets[] 
PROTOBUF_SECTION_VARIABLE( + protodesc_cold) = { + ~0u, // no _has_bits_ + PROTOBUF_FIELD_OFFSET(::RPC::RpcHeader, _internal_metadata_), + ~0u, // no _extensions_ + ~0u, // no _oneof_case_ + ~0u, // no _weak_field_map_ + PROTOBUF_FIELD_OFFSET(::RPC::RpcHeader, service_name_), + PROTOBUF_FIELD_OFFSET(::RPC::RpcHeader, method_name_), + PROTOBUF_FIELD_OFFSET(::RPC::RpcHeader, args_size_), }; static const ::PROTOBUF_NAMESPACE_ID::internal::MigrationSchema schemas[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) = { - { 0, -1, sizeof(::RPC::RpcHeader)}, + {0, -1, sizeof(::RPC::RpcHeader)}, }; -static ::PROTOBUF_NAMESPACE_ID::Message const * const file_default_instances[] = { - reinterpret_cast(&::RPC::_RpcHeader_default_instance_), +static ::PROTOBUF_NAMESPACE_ID::Message const* const file_default_instances[] = { + reinterpret_cast(&::RPC::_RpcHeader_default_instance_), }; const char descriptor_table_protodef_rpcheader_2eproto[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) = - "\n\017rpcheader.proto\022\003RPC\"I\n\tRpcHeader\022\024\n\014s" - "ervice_name\030\001 \001(\014\022\023\n\013method_name\030\002 \001(\014\022\021" - "\n\targs_size\030\003 \001(\rb\006proto3" - ; -static const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable*const descriptor_table_rpcheader_2eproto_deps[1] = { -}; -static ::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase*const descriptor_table_rpcheader_2eproto_sccs[1] = { - &scc_info_RpcHeader_rpcheader_2eproto.base, + "\n\017rpcheader.proto\022\003RPC\"I\n\tRpcHeader\022\024\n\014s" + "ervice_name\030\001 \001(\014\022\023\n\013method_name\030\002 \001(\014\022\021" + "\n\targs_size\030\003 \001(\rb\006proto3"; +static const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable* const descriptor_table_rpcheader_2eproto_deps[1] = {}; +static ::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase* const descriptor_table_rpcheader_2eproto_sccs[1] = { + &scc_info_RpcHeader_rpcheader_2eproto.base, }; static ::PROTOBUF_NAMESPACE_ID::internal::once_flag descriptor_table_rpcheader_2eproto_once; const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_rpcheader_2eproto = { - false, false, descriptor_table_protodef_rpcheader_2eproto, "rpcheader.proto", 105, - &descriptor_table_rpcheader_2eproto_once, descriptor_table_rpcheader_2eproto_sccs, descriptor_table_rpcheader_2eproto_deps, 1, 0, - schemas, file_default_instances, TableStruct_rpcheader_2eproto::offsets, - file_level_metadata_rpcheader_2eproto, 1, file_level_enum_descriptors_rpcheader_2eproto, file_level_service_descriptors_rpcheader_2eproto, + false, + false, + descriptor_table_protodef_rpcheader_2eproto, + "rpcheader.proto", + 105, + &descriptor_table_rpcheader_2eproto_once, + descriptor_table_rpcheader_2eproto_sccs, + descriptor_table_rpcheader_2eproto_deps, + 1, + 0, + schemas, + file_default_instances, + TableStruct_rpcheader_2eproto::offsets, + file_level_metadata_rpcheader_2eproto, + 1, + file_level_enum_descriptors_rpcheader_2eproto, + file_level_service_descriptors_rpcheader_2eproto, }; // Force running AddDescriptors() at dynamic initialization time. 
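// (How code elsewhere in this diff consumes this generated type -- a brief sketch; the field
// values are placeholders. service_name and method_name are proto3 `bytes` fields and
// args_size a `uint32`, per the descriptor string above:
//   RPC::RpcHeader h;
//   h.set_service_name("UserServiceRpc");
//   h.set_method_name("Login");
//   h.set_args_size(42);
//   std::string bytes;
//   bool ok = h.SerializeToString(&bytes);     // encode
//   RPC::RpcHeader parsed;
//   ok = ok && parsed.ParseFromString(bytes);  // decode
// MprpcChannel::CallMethod and RpcProvider::OnMessage perform exactly this round trip.)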
-static bool dynamic_init_dummy_rpcheader_2eproto = (static_cast(::PROTOBUF_NAMESPACE_ID::internal::AddDescriptors(&descriptor_table_rpcheader_2eproto)), true); +static bool dynamic_init_dummy_rpcheader_2eproto = + (static_cast(::PROTOBUF_NAMESPACE_ID::internal::AddDescriptors(&descriptor_table_rpcheader_2eproto)), true); namespace RPC { // =================================================================== -void RpcHeader::InitAsDefaultInstance() { -} +void RpcHeader::InitAsDefaultInstance() {} class RpcHeader::_Internal { public: }; -RpcHeader::RpcHeader(::PROTOBUF_NAMESPACE_ID::Arena* arena) - : ::PROTOBUF_NAMESPACE_ID::Message(arena) { +RpcHeader::RpcHeader(::PROTOBUF_NAMESPACE_ID::Arena* arena) : ::PROTOBUF_NAMESPACE_ID::Message(arena) { SharedCtor(); RegisterArenaDtor(arena); // @@protoc_insertion_point(arena_constructor:RPC.RpcHeader) } -RpcHeader::RpcHeader(const RpcHeader& from) - : ::PROTOBUF_NAMESPACE_ID::Message() { +RpcHeader::RpcHeader(const RpcHeader& from) : ::PROTOBUF_NAMESPACE_ID::Message() { _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); service_name_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); if (!from._internal_service_name().empty()) { service_name_.Set(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), from._internal_service_name(), - GetArena()); + GetArena()); } method_name_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited()); if (!from._internal_method_name().empty()) { method_name_.Set(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), from._internal_method_name(), - GetArena()); + GetArena()); } args_size_ = from.args_size_; // @@protoc_insertion_point(copy_constructor:RPC.RpcHeader) @@ -129,25 +143,21 @@ void RpcHeader::SharedDtor() { } void RpcHeader::ArenaDtor(void* object) { - RpcHeader* _this = reinterpret_cast< RpcHeader* >(object); + RpcHeader* _this = reinterpret_cast(object); (void)_this; } -void RpcHeader::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) { -} -void RpcHeader::SetCachedSize(int size) const { - _cached_size_.Set(size); -} +void RpcHeader::RegisterArenaDtor(::PROTOBUF_NAMESPACE_ID::Arena*) {} +void RpcHeader::SetCachedSize(int size) const { _cached_size_.Set(size); } const RpcHeader& RpcHeader::default_instance() { ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&::scc_info_RpcHeader_rpcheader_2eproto.base); return *internal_default_instance(); } - void RpcHeader::Clear() { -// @@protoc_insertion_point(message_clear_start:RPC.RpcHeader) + // @@protoc_insertion_point(message_clear_start:RPC.RpcHeader) ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; // Prevent compiler warnings about cached_has_bits being unused - (void) cached_has_bits; + (void)cached_has_bits; service_name_.ClearToEmpty(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArena()); method_name_.ClearToEmpty(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArena()); @@ -156,8 +166,10 @@ void RpcHeader::Clear() { } const char* RpcHeader::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) { -#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure - ::PROTOBUF_NAMESPACE_ID::Arena* arena = GetArena(); (void)arena; +#define CHK_(x) \ + if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure + ::PROTOBUF_NAMESPACE_ID::Arena* arena = GetArena(); + (void)arena; while (!ctx->Done(&ptr)) { ::PROTOBUF_NAMESPACE_ID::uint32 tag; ptr = 
::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag); @@ -169,7 +181,8 @@ const char* RpcHeader::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID:: auto str = _internal_mutable_service_name(); ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParser(str, ptr, ctx); CHK_(ptr); - } else goto handle_unusual; + } else + goto handle_unusual; continue; // bytes method_name = 2; case 2: @@ -177,14 +190,16 @@ const char* RpcHeader::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID:: auto str = _internal_mutable_method_name(); ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParser(str, ptr, ctx); CHK_(ptr); - } else goto handle_unusual; + } else + goto handle_unusual; continue; // uint32 args_size = 3; case 3: if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 24)) { args_size_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint32(&ptr); CHK_(ptr); - } else goto handle_unusual; + } else + goto handle_unusual; continue; default: { handle_unusual: @@ -192,14 +207,13 @@ const char* RpcHeader::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID:: ctx->SetLastTag(tag); goto success; } - ptr = UnknownFieldParse(tag, - _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(), - ptr, ctx); + ptr = UnknownFieldParse( + tag, _internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(), ptr, ctx); CHK_(ptr != nullptr); continue; } } // switch - } // while + } // while success: return ptr; failure: @@ -212,66 +226,60 @@ ::PROTOBUF_NAMESPACE_ID::uint8* RpcHeader::_InternalSerialize( ::PROTOBUF_NAMESPACE_ID::uint8* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const { // @@protoc_insertion_point(serialize_to_array_start:RPC.RpcHeader) ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; - (void) cached_has_bits; + (void)cached_has_bits; // bytes service_name = 1; if (this->service_name().size() > 0) { - target = stream->WriteBytesMaybeAliased( - 1, this->_internal_service_name(), target); + target = stream->WriteBytesMaybeAliased(1, this->_internal_service_name(), target); } // bytes method_name = 2; if (this->method_name().size() > 0) { - target = stream->WriteBytesMaybeAliased( - 2, this->_internal_method_name(), target); + target = stream->WriteBytesMaybeAliased(2, this->_internal_method_name(), target); } // uint32 args_size = 3; if (this->args_size() != 0) { target = stream->EnsureSpace(target); - target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteUInt32ToArray(3, this->_internal_args_size(), target); + target = + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteUInt32ToArray(3, this->_internal_args_size(), target); } if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) { target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::InternalSerializeUnknownFieldsToArray( - _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), target, stream); + _internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>( + ::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), + target, stream); } // @@protoc_insertion_point(serialize_to_array_end:RPC.RpcHeader) return target; } size_t RpcHeader::ByteSizeLong() const { -// @@protoc_insertion_point(message_byte_size_start:RPC.RpcHeader) + // @@protoc_insertion_point(message_byte_size_start:RPC.RpcHeader) size_t total_size = 0; ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; // Prevent compiler 
warnings about cached_has_bits being unused - (void) cached_has_bits; + (void)cached_has_bits; // bytes service_name = 1; if (this->service_name().size() > 0) { - total_size += 1 + - ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::BytesSize( - this->_internal_service_name()); + total_size += 1 + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::BytesSize(this->_internal_service_name()); } // bytes method_name = 2; if (this->method_name().size() > 0) { - total_size += 1 + - ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::BytesSize( - this->_internal_method_name()); + total_size += 1 + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::BytesSize(this->_internal_method_name()); } // uint32 args_size = 3; if (this->args_size() != 0) { - total_size += 1 + - ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::UInt32Size( - this->_internal_args_size()); + total_size += 1 + ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::UInt32Size(this->_internal_args_size()); } if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) { - return ::PROTOBUF_NAMESPACE_ID::internal::ComputeUnknownFieldsSize( - _internal_metadata_, total_size, &_cached_size_); + return ::PROTOBUF_NAMESPACE_ID::internal::ComputeUnknownFieldsSize(_internal_metadata_, total_size, &_cached_size_); } int cached_size = ::PROTOBUF_NAMESPACE_ID::internal::ToCachedSize(total_size); SetCachedSize(cached_size); @@ -279,26 +287,24 @@ size_t RpcHeader::ByteSizeLong() const { } void RpcHeader::MergeFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) { -// @@protoc_insertion_point(generalized_merge_from_start:RPC.RpcHeader) + // @@protoc_insertion_point(generalized_merge_from_start:RPC.RpcHeader) GOOGLE_DCHECK_NE(&from, this); - const RpcHeader* source = - ::PROTOBUF_NAMESPACE_ID::DynamicCastToGenerated( - &from); + const RpcHeader* source = ::PROTOBUF_NAMESPACE_ID::DynamicCastToGenerated(&from); if (source == nullptr) { - // @@protoc_insertion_point(generalized_merge_from_cast_fail:RPC.RpcHeader) + // @@protoc_insertion_point(generalized_merge_from_cast_fail:RPC.RpcHeader) ::PROTOBUF_NAMESPACE_ID::internal::ReflectionOps::Merge(from, this); } else { - // @@protoc_insertion_point(generalized_merge_from_cast_success:RPC.RpcHeader) + // @@protoc_insertion_point(generalized_merge_from_cast_success:RPC.RpcHeader) MergeFrom(*source); } } void RpcHeader::MergeFrom(const RpcHeader& from) { -// @@protoc_insertion_point(class_specific_merge_from_start:RPC.RpcHeader) + // @@protoc_insertion_point(class_specific_merge_from_start:RPC.RpcHeader) GOOGLE_DCHECK_NE(&from, this); _internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_); ::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0; - (void) cached_has_bits; + (void)cached_has_bits; if (from.service_name().size() > 0) { _internal_set_service_name(from._internal_service_name()); @@ -312,41 +318,39 @@ void RpcHeader::MergeFrom(const RpcHeader& from) { } void RpcHeader::CopyFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) { -// @@protoc_insertion_point(generalized_copy_from_start:RPC.RpcHeader) + // @@protoc_insertion_point(generalized_copy_from_start:RPC.RpcHeader) if (&from == this) return; Clear(); MergeFrom(from); } void RpcHeader::CopyFrom(const RpcHeader& from) { -// @@protoc_insertion_point(class_specific_copy_from_start:RPC.RpcHeader) + // @@protoc_insertion_point(class_specific_copy_from_start:RPC.RpcHeader) if (&from == this) return; Clear(); MergeFrom(from); } -bool RpcHeader::IsInitialized() const { - return true; -} +bool 
RpcHeader::IsInitialized() const { return true; } void RpcHeader::InternalSwap(RpcHeader* other) { using std::swap; _internal_metadata_.Swap<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(&other->_internal_metadata_); - service_name_.Swap(&other->service_name_, &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArena()); - method_name_.Swap(&other->method_name_, &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), GetArena()); + service_name_.Swap(&other->service_name_, &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), + GetArena()); + method_name_.Swap(&other->method_name_, &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), + GetArena()); swap(args_size_, other->args_size_); } -::PROTOBUF_NAMESPACE_ID::Metadata RpcHeader::GetMetadata() const { - return GetMetadataStatic(); -} - +::PROTOBUF_NAMESPACE_ID::Metadata RpcHeader::GetMetadata() const { return GetMetadataStatic(); } // @@protoc_insertion_point(namespace_scope) } // namespace RPC PROTOBUF_NAMESPACE_OPEN -template<> PROTOBUF_NOINLINE ::RPC::RpcHeader* Arena::CreateMaybeMessage< ::RPC::RpcHeader >(Arena* arena) { - return Arena::CreateMessageInternal< ::RPC::RpcHeader >(arena); +template <> +PROTOBUF_NOINLINE ::RPC::RpcHeader* Arena::CreateMaybeMessage<::RPC::RpcHeader>(Arena* arena) { + return Arena::CreateMessageInternal<::RPC::RpcHeader>(arena); } PROTOBUF_NAMESPACE_CLOSE diff --git a/src/rpc/rpcprovider.cpp b/src/rpc/rpcprovider.cpp index 47fa1d7..9bde83e 100644 --- a/src/rpc/rpcprovider.cpp +++ b/src/rpc/rpcprovider.cpp @@ -1,12 +1,12 @@ #include "rpcprovider.h" -#include "rpcheader.pb.h" -#include "util.h" -#include -#include #include #include +#include +#include #include #include +#include "rpcheader.pb.h" +#include "util.h" /* service_name => service描述 =》 service* 记录服务对象 @@ -15,114 +15,105 @@ json protobuf */ // 这里是框架提供给外部使用的,可以发布rpc方法的函数接口 // 只是简单的把服务描述符和方法描述符全部保存在本地而已 -//todo 待修改 要把本机开启的ip和端口写在文件里面 -void RpcProvider::NotifyService(google::protobuf::Service *service) -{ - ServiceInfo service_info; - - // 获取了服务对象的描述信息 - const google::protobuf::ServiceDescriptor *pserviceDesc = service->GetDescriptor(); - // 获取服务的名字 - std::string service_name = pserviceDesc->name(); - // 获取服务对象service的方法的数量 - int methodCnt = pserviceDesc->method_count(); - - std::cout << "service_name:" << service_name << std::endl; - - for (int i = 0; i < methodCnt; ++i) - { - // 获取了服务对象指定下标的服务方法的描述(抽象描述) UserService Login - const google::protobuf::MethodDescriptor *pmethodDesc = pserviceDesc->method(i); - std::string method_name = pmethodDesc->name(); - service_info.m_methodMap.insert({method_name, pmethodDesc}); - - } - service_info.m_service = service; - m_serviceMap.insert({service_name, service_info}); +// todo 待修改 要把本机开启的ip和端口写在文件里面 +void RpcProvider::NotifyService(google::protobuf::Service *service) { + ServiceInfo service_info; + + // 获取了服务对象的描述信息 + const google::protobuf::ServiceDescriptor *pserviceDesc = service->GetDescriptor(); + // 获取服务的名字 + std::string service_name = pserviceDesc->name(); + // 获取服务对象service的方法的数量 + int methodCnt = pserviceDesc->method_count(); + + std::cout << "service_name:" << service_name << std::endl; + + for (int i = 0; i < methodCnt; ++i) { + // 获取了服务对象指定下标的服务方法的描述(抽象描述) UserService Login + const google::protobuf::MethodDescriptor *pmethodDesc = pserviceDesc->method(i); + std::string method_name = pmethodDesc->name(); + service_info.m_methodMap.insert({method_name, pmethodDesc}); + } + service_info.m_service = service; + m_serviceMap.insert({service_name, 
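// (What registration amounts to, from the caller's side -- sketch; UserService and Login are
// illustrative names, not defined in this diff:
//   RpcProvider provider;
//   provider.NotifyService(new UserService());  // UserService derives from the
//                                               // protoc-generated service class
//   provider.Run(0, 7788);                      // nodeIndex, port
// After the insert below, m_serviceMap maps "UserService" to its ServiceInfo, whose
// m_methodMap maps "Login" to the matching MethodDescriptor*.)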
service_info}); } // 启动rpc服务节点,开始提供rpc远程网络调用服务 -void RpcProvider::Run(int nodeIndex,short port) -{ - //获取可用ip - char* ipC; - char hname[128]; - struct hostent* hent; - gethostname(hname, sizeof(hname)); - hent = gethostbyname(hname); - for (int i = 0; hent->h_addr_list[i]; i++) - { - ipC = inet_ntoa(*(struct in_addr*)(hent->h_addr_list[i]));//IP地址 - } - std::string ip = std::string (ipC); -// // 获取端口 -// if(getReleasePort(port)) //在port的基础上获取一个可用的port,不知道为何没有效果 -// { -// std::cout << "可用的端口号为:" << port << std::endl; -// } -// else -// { -// std::cout << "获取可用端口号失败!" << std::endl; -// } - //写入文件 "test.conf" - std::string node = "node" + std::to_string(nodeIndex); - std::ofstream outfile; - outfile.open("test.conf", std::ios::app); //打开文件并追加写入 - if (!outfile.is_open()) - { - std::cout << "打开文件失败!" << std::endl; - exit(EXIT_FAILURE); - } - outfile << node+"ip="+ip << std::endl; - outfile << node+"port="+std::to_string(port)<< std::endl; - outfile.close(); - - //创建服务器 - muduo::net::InetAddress address(ip, port); - - // 创建TcpServer对象 - m_muduo_server = std::make_shared(&m_eventLoop, address, "RpcProvider"); - - // 绑定连接回调和消息读写回调方法 分离了网络代码和业务代码 - /* - bind的作用: - 如果不使用std::bind将回调函数和TcpConnection对象绑定起来,那么在回调函数中就无法直接访问和修改TcpConnection对象的状态。因为回调函数是作为一个独立的函数被调用的,它没有当前对象的上下文信息(即this指针),也就无法直接访问当前对象的状态。 - 如果要在回调函数中访问和修改TcpConnection对象的状态,需要通过参数的形式将当前对象的指针传递进去,并且保证回调函数在当前对象的上下文环境中被调用。这种方式比较复杂,容易出错,也不便于代码的编写和维护。因此,使用std::bind将回调函数和TcpConnection对象绑定起来,可以更加方便、直观地访问和修改对象的状态,同时也可以避免一些常见的错误。 - */ - m_muduo_server->setConnectionCallback(std::bind(&RpcProvider::OnConnection, this, std::placeholders::_1)); - m_muduo_server->setMessageCallback(std::bind(&RpcProvider::OnMessage, this, std::placeholders::_1, - std::placeholders::_2, std::placeholders::_3)); - - // 设置muduo库的线程数量 - m_muduo_server->setThreadNum(4); - - // rpc服务端准备启动,打印信息 - std::cout << "RpcProvider start service at ip:" << ip << " port:" << port << std::endl; - - // 启动网络服务 - m_muduo_server->start(); - m_eventLoop.loop(); - /* - 这段代码是在启动网络服务和事件循环,其中server是一个TcpServer对象,m_eventLoop是一个EventLoop对象。 +void RpcProvider::Run(int nodeIndex, short port) { + //获取可用ip + char *ipC; + char hname[128]; + struct hostent *hent; + gethostname(hname, sizeof(hname)); + hent = gethostbyname(hname); + for (int i = 0; hent->h_addr_list[i]; i++) { + ipC = inet_ntoa(*(struct in_addr *)(hent->h_addr_list[i])); // IP地址 + } + std::string ip = std::string(ipC); + // // 获取端口 + // if(getReleasePort(port)) //在port的基础上获取一个可用的port,不知道为何没有效果 + // { + // std::cout << "可用的端口号为:" << port << std::endl; + // } + // else + // { + // std::cout << "获取可用端口号失败!" << std::endl; + // } + //写入文件 "test.conf" + std::string node = "node" + std::to_string(nodeIndex); + std::ofstream outfile; + outfile.open("test.conf", std::ios::app); //打开文件并追加写入 + if (!outfile.is_open()) { + std::cout << "打开文件失败!" 
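// (The test.conf entries appended below are later read back by MprpcConfig::LoadConfigFile.
// For nodeIndex 0, port 7788 and a discovered ip of 192.168.1.10 -- illustrative values --
// the two lines written would be:
//   node0ip=192.168.1.10
//   node0port=7788
// Since the file is opened with std::ios::app, repeated runs accumulate entries, which
// suggests starting each cluster run from a fresh test.conf.)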
<< std::endl;
+    exit(EXIT_FAILURE);
+  }
+  outfile << node + "ip=" + ip << std::endl;
+  outfile << node + "port=" + std::to_string(port) << std::endl;
+  outfile.close();
+
+  // Create the server
+  muduo::net::InetAddress address(ip, port);
+
+  // Create the TcpServer object
+  m_muduo_server = std::make_shared<muduo::net::TcpServer>(&m_eventLoop, address, "RpcProvider");
+
+  // Bind the connection callback and message callback: separates network code from business code
+  /*
+  Why std::bind: without binding the member callback to this object, the callback would be
+  invoked as a free function with no object context (no this pointer) and could not read or
+  modify the object's state. Threading the object pointer through by hand, and guaranteeing the
+  callback runs in the right context, is more complex and error-prone; std::bind keeps the
+  callback tied to this object, which is simpler, clearer, and avoids common mistakes.
+  */
+  m_muduo_server->setConnectionCallback(std::bind(&RpcProvider::OnConnection, this, std::placeholders::_1));
+  m_muduo_server->setMessageCallback(
+      std::bind(&RpcProvider::OnMessage, this, std::placeholders::_1, std::placeholders::_2, std::placeholders::_3));
+
+  // Set the number of muduo library threads
+  m_muduo_server->setThreadNum(4);
+
+  // The rpc server is about to start; print its info
+  std::cout << "RpcProvider start service at ip:" << ip << " port:" << port << std::endl;
+
+  // Start the network service
+  m_muduo_server->start();
+  m_eventLoop.loop();
+  /*
+  This starts the network service and then the event loop; server is a TcpServer and m_eventLoop
+  an EventLoop. In muduo, TcpServer wraps the low-level networking (establishing and closing TCP
+  connections, receiving client data, sending data back); calling start() brings the service up
+  and listens for incoming connections. EventLoop wraps the core event-loop logic (timers, IO
+  events, signals); calling loop() enters the loop and dispatches events as they arrive. The
+  network service is started before the loop because it is the loop's foundation; the loop itself
+  is the heart of the application, where every event ends up being handled.
+  */
 }

 // Callback for new socket connections
-void RpcProvider::OnConnection(const muduo::net::TcpConnectionPtr &conn)
-{
-
-    // For a brand-new connection there is nothing to do; just accept it
-    if (!conn->connected())
-    {
-        // the connection to the rpc client has been closed
-        conn->shutdown();
-    }
+void RpcProvider::OnConnection(const muduo::net::TcpConnectionPtr &conn) {
+  // For a brand-new connection there is nothing to do; just accept it
+  if (!conn->connected()) {
+    // the connection to the rpc client has been closed
+    conn->shutdown();
+  }
 }

 /*
@@ -139,126 +130,109 @@ std::string insert和copy方法
 // Read/write event callback for established connections: OnMessage fires whenever a remote rpc
 // call request arrives. Whatever arrives here is necessarily a remote call request, so this
 // function must parse it and use the service name, method name and arguments to CallMethod the
 // local business logic.
-void RpcProvider::OnMessage(const muduo::net::TcpConnectionPtr &conn,
-                            muduo::net::Buffer *buffer,
-                            muduo::Timestamp)
-{
-    // Byte stream of the remote rpc call request: Login args
-    std::string recv_buf = buffer->retrieveAllAsString();
-
-    // Read the first 4 bytes of the stream
-    uint32_t header_size = 0;
-    recv_buf.copy((char *)&header_size, 4, 0);
-
-    // Using header_size, take the raw header bytes and deserialize them into the rpc request details
-    std::string rpc_header_str = recv_buf.substr(4, header_size);
-    RPC::RpcHeader rpcHeader;
-    std::string service_name;
-    std::string method_name;
-    uint32_t args_size;
-    if (rpcHeader.ParseFromString(rpc_header_str))
-    {
-        // header deserialized successfully
-        service_name = rpcHeader.service_name();
-        method_name = rpcHeader.method_name();
-        args_size = rpcHeader.args_size();
-    }
-    else
-    {
-        // header deserialization failed
-        std::cout << "rpc_header_str:" << rpc_header_str << " parse error!" << std::endl;
-        return;
-    }
-
-    // Get the byte stream of the rpc method arguments
-    std::string args_str = recv_buf.substr(4 + header_size, args_size);
-
-    // Print debug info
-    // std::cout << "============================================" << std::endl;
-    // std::cout << "header_size: " << header_size << std::endl;
-    // std::cout << "rpc_header_str: " << rpc_header_str << std::endl;
-    // std::cout << "service_name: " << service_name << std::endl;
-    // std::cout << "method_name: " << method_name << std::endl;
-    // std::cout << "args_str: " << args_str << std::endl;
-    // std::cout << "============================================" << std::endl;
-
-    // Look up the service and method objects
-    auto it = m_serviceMap.find(service_name);
-    if (it == m_serviceMap.end())
-    {
-        std::cout << "service:" << service_name << " is not exist!" << std::endl;
-        std::cout << "currently registered services: ";
-        for (auto item : m_serviceMap)
-        {
-            std::cout << item.first << " ";
-        }
-        std::cout << std::endl;
-        return;
-    }
-
-    auto mit = it->second.m_methodMap.find(method_name);
-    if (mit == it->second.m_methodMap.end())
-    {
-        std::cout << service_name << ":" << method_name << " is not exist!" << std::endl;
-        return;
-    }
-
-    google::protobuf::Service *service = it->second.m_service; // the service object, e.g. new UserService
-    const google::protobuf::MethodDescriptor *method = mit->second; // the method object, e.g. Login
-
-    // Create the request and response objects for the call; the request is filled by deserializing args
-    google::protobuf::Message *request = service->GetRequestPrototype(method).New();
-    if (!request->ParseFromString(args_str))
-    {
-        std::cout << "request parse error, content:" << args_str << std::endl;
-        return;
-    }
-    google::protobuf::Message *response = service->GetResponsePrototype(method).New();
-
-    // Bind a Closure callback for the method call below
-    // the closure runs after the local method finishes and handles serializing and sending back the response
-    google::protobuf::Closure *done = google::protobuf::NewCallback(this,
-                                                                    &RpcProvider::SendRpcResponse,
-                                                                    conn, response);
-
-    // Invoke the method published on this rpc node, as requested by the remote caller
-    // new UserService().Login(controller, request, response, done)
-
-    /*
-    Why does service->CallMethod directly end up in the remote business method? At runtime,
-    service is the registered service object. The user's service class inherits the
-    protoc-generated serviceRpc class, which inherits google::protobuf::Service. The user's
-    class does not override CallMethod; the generated serviceRpc class overrides that pure
-    virtual and, based on the passed-in descriptor, dispatches to the generated stub method
-    (e.g. Login). Since the user's class overrides that stub, the call lands in the user's
-    implementation. Neat.
-    */
-    // the actual method call
-    service->CallMethod(method, nullptr, request, response, done);
-}
+void RpcProvider::OnMessage(const muduo::net::TcpConnectionPtr &conn, muduo::net::Buffer *buffer, muduo::Timestamp) {
+  // Byte stream of the remote rpc call request: Login args
+  std::string recv_buf = buffer->retrieveAllAsString();
+
+  // Read the first 4 bytes of the stream
+  uint32_t header_size = 0;
+  recv_buf.copy((char *)&header_size, 4, 0);
+
+  // Using header_size, take the raw header bytes and deserialize them into the rpc request details
+  std::string rpc_header_str = recv_buf.substr(4, header_size);
+  RPC::RpcHeader rpcHeader;
+  std::string service_name;
+  std::string method_name;
+  uint32_t args_size;
+  if (rpcHeader.ParseFromString(rpc_header_str)) {
+    // header deserialized successfully
+    service_name = rpcHeader.service_name();
+    method_name = rpcHeader.method_name();
+    args_size = rpcHeader.args_size();
+  } else {
+    // header deserialization failed
+    std::cout << "rpc_header_str:" << rpc_header_str << " parse error!" << std::endl;
+    return;
+  }
+
+  // Get the byte stream of the rpc method arguments
+  std::string args_str = recv_buf.substr(4 + header_size, args_size);
+
+  // Print debug info
+  // std::cout << "============================================" << std::endl;
+  // std::cout << "header_size: " << header_size << std::endl;
+  // std::cout << "rpc_header_str: " << rpc_header_str << std::endl;
+  // std::cout << "service_name: " << service_name << std::endl;
+  // std::cout << "method_name: " << method_name << std::endl;
+  // std::cout << "args_str: " << args_str << std::endl;
+  // std::cout << "============================================" << std::endl;

+  // Look up the service and method objects
+  auto it = m_serviceMap.find(service_name);
+  if (it == m_serviceMap.end()) {
+    std::cout << "service:" << service_name << " is not exist!" << std::endl;
+    std::cout << "currently registered services: ";
+    for (auto item : m_serviceMap) {
+      std::cout << item.first << " ";
+    }
+    std::cout << std::endl;
+    return;
+  }
+
+  auto mit = it->second.m_methodMap.find(method_name);
+  if (mit == it->second.m_methodMap.end()) {
+    std::cout << service_name << ":" << method_name << " is not exist!" << std::endl;
+    return;
+  }
+
+  google::protobuf::Service *service = it->second.m_service;       // the service object, e.g. new UserService
+  const google::protobuf::MethodDescriptor *method = mit->second;  // the method object, e.g. Login
+
+  // Create the request and response objects for the call; the request is filled by deserializing args
+  google::protobuf::Message *request = service->GetRequestPrototype(method).New();
+  if (!request->ParseFromString(args_str)) {
+    std::cout << "request parse error, content:" << args_str << std::endl;
+    return;
+  }
+  google::protobuf::Message *response = service->GetResponsePrototype(method).New();
+
+  // Bind a Closure callback for the method call below
+  // the closure runs after the local method finishes and handles serializing and sending back the response
+  google::protobuf::Closure *done =
+      google::protobuf::NewCallback<RpcProvider, const muduo::net::TcpConnectionPtr &, google::protobuf::Message *>(
+          this, &RpcProvider::SendRpcResponse, conn, response);
+
+  // Invoke the method published on this rpc node, as requested by the remote caller
+  // new UserService().Login(controller, request, response, done)
+
+  /*
+  Why does service->CallMethod directly end up in the remote business method? At runtime,
+  service is the registered service object. The user's service class inherits the
+  protoc-generated serviceRpc class, which inherits google::protobuf::Service. The user's class
+  does not override CallMethod; the generated serviceRpc class overrides that pure virtual and,
+  based on the passed-in descriptor, dispatches to the generated stub method (e.g. Login).
+  Since the user's class overrides that stub, the call lands in the user's implementation. Neat.
+  */
+  // the actual method call
+  service->CallMethod(method, nullptr, request, response, done);
+}

 // Closure callback: serializes the rpc response and sends it back over the network
-void RpcProvider::SendRpcResponse(const muduo::net::TcpConnectionPtr &conn, google::protobuf::Message *response)
-{
-    std::string response_str;
-    if (response->SerializeToString(&response_str)) // serialize the response
-    {
-        // on success, send the rpc result back to the caller over the network
-        conn->send(response_str);
-    }
-    else
-    {
-        std::cout << "serialize response_str error!" << std::endl;
-    }
-// conn->shutdown(); // in http-style short-connection mode the provider would close here; switched to long-lived connections, so stay open
+void RpcProvider::SendRpcResponse(const muduo::net::TcpConnectionPtr &conn, google::protobuf::Message *response) {
+  std::string response_str;
+  if (response->SerializeToString(&response_str))  // serialize the response
+  {
+    // on success, send the rpc result back to the caller over the network
+    conn->send(response_str);
+  } else {
+    std::cout << "serialize response_str error!"
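// (How execution reaches this function: NewCallback above wrapped
// this->SendRpcResponse(conn, response) in a one-shot google::protobuf::Closure, and the user's
// method implementation invokes done->Run() once the response message is filled in. A
// hand-rolled equivalent of what NewCallback builds -- sketch:
//   class ResponseClosure : public google::protobuf::Closure {
//    public:
//     ResponseClosure(RpcProvider *p, muduo::net::TcpConnectionPtr c, google::protobuf::Message *r)
//         : p_(p), c_(c), r_(r) {}
//     void Run() override {
//       p_->SendRpcResponse(c_, r_);
//       delete this;  // NewCallback closures likewise self-delete, so each is single-use
//     }
//    private:
//     RpcProvider *p_;
//     muduo::net::TcpConnectionPtr c_;
//     google::protobuf::Message *r_;
//   };)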
<< std::endl; + } + // conn->shutdown(); // 模拟http的短链接服务,由rpcprovider主动断开连接 //改为长连接,不主动断开 } RpcProvider::~RpcProvider() { - std::cout<<"[func - RpcProvider::~RpcProvider()]: ip和port信息:"<< m_muduo_server->ipPort()<ipPort() << std::endl; + m_eventLoop.quit(); + // m_muduo_server. 怎么没有stop函数,奇奇怪怪,看csdn上面的教程也没有要停止,甚至上面那个都没有 } diff --git a/src/skipList/include/skipList.h b/src/skipList/include/skipList.h index e52e4bf..05de0b7 100644 --- a/src/skipList/include/skipList.h +++ b/src/skipList/include/skipList.h @@ -12,142 +12,139 @@ > Description: ************************************************************************/ -#include -#include #include +#include #include -#include #include +#include +#include #define STORE_FILE "store/dumpFile" - static std::string delimiter = ":"; -//Class template to implement node -template +// Class template to implement node +template class Node { + public: + Node() {} -public: - - Node() {} + Node(K k, V v, int); - Node(K k, V v, int); + ~Node(); - ~Node(); + K get_key() const; - K get_key() const; + V get_value() const; - V get_value() const; + void set_value(V); - void set_value(V); + // Linear array to hold pointers to next node of different level + Node **forward; - // Linear array to hold pointers to next node of different level - Node **forward; + int node_level; - int node_level; - -private: - K key; - V value; + private: + K key; + V value; }; -template +template Node::Node(const K k, const V v, int level) { - this->key = k; - this->value = v; - this->node_level = level; + this->key = k; + this->value = v; + this->node_level = level; - // level + 1, because array index is from 0 - level - this->forward = new Node*[level+1]; + // level + 1, because array index is from 0 - level + this->forward = new Node *[level + 1]; - // Fill forward array with 0(NULL) - memset(this->forward, 0, sizeof(Node*)*(level+1)); + // Fill forward array with 0(NULL) + memset(this->forward, 0, sizeof(Node *) * (level + 1)); }; -template +template Node::~Node() { - delete []forward; + delete[] forward; }; -template +template K Node::get_key() const { - return key; + return key; }; -template +template V Node::get_value() const { - return value; + return value; }; -template +template void Node::set_value(V value) { - this->value=value; + this->value = value; }; -//Class template to implement node -template +// Class template to implement node +template class SkipListDump { -public: - friend class boost::serialization::access; - - template - void serialize(Archive &ar, const unsigned int version) { - ar & keyDumpVt_; - ar & valDumpVt_; - } - std::vector keyDumpVt_; - std::vector valDumpVt_; -public: - void insert(const Node &node); + public: + friend class boost::serialization::access; + + template + void serialize(Archive &ar, const unsigned int version) { + ar &keyDumpVt_; + ar &valDumpVt_; + } + std::vector keyDumpVt_; + std::vector valDumpVt_; + + public: + void insert(const Node &node); }; // Class template for Skip list template class SkipList { - -public: - SkipList(int); - ~SkipList(); - int get_random_level(); - Node* create_node(K, V, int); - int insert_element(K, V); - void display_list(); - bool search_element(K, V &value); - void delete_element(K); - void insert_set_element(K&,V&); - std::string dump_file(); - void load_file(const std::string &dumpStr); - //递归删除节点 - void clear(Node*); - int size(); - -private: - void get_key_value_from_string(const std::string& str, std::string* key, std::string* value); - bool is_valid_string(const std::string& str); - -private: - // 
Maximum level of the skip list - int _max_level; - - // current level of skip list - int _skip_list_level; - - // pointer to header node - Node *_header; - - // file operator - std::ofstream _file_writer; - std::ifstream _file_reader; - - // skiplist current element count - int _element_count; - - std::mutex _mtx; // mutex for critical section + public: + SkipList(int); + ~SkipList(); + int get_random_level(); + Node *create_node(K, V, int); + int insert_element(K, V); + void display_list(); + bool search_element(K, V &value); + void delete_element(K); + void insert_set_element(K &, V &); + std::string dump_file(); + void load_file(const std::string &dumpStr); + //递归删除节点 + void clear(Node *); + int size(); + + private: + void get_key_value_from_string(const std::string &str, std::string *key, std::string *value); + bool is_valid_string(const std::string &str); + + private: + // Maximum level of the skip list + int _max_level; + + // current level of skip list + int _skip_list_level; + + // pointer to header node + Node *_header; + + // file operator + std::ofstream _file_writer; + std::ifstream _file_reader; + + // skiplist current element count + int _element_count; + + std::mutex _mtx; // mutex for critical section }; // create new node -template -Node* SkipList::create_node(const K k, const V v, int level) { - Node *n = new Node(k, v, level); - return n; +template +Node *SkipList::create_node(const K k, const V v, int level) { + Node *n = new Node(k, v, level); + return n; } // Insert given key and value in skip list @@ -173,208 +170,199 @@ level 0 1 4 9 10 30 40 | 50 | 60 70 100 +----+ */ -template +template int SkipList::insert_element(const K key, const V value) { - - _mtx.lock(); - Node *current = this->_header; - - // create update array and initialize it - // update is array which put node that the node->forward[i] should be operated later - Node *update[_max_level+1]; - memset(update, 0, sizeof(Node*)*(_max_level+1)); - - // start form highest level of skip list - for(int i = _skip_list_level; i >= 0; i--) { - while(current->forward[i] != NULL && current->forward[i]->get_key() < key) { - current = current->forward[i]; - } - update[i] = current; + _mtx.lock(); + Node *current = this->_header; + + // create update array and initialize it + // update is array which put node that the node->forward[i] should be operated later + Node *update[_max_level + 1]; + memset(update, 0, sizeof(Node *) * (_max_level + 1)); + + // start form highest level of skip list + for (int i = _skip_list_level; i >= 0; i--) { + while (current->forward[i] != NULL && current->forward[i]->get_key() < key) { + current = current->forward[i]; } + update[i] = current; + } - // reached level 0 and forward pointer to right node, which is desired to insert key. - current = current->forward[0]; + // reached level 0 and forward pointer to right node, which is desired to insert key. 
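  // (How the update[] array drives the splice: after the descent above, update[i] holds the
  // rightmost node on level i whose key is < key, so the insertion below is O(1) pointer
  // surgery per level -- for each level i up to the new node's random level:
  //   inserted_node->forward[i] = update[i]->forward[i];  // new node adopts old successor
  //   update[i]->forward[i] = inserted_node;              // predecessor now points at new node
  // With the coin-flip levels from get_random_level(), expected search/insert cost is O(log n).)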
+ current = current->forward[0]; - // if current node have key equal to searched key, we get it - if (current != NULL && current->get_key() == key) { - std::cout << "key: " << key << ", exists" << std::endl; - _mtx.unlock(); - return 1; + // if current node have key equal to searched key, we get it + if (current != NULL && current->get_key() == key) { + std::cout << "key: " << key << ", exists" << std::endl; + _mtx.unlock(); + return 1; + } + + // if current is NULL that means we have reached to end of the level + // if current's key is not equal to key that means we have to insert node between update[0] and current node + if (current == NULL || current->get_key() != key) { + // Generate a random level for node + int random_level = get_random_level(); + + // If random level is greater thar skip list's current level, initialize update value with pointer to header + if (random_level > _skip_list_level) { + for (int i = _skip_list_level + 1; i < random_level + 1; i++) { + update[i] = _header; + } + _skip_list_level = random_level; } - // if current is NULL that means we have reached to end of the level - // if current's key is not equal to key that means we have to insert node between update[0] and current node - if (current == NULL || current->get_key() != key ) { - - // Generate a random level for node - int random_level = get_random_level(); - - // If random level is greater thar skip list's current level, initialize update value with pointer to header - if (random_level > _skip_list_level) { - for (int i = _skip_list_level+1; i < random_level+1; i++) { - update[i] = _header; - } - _skip_list_level = random_level; - } - - // create new node with random level generated - Node* inserted_node = create_node(key, value, random_level); - - // insert node - for (int i = 0; i <= random_level; i++) { - inserted_node->forward[i] = update[i]->forward[i]; - update[i]->forward[i] = inserted_node; - } - std::cout << "Successfully inserted key:" << key << ", value:" << value << std::endl; - _element_count ++; + // create new node with random level generated + Node *inserted_node = create_node(key, value, random_level); + + // insert node + for (int i = 0; i <= random_level; i++) { + inserted_node->forward[i] = update[i]->forward[i]; + update[i]->forward[i] = inserted_node; } - _mtx.unlock(); - return 0; + std::cout << "Successfully inserted key:" << key << ", value:" << value << std::endl; + _element_count++; + } + _mtx.unlock(); + return 0; } // Display skip list -template +template void SkipList::display_list() { - - std::cout << "\n*****Skip List*****"<<"\n"; - for (int i = 0; i <= _skip_list_level; i++) { - Node *node = this->_header->forward[i]; - std::cout << "Level " << i << ": "; - while (node != NULL) { - std::cout << node->get_key() << ":" << node->get_value() << ";"; - node = node->forward[i]; - } - std::cout << std::endl; + std::cout << "\n*****Skip List*****" + << "\n"; + for (int i = 0; i <= _skip_list_level; i++) { + Node *node = this->_header->forward[i]; + std::cout << "Level " << i << ": "; + while (node != NULL) { + std::cout << node->get_key() << ":" << node->get_value() << ";"; + node = node->forward[i]; } + std::cout << std::endl; + } } // todo 对dump 和 load 后面可能要考虑加锁的问题 // Dump data in memory to file -template +template std::string SkipList::dump_file() { - - // std::cout << "dump_file-----------------" << std::endl; - // - // - // _file_writer.open(STORE_FILE); - Node *node = this->_header->forward[0]; - SkipListDump dumper; - while (node != nullptr) { - dumper.insert(*node); - // 
_file_writer << node->get_key() << ":" << node->get_value() << "\n";
-        // std::cout << node->get_key() << ":" << node->get_value() << ";\n";
-        node = node->forward[0];
-    }
-    std::stringstream ss;
-    boost::archive::text_oarchive oa(ss);
-    oa<< dumper;
-    return ss.str();
-    // _file_writer.flush();
-    // _file_writer.close();
+  // std::cout << "dump_file-----------------" << std::endl;
+  //
+  //
+  // _file_writer.open(STORE_FILE);
+  Node<K, V> *node = this->_header->forward[0];
+  SkipListDump<K, V> dumper;
+  while (node != nullptr) {
+    dumper.insert(*node);
+    // _file_writer << node->get_key() << ":" << node->get_value() << "\n";
+    // std::cout << node->get_key() << ":" << node->get_value() << ";\n";
+    node = node->forward[0];
+  }
+  std::stringstream ss;
+  boost::archive::text_oarchive oa(ss);
+  oa << dumper;
+  return ss.str();
+  // _file_writer.flush();
+  // _file_writer.close();
 }

 // Load data from disk
-template<typename K, typename V>
+template <typename K, typename V>
 void SkipList<K, V>::load_file(const std::string &dumpStr) {
-    // _file_reader.open(STORE_FILE);
-    // std::cout << "load_file-----------------" << std::endl;
-    // std::string line;
-    // std::string* key = new std::string();
-    // std::string* value = new std::string();
-    // while (getline(_file_reader, line)) {
-    //     get_key_value_from_string(line, key, value);
-    //     if (key->empty() || value->empty()) {
-    //         continue;
-    //     }
-    //     // Define key as int type
-    //     insert_element(stoi(*key), *value);
-    //     std::cout << "key:" << *key << "value:" << *value << std::endl;
-    // }
-    // delete key;
-    // delete value;
-    // _file_reader.close();
-
-    if(dumpStr.empty()) {
-        return ;
-    }
-    SkipListDump<K, V> dumper;
-    std::stringstream iss(dumpStr);
-    boost::archive::text_iarchive ia(iss);
-    ia >> dumper;
-    for(int i = 0;i<dumper.keyDumpVt_.size();++i) {
-        insert_element(dumper.keyDumpVt_[i], dumper.keyDumpVt_[i]);
-    }
+  // _file_reader.open(STORE_FILE);
+  // std::cout << "load_file-----------------" << std::endl;
+  // std::string line;
+  // std::string* key = new std::string();
+  // std::string* value = new std::string();
+  // while (getline(_file_reader, line)) {
+  //     get_key_value_from_string(line, key, value);
+  //     if (key->empty() || value->empty()) {
+  //         continue;
+  //     }
+  //     // Define key as int type
+  //     insert_element(stoi(*key), *value);
+  //     std::cout << "key:" << *key << "value:" << *value << std::endl;
+  // }
+  // delete key;
+  // delete value;
+  // _file_reader.close();
+
+  if (dumpStr.empty()) {
+    return;
+  }
+  SkipListDump<K, V> dumper;
+  std::stringstream iss(dumpStr);
+  boost::archive::text_iarchive ia(iss);
+  ia >> dumper;
+  for (int i = 0; i < dumper.keyDumpVt_.size(); ++i) {
+    insert_element(dumper.keyDumpVt_[i], dumper.valDumpVt_[i]);
+  }
 }

 // Get current SkipList size
-template<typename K, typename V>
-int SkipList<K, V>::size() {
-    return _element_count;
-}
+template <typename K, typename V>
+int SkipList<K, V>::size() {
+  return _element_count;
+}

-template<typename K, typename V>
-void SkipList<K, V>::get_key_value_from_string(const std::string& str, std::string* key, std::string* value) {
-
-    if(!is_valid_string(str)) {
-        return;
-    }
-    *key = str.substr(0, str.find(delimiter));
-    *value = str.substr(str.find(delimiter)+1, str.length());
-}
+template <typename K, typename V>
+void SkipList<K, V>::get_key_value_from_string(const std::string &str, std::string *key, std::string *value) {
+  if (!is_valid_string(str)) {
+    return;
+  }
+  *key = str.substr(0, str.find(delimiter));
+  *value = str.substr(str.find(delimiter) + 1, str.length());
+}

-template<typename K, typename V>
-bool SkipList<K, V>::is_valid_string(const std::string& str) {
-
-    if (str.empty()) {
-        return false;
-    }
-    if (str.find(delimiter) == std::string::npos) {
-        return false;
-    }
-    return true;
-}
+template <typename K, typename V>
+bool SkipList<K, V>::is_valid_string(const std::string &str) {
+  if (str.empty()) {
+    return false;
+  }
+  if (str.find(delimiter) == std::string::npos) {
+    return false;
+  }
+  return true;
+}

 // Delete element from skip list
-template<typename K, typename V>
+template <typename K, typename V>
 void SkipList<K, V>::delete_element(K key) {
-
-    _mtx.lock();
-    Node<K, V> *current = this->_header;
-    Node<K, V> *update[_max_level+1];
-    memset(update, 0, sizeof(Node<K, V>*)*(_max_level+1));
-
-    //
start from highest level of skip list - for (int i = _skip_list_level; i >= 0; i--) { - while (current->forward[i] !=NULL && current->forward[i]->get_key() < key) { - current = current->forward[i]; - } - update[i] = current; + _mtx.lock(); + Node *current = this->_header; + Node *update[_max_level + 1]; + memset(update, 0, sizeof(Node *) * (_max_level + 1)); + + // start from highest level of skip list + for (int i = _skip_list_level; i >= 0; i--) { + while (current->forward[i] != NULL && current->forward[i]->get_key() < key) { + current = current->forward[i]; } + update[i] = current; + } - current = current->forward[0]; - if (current != NULL && current->get_key() == key) { - - // start for lowest level and delete the current node of each level - for (int i = 0; i <= _skip_list_level; i++) { - - // if at level i, next node is not target node, break the loop. - if (update[i]->forward[i] != current) - break; - - update[i]->forward[i] = current->forward[i]; - } + current = current->forward[0]; + if (current != NULL && current->get_key() == key) { + // start for lowest level and delete the current node of each level + for (int i = 0; i <= _skip_list_level; i++) { + // if at level i, next node is not target node, break the loop. + if (update[i]->forward[i] != current) break; - // Remove levels which have no elements - while (_skip_list_level > 0 && _header->forward[_skip_list_level] == 0) { - _skip_list_level --; - } + update[i]->forward[i] = current->forward[i]; + } - std::cout << "Successfully deleted key "<< key << std::endl; - delete current; - _element_count --; + // Remove levels which have no elements + while (_skip_list_level > 0 && _header->forward[_skip_list_level] == 0) { + _skip_list_level--; } - _mtx.unlock(); - return; + + std::cout << "Successfully deleted key " << key << std::endl; + delete current; + _element_count--; + } + _mtx.unlock(); + return; } /** @@ -382,13 +370,13 @@ void SkipList::delete_element(K key) { * insert_element是插入新元素, * insert_set_element是插入元素,如果元素存在则改变其值 */ -template +template void SkipList::insert_set_element(K &key, V &value) { - V oldValue; - if(search_element(key,oldValue)) { - delete_element(key); - } - insert_element(key,value); + V oldValue; + if (search_element(key, oldValue)) { + delete_element(key); + } + insert_element(key, value); } // Search for element in skip list @@ -410,88 +398,82 @@ level 1 1 4 10 30 50| 70 100 | level 0 1 4 9 10 30 40 50+-->60 70 100 */ -template -bool SkipList::search_element(K key,V &value) { - - std::cout << "search_element-----------------" << std::endl; - Node *current = _header; - - // start from highest level of skip list - for (int i = _skip_list_level; i >= 0; i--) { - while (current->forward[i] && current->forward[i]->get_key() < key) { - current = current->forward[i]; - } +template +bool SkipList::search_element(K key, V &value) { + std::cout << "search_element-----------------" << std::endl; + Node *current = _header; + + // start from highest level of skip list + for (int i = _skip_list_level; i >= 0; i--) { + while (current->forward[i] && current->forward[i]->get_key() < key) { + current = current->forward[i]; } + } - //reached level 0 and advance pointer to right node, which we search - current = current->forward[0]; + // reached level 0 and advance pointer to right node, which we search + current = current->forward[0]; - // if current node have key equal to searched key, we get it - if (current and current->get_key() == key) { - value = current->get_value(); - std::cout << "Found key: " << key << ", value: 
" << current->get_value() << std::endl; - return true; - } + // if current node have key equal to searched key, we get it + if (current and current->get_key() == key) { + value = current->get_value(); + std::cout << "Found key: " << key << ", value: " << current->get_value() << std::endl; + return true; + } - std::cout << "Not Found Key:" << key << std::endl; - return false; + std::cout << "Not Found Key:" << key << std::endl; + return false; } -template +template void SkipListDump::insert(const Node &node) { - keyDumpVt_.emplace_back(node.get_key()); - valDumpVt_.emplace_back(node.get_value()); + keyDumpVt_.emplace_back(node.get_key()); + valDumpVt_.emplace_back(node.get_value()); } // construct skip list -template +template SkipList::SkipList(int max_level) { - - this->_max_level = max_level; - this->_skip_list_level = 0; - this->_element_count = 0; - - // create header node and initialize key and value to null - K k; - V v; - this->_header = new Node(k, v, _max_level); + this->_max_level = max_level; + this->_skip_list_level = 0; + this->_element_count = 0; + + // create header node and initialize key and value to null + K k; + V v; + this->_header = new Node(k, v, _max_level); }; -template +template SkipList::~SkipList() { - - if (_file_writer.is_open()) { - _file_writer.close(); - } - if (_file_reader.is_open()) { - _file_reader.close(); - } - - //递归删除跳表链条 - if(_header->forward[0]!=nullptr){ - clear(_header->forward[0]); - } - delete(_header); - + if (_file_writer.is_open()) { + _file_writer.close(); + } + if (_file_reader.is_open()) { + _file_reader.close(); + } + + //递归删除跳表链条 + if (_header->forward[0] != nullptr) { + clear(_header->forward[0]); + } + delete (_header); } template -void SkipList::clear(Node * cur) -{ - if(cur->forward[0]!=nullptr){ - clear(cur->forward[0]); - } - delete(cur); +void SkipList::clear(Node *cur) { + if (cur->forward[0] != nullptr) { + clear(cur->forward[0]); + } + delete (cur); } -template -int SkipList::get_random_level(){ - - int k = 1; - while (rand() % 2) { - k++; - } - k = (k < _max_level) ? k : _max_level; - return k; +template +int SkipList::get_random_level() { + int k = 1; + while (rand() % 2) { + k++; + } + k = (k < _max_level) ? k : _max_level; + return k; }; // vim: et tw=100 ts=4 sw=4 cc=120 -#endif //SKIPLIST_H +#endif // SKIPLIST_H diff --git a/test/defer_run.cpp b/test/defer_run.cpp index b317f82..24658fa 100644 --- a/test/defer_run.cpp +++ b/test/defer_run.cpp @@ -7,22 +7,18 @@ using namespace std; -void testFun1(const string& name) { - cout< class Defer { -public: - Defer(F&& f) : m_func(std::forward(f)) {} - Defer(const F& f) : m_func(f) {} - ~Defer() { - m_func(); - } + public: + Defer(F&& f) : m_func(std::forward(f)) {} + Defer(const F& f) : m_func(f) {} + ~Defer() { m_func(); } - Defer(const Defer& e) = delete; - Defer& operator=(const Defer& e) = delete; + Defer(const Defer& e) = delete; + Defer& operator=(const Defer& e) = delete; -private: - F m_func; + private: + F m_func; }; #define _CONCAT(a, b) a##b @@ -29,4 +27,4 @@ class Defer { #undef DEFER #define DEFER _MAKE_DEFER_(__LINE__) -#endif //KVRAFTCPP_DEFER_H +#endif // KVRAFTCPP_DEFER_H diff --git a/test/run.cpp b/test/run.cpp index ec62299..b31abdf 100644 --- a/test/run.cpp +++ b/test/run.cpp @@ -1,65 +1,64 @@ // // Created by swx on 24-1-5. 
//
-#include
 #include
-#include
+#include
 #include
-#include
+#include
 #include
+#include

 class SerializablePair {
-public:
-    SerializablePair() = default;
-    SerializablePair(const std::string& first, const std::string& second)
-        : first(first), second(second) {}
+ public:
+  SerializablePair() = default;
+  SerializablePair(const std::string& first, const std::string& second) : first(first), second(second) {}

-    template<class Archive>
-    void serialize(Archive & ar, const unsigned int version) {
-        ar & first;
-        ar & second;
-    }
+  template <class Archive>
+  void serialize(Archive& ar, const unsigned int version) {
+    ar& first;
+    ar& second;
+  }

-private:
-    std::string first;
-    std::string second;
+ private:
+  std::string first;
+  std::string second;
 };

 int main() {
-    // Create the vector<std::pair<std::string, std::string>> object
-    std::vector<std::pair<std::string, std::string>> data;
-    data.emplace_back("key1", "value1");
-    data.emplace_back("key2", "value2");
-    data.emplace_back("key3", "value3");
+  // Create the vector<std::pair<std::string, std::string>> object
+  std::vector<std::pair<std::string, std::string>> data;
+  data.emplace_back("key1", "value1");
+  data.emplace_back("key2", "value2");
+  data.emplace_back("key3", "value3");

-    // Open an output file stream
-    std::ofstream ofs("data_vector.txt");
+  // Open an output file stream
+  std::ofstream ofs("data_vector.txt");

-    // Create a text output archive
-    boost::archive::text_oarchive oa(ofs);
+  // Create a text output archive
+  boost::archive::text_oarchive oa(ofs);

-    // Serialize the vector<pair> into the text output archive
-    oa << data;
+  // Serialize the vector<pair> into the text output archive
+  oa << data;

-    // Close the output file stream
-    ofs.close();
+  // Close the output file stream
+  ofs.close();

-    // Open an input file stream
-    std::ifstream ifs("data_vector.txt");
+  // Open an input file stream
+  std::ifstream ifs("data_vector.txt");

-    // Create a text input archive
-    boost::archive::text_iarchive ia(ifs);
+  // Create a text input archive
+  boost::archive::text_iarchive ia(ifs);

-    // Create an empty vector<pair> object
-    std::vector<std::pair<std::string, std::string>> loadedData;
+  // Create an empty vector<pair> object
+  std::vector<std::pair<std::string, std::string>> loadedData;

-    // Deserialize the data into the vector<pair> object
-    ia >> loadedData;
+  // Deserialize the data into the vector<pair> object
+  ia >> loadedData;

-    // Print the deserialized data
-    for (const auto& pair : loadedData) {
-        std::cout << "Key: " << pair.first << ", Value: " << pair.second << std::endl;
-    }
+  // Print the deserialized data
+  for (const auto& pair : loadedData) {
+    std::cout << "Key: " << pair.first << ", Value: " << pair.second << std::endl;
+  }

-    return 0;
+  return 0;
 }
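// (Companion note on the Defer utility from test/defer_run.cpp above -- its macro body was
// truncated in this diff, but the visible pieces (_CONCAT, DEFER -> _MAKE_DEFER_(__LINE__),
// and a Defer<F> that runs its callable in the destructor) support the usual scope-guard
// pattern. Hypothetical usage, assuming _MAKE_DEFER_ declares `Defer _CONCAT(defer, line) = [&]()`:
//   void demo() {
//     FILE *f = fopen("data_vector.txt", "r");
//     if (!f) return;
//     DEFER { fclose(f); };  // runs when demo() returns, via ~Defer()
//     // ... read from f ...
//   }
// Stacked DEFERs run in reverse declaration order, since locals destruct LIFO.)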