From 5dfb1725c241349587e15160624db23b74fb9d79 Mon Sep 17 00:00:00 2001 From: Saurabh Kumar Ojha <30659684+saurabhojha@users.noreply.github.com> Date: Wed, 11 Sep 2024 19:26:11 +0530 Subject: [PATCH 01/16] Add filtering to KV method returning all keys (#797) * Add implementation for filtering keys from kv store * fix function name * Add tests for KeyValueKeysWithFilters * Add documentation * Refactor and remove code duplication * fix spacing in tests * fix function signature * fix bucket name * fix comment * fix spacing * fix spacing * fix count * Resolve comments and refactor * Change method signature and fix spacing --- src/kv.c | 27 ++++++++- src/nats.h | 26 +++++++++ test/list_test.txt | 1 + test/test.c | 138 +++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 189 insertions(+), 3 deletions(-) diff --git a/src/kv.c b/src/kv.c index 2b079680c..f13963620 100644 --- a/src/kv.c +++ b/src/kv.c @@ -1176,8 +1176,8 @@ kvStore_WatchAll(kvWatcher **new_watcher, kvStore *kv, kvWatchOptions *opts) return NATS_UPDATE_ERR_STACK(s); } -natsStatus -kvStore_Keys(kvKeysList *list, kvStore *kv, kvWatchOptions *opts) +static natsStatus +_kvStore_Keys(kvKeysList *list, kvStore *kv, const char **filters, int numFilters, kvWatchOptions *opts) { natsStatus s; kvWatchOptions o; @@ -1200,7 +1200,11 @@ kvStore_Keys(kvKeysList *list, kvStore *kv, kvWatchOptions *opts) if (o.Timeout > 0) timeout = o.Timeout; - s = kvStore_WatchAll(&w, kv, &o); + if (numFilters > 0) + s = kvStore_WatchMulti(&w, kv, filters, numFilters, &o); + else + s = kvStore_WatchAll(&w, kv, &o); + if (s != NATS_OK) return NATS_UPDATE_ERR_STACK(s); @@ -1242,6 +1246,23 @@ kvStore_Keys(kvKeysList *list, kvStore *kv, kvWatchOptions *opts) return NATS_UPDATE_ERR_STACK(s); } +natsStatus +kvStore_Keys(kvKeysList *list, kvStore *kv, kvWatchOptions *opts) +{ + natsStatus s = _kvStore_Keys(list, kv, NULL, 0, opts); + return NATS_UPDATE_ERR_STACK(s); +} + +natsStatus +kvStore_KeysWithFilters(kvKeysList *list, kvStore 
*kv, const char **filters, int numFilters, kvWatchOptions *opts) +{ + if ((filters == NULL) || (numFilters <= 0)) + return nats_setDefaultError(NATS_INVALID_ARG); + + natsStatus s = _kvStore_Keys(list, kv, filters, numFilters, opts); + return NATS_UPDATE_ERR_STACK(s); +} + void kvKeysList_Destroy(kvKeysList *list) { diff --git a/src/nats.h b/src/nats.h index 24e88c190..9436412ed 100644 --- a/src/nats.h +++ b/src/nats.h @@ -7172,6 +7172,32 @@ kvStore_WatchAll(kvWatcher **new_watcher, kvStore *kv, kvWatchOptions *opts); NATS_EXTERN natsStatus kvStore_Keys(kvKeysList *list, kvStore *kv, kvWatchOptions *opts); +/** \brief Returns all keys in the bucket which matches the list of subject like filters. + * + * Get a list of the keys in a bucket filtered by a + * subject-like string, for instance "key" or "key.foo.*" or "key.>" + * Any deleted or purged keys will not be returned. + * + * \note Use #kvWatchOptions.Timeout to specify how long to wait (in milliseconds) + * to gather all keys for this bucket. If the deadline is reached, this function + * will return #NATS_TIMEOUT and no keys. + * + * \warning The user should call #kvKeysList_Destroy to release memory allocated + * for the entries list. + * + * @see kvWatchOptions_Init + * @see kvKeysList_Destroy + * @see kvStore_WatchMulti + * + * @param list the pointer to a #kvKeysList that will be initialized and filled with resulting key strings. + * @param kv the pointer to the #kvStore object. + * @param filters the list of subject filters. Cannot be `NULL`. + * @param numFilters number of filters. Cannot be 0. + * @param opts the history options, possibly `NULL`. + */ +NATS_EXTERN natsStatus +kvStore_KeysWithFilters(kvKeysList *list, kvStore *kv, const char **filters, int numFilters, kvWatchOptions *opts); + /** \brief Destroys this list of KeyValue store key strings. * * This function iterates through the list of all key strings and free them. 
diff --git a/test/list_test.txt b/test/list_test.txt index 496d5b3f8..8a8e82926 100644 --- a/test/list_test.txt +++ b/test/list_test.txt @@ -117,6 +117,7 @@ _test(KeyValueDeleteVsPurge) _test(KeyValueDiscardOldToNew) _test(KeyValueHistory) _test(KeyValueKeys) +_test(KeyValueKeysWithFilters) _test(KeyValueManager) _test(KeyValueMirrorCrossDomains) _test(KeyValueMirrorDirectGet) diff --git a/test/test.c b/test/test.c index 5c46669a4..7cdb24116 100644 --- a/test/test.c +++ b/test/test.c @@ -31470,6 +31470,144 @@ void test_KeyValueKeys(void) JS_TEARDOWN; } +void test_KeyValueKeysWithFilters(void) +{ + natsStatus s; + kvStore *kv = NULL; + kvKeysList l; + kvConfig kvc; + const char **defaultSubject = (const char *[]){">"}; + l.Count = 0; + l.Keys = NULL; + + JS_SETUP(2, 10, 14); + + test("Create KV: "); + kvConfig_Init(&kvc); + kvc.Bucket = "KVSF"; + kvc.History = 2; + s = js_CreateKeyValue(&kv, js, &kvc); + testCond(s == NATS_OK); + + test("Populate: "); + s = kvStore_PutString(NULL, kv, "a.b", "a.b"); + IFOK(s, kvStore_PutString(NULL, kv, "a.d", "a.d")); + IFOK(s, kvStore_PutString(NULL, kv, "c.d", "c.d")); + IFOK(s, kvStore_PutString(NULL, kv, "e.f", "e.f")); + IFOK(s, kvStore_PutString(NULL, kv, "e.a.f", "e.a.f")); + testCond(s == NATS_OK); + + test("Get keys with filters (bad args): List is NULL"); + s = kvStore_KeysWithFilters(NULL, kv, defaultSubject, 1, NULL); + testCond(s == NATS_INVALID_ARG); + nats_clearLastError(); + + test("Get keys with filters (bad args): filters is NULL"); + s = kvStore_KeysWithFilters(&l, kv, NULL, 0, NULL); + testCond((s == NATS_INVALID_ARG) && (l.Keys == NULL) && (l.Count == 0)); + nats_clearLastError(); + + test("Get keys with filters (bad args): numFilters is 0"); + s = kvStore_KeysWithFilters(&l, kv, defaultSubject, 0, NULL); + testCond((s == NATS_INVALID_ARG) && (l.Keys == NULL) && (l.Count == 0)); + nats_clearLastError(); + + test("Get keys with filters (bad args): numFilters is <0"); + s = kvStore_KeysWithFilters(&l, kv, 
defaultSubject, -10, NULL); + testCond((s == NATS_INVALID_ARG) && (l.Keys == NULL) && (l.Count == 0)); + nats_clearLastError(); + + test("Get keys with filters (bad args): empty string"); + const char **filter0 = (const char *[]){"a.*", "", "b.*"}; + s = kvStore_KeysWithFilters(&l, kv, filter0, 3, NULL); + testCond((s == NATS_INVALID_ARG) && (l.Keys == NULL) && (l.Count == 0)); + nats_clearLastError(); + + test("Get keys with filters (bad args): kv is NULL"); + s = kvStore_KeysWithFilters(&l, NULL, defaultSubject, 1, NULL); + testCond((s == NATS_INVALID_ARG) && (l.Keys == NULL) && (l.Count == 0)); + nats_clearLastError(); + kvKeysList_Destroy(&l); + + test("filter: a.*"); + const char **filter1 = (const char *[]){"a.*"}; + s = kvStore_KeysWithFilters(&l, kv, filter1, 1, NULL); + testCond((s == NATS_OK) && (l.Keys != NULL) && (l.Count == 2)); + nats_clearLastError(); + kvKeysList_Destroy(&l); + + test("filter: *.a.*"); + const char **filter2 = (const char *[]){"*.a.*"}; + s = kvStore_KeysWithFilters(&l, kv, filter2, 1, NULL); + testCond((s == NATS_OK) && (l.Keys != NULL) && (l.Count == 1)); + nats_clearLastError(); + kvKeysList_Destroy(&l); + + test("filter: *.a"); + const char **filter3 = (const char *[]){"*.a"}; + s = kvStore_KeysWithFilters(&l, kv, filter3, 1, NULL); + testCond((s == NATS_OK) && (l.Keys == NULL) && (l.Count == 0)); + nats_clearLastError(); + kvKeysList_Destroy(&l); + + test("filter: e.a.f"); + const char **filter4 = (const char *[]){"e.a.f"}; + s = kvStore_KeysWithFilters(&l, kv, filter4, 1, NULL); + testCond((s == NATS_OK) && (l.Keys != NULL) && (l.Count == 1)); + nats_clearLastError(); + kvKeysList_Destroy(&l); + + test("filter: >"); + const char **filter5 = (const char *[]){">"}; + s = kvStore_KeysWithFilters(&l, kv, filter5, 1, NULL); + testCond((s == NATS_OK) && (l.Keys != NULL) && (l.Count == 5)); + nats_clearLastError(); + kvKeysList_Destroy(&l); + + test("filter: multiple overlapping filters"); + const char **filter6 = (const char 
*[]){"*.a","a.*","*.a.*"}; + s = kvStore_KeysWithFilters(&l, kv, filter6, 3, NULL); + // consumer subject filters cannot overlap + testCond((s == NATS_ERR) && (l.Keys == NULL) && (l.Count == 0)); + nats_clearLastError(); + kvKeysList_Destroy(&l); + + test("filter: multiple non overlapping filters"); + const char **filter7 = (const char *[]){"a.*","e.*"}; + s = kvStore_KeysWithFilters(&l, kv, filter7, 2, NULL); + testCond((s == NATS_OK) && (l.Keys != NULL) && (l.Count == 3)); + nats_clearLastError(); + kvKeysList_Destroy(&l); + + // Delete the key and check if returned after filtering + test("Delete a.b:"); + s = kvStore_Delete(kv, "a.b"); + testCond(s == NATS_OK); + + test("a.b should not be returned post deletion") + const char **filter8 = (const char *[]){"a.b"}; + s = kvStore_KeysWithFilters(&l, kv, filter8, 1, NULL); + testCond((s == NATS_OK) && (l.Keys == NULL) && (l.Count == 0)); + nats_clearLastError(); + kvKeysList_Destroy(&l); + + // Purge the key and check if returned after filtering + test("Purge a.b:"); + s = kvStore_Purge(kv, "a.d", NULL); + testCond(s == NATS_OK); + + test("a.d should not be returned post purge") + const char **filter9 = (const char *[]){"a.d"}; + s = kvStore_KeysWithFilters(&l, kv, filter9, 1, NULL); + testCond((s == NATS_OK) && (l.Keys == NULL) && (l.Count == 0)); + nats_clearLastError(); + kvKeysList_Destroy(&l); + + kvStore_Destroy(kv); + + JS_TEARDOWN; +} + void test_KeyValueDeleteVsPurge(void) { natsStatus s; From 2d74e0288665b722eba0ff164e9febf6d331ab79 Mon Sep 17 00:00:00 2001 From: Lev <1187448+levb@users.noreply.github.com> Date: Wed, 11 Sep 2024 08:56:31 -0700 Subject: [PATCH 02/16] [TEST ONLY] test fixes for 2.11 server changes (#798) * [TEST ONLY] server adds stream metadata now * [TEST ONLY] protect JetStreamSubscribeFlowControl against rate limiting --- test/test.c | 43 ++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 40 insertions(+), 3 deletions(-) diff --git a/test/test.c b/test/test.c index 
7cdb24116..11c850171 100644 --- a/test/test.c +++ b/test/test.c @@ -23249,7 +23249,7 @@ void test_JetStreamMgtStreams(void) && (si != NULL) && (si->Config != NULL) && (strcmp(si->Config->Name, "TEST210") == 0) - && (si->Config->Metadata.Count == 2) + && (si->Config->Metadata.Count >= 2) && (si->Config->Compression == js_StorageCompressionS2) && (si->Config->FirstSeq == 9999) && (strcmp(si->Config->SubjectTransform.Source, "foo210") == 0) @@ -27938,9 +27938,45 @@ void test_JetStreamSubscribeFlowControl(void) char *subj = NULL; natsBuffer *buf = NULL; - JS_SETUP(2, 3, 3); + natsConnection *nc = NULL; + jsCtx *js = NULL; + natsPid pid = NATS_INVALID_PID; + char confFile[256] = {'\0'}; + char datastore[256] = {'\0'}; + char cmdLine[1024] = {'\0'}; + + ENSURE_JS_VERSION(2, 3, 3); + + test("Start server: "); + _makeUniqueDir(datastore, sizeof(datastore), "datastore_"); - data = malloc(100*1024); + if (serverVersionAtLeast(2, 11, 0)) + { + _createConfFile(confFile, sizeof(confFile), + "jetstream: {\n" + " enabled: true\n" + " max_buffered_size: 1Gb\n" + " max_buffered_msgs: 20000\n" + "}\n"); + snprintf(cmdLine, sizeof(cmdLine), "-js -sd %s -c %s", datastore, confFile); + } + else + { + snprintf(cmdLine, sizeof(cmdLine), "-js -sd %s", datastore); + } + pid = _startServer("nats://127.0.0.1:4222", cmdLine, true); + CHECK_SERVER_STARTED(pid); + testCond(true); + + test("Connect: "); + s = natsConnection_Connect(&nc, NULL); + testCond(s == NATS_OK); + + test("Get context: "); + s = natsConnection_JetStream(&js, nc, NULL); + testCond(s == NATS_OK); + + data = malloc(100 * 1024); if (data == NULL) FAIL("Unable to allocate data"); @@ -28080,6 +28116,7 @@ void test_JetStreamSubscribeFlowControl(void) natsSubscription_Destroy(nsub); _destroyDefaultThreadArgs(&args); JS_TEARDOWN; + remove(confFile); } static void From 3de6a677b07df79b3c57e488047817c19062435a Mon Sep 17 00:00:00 2001 From: Lev <1187448+levb@users.noreply.github.com> Date: Wed, 18 Sep 2024 05:16:32 -0700 Subject: 
[PATCH 03/16] [ADDED] js_PullSubscribeAsync (#785) * Squashed and merged * Added limits.h * RelWithDebInfo on Travis * Reduced numMsgs in test_AssignSubToDispatch to make Travis happier * fixed a Travis NOTLS gcc warning * PR feedback: mostly formatting/whitespace * PR feedback: NATS_CALLOC error handling * PR feedback: do not cache sub->closed, draining in nats_dispatchThreadDedicated * nit: renamed dedicated->own * Make sure fetch onComplete is always invoked when sub closes * Reconnect tests * Fixed dispatch pool reallocation * (?) fixed flapping PullSubscribeAsync 'Fetch with a short expiration' test * Fixed flappers, refactored 'lifetime' * Fixes: - Properly account for *received* fetch messages/bytes - Fetch status set to NATS_CONNECTION_CLOSED if fetch is terminated by a disconnect - Auto-Unsubscribe when the (pull async) subscription reaches the end of life(*) - Handle errors from sending fetch requests(*) - Disallow MaxBytes and KeepAhead simultaneously so we can set MaxBytes on subsequent requests accurately - Unrelated: _unsubscribe should not stop timers for max>0 * Fix _testBatchCompleted timeout at 1s * PR feedback: naming/nits * Increased 2 more (test wait) timeouts * Increased sleep in JetStreamSubscribePull_Reconnect, flapping on Travis * PR feedback: no typedef for jsOptionsPullSubscribeAsync * PR feedback: removed jsOpts defaulting trickery * PR feedback: use actual defaults in the js-sub example * PR feedback: disallow use of KeepAhead with NoWait * PR feedback: extra comments for NoWait --- .travis.yml | 2 +- examples/examples.h | 5 + examples/js-sub.c | 96 +++- examples/micro-stats.c | 6 +- src/dispatch.c | 528 +++++++++++++++++++++- src/dispatch.h | 5 +- src/glib/glib_dispatch_pool.c | 62 ++- src/glib/glibp.h | 2 +- src/js.c | 354 +++++++++++++-- src/js.h | 9 +- src/jsm.c | 2 +- src/msg.c | 2 +- src/msg.h | 23 +- src/nats.c | 192 -------- src/nats.h | 108 +++++ src/natsp.h | 34 +- src/status.c | 1 + src/status.h | 3 + src/sub.c | 159 +------ 
test/list_test.txt | 6 + test/test.c | 815 +++++++++++++++++++++++++++++++++- 21 files changed, 1955 insertions(+), 459 deletions(-) diff --git a/.travis.yml b/.travis.yml index 6ab9ae1ae..1fb660f2e 100644 --- a/.travis.yml +++ b/.travis.yml @@ -172,7 +172,7 @@ jobs: env: - NATS_TEST_SERVER_VERSION=main - MATRIX_EVAL="CC=clang-8" - - BUILD_OPT="-DNATS_BUILD_ARCH=64 -DCMAKE_BUILD_TYPE=Release -DCMAKE_C_FLAGS=-fsanitize=thread" NATS_TEST_VALGRIND=yes DO_COVERAGE="no" + - BUILD_OPT="-DNATS_BUILD_ARCH=64 -DCMAKE_BUILD_TYPE=RelWithDebInfo -DCMAKE_C_FLAGS=-fsanitize=thread" NATS_TEST_VALGRIND=yes DO_COVERAGE="no" script: - $TRAVIS_BUILD_DIR/buildOnTravis.sh "$CC" "$DO_COVERAGE" "$BUILD_OPT -DNATS_SANITIZE=ON -DNATS_BUILD_TLS_USE_OPENSSL_1_1_API=ON -DNATS_PROTOBUF_DIR=$HOME/deps/pbuf -DNATS_BUILD_USE_SODIUM=ON -DNATS_SODIUM_DIR=$HOME/deps/sodium" "$CTEST_OPT" diff --git a/examples/examples.h b/examples/examples.h index ba30d38ff..5c50f5127 100644 --- a/examples/examples.h +++ b/examples/examples.h @@ -398,6 +398,11 @@ parseArgs(int argc, char **argv, const char *usage) async = false; pull = true; } + else if (strcasecmp(argv[i], "-pull-async") == 0) + { + async = true; + pull = true; + } else if (strcasecmp(argv[i], "-fc") == 0) { flowctrl = true; diff --git a/examples/js-sub.c b/examples/js-sub.c index 30fe1dbe4..6ac3e7ff2 100644 --- a/examples/js-sub.c +++ b/examples/js-sub.c @@ -17,18 +17,23 @@ static const char *usage = ""\ "-gd use global message delivery thread pool\n" \ "-sync receive synchronously (default is asynchronous)\n" \ "-pull use pull subscription\n" \ +"-pull-async use an async pull subscription\n" \ "-fc enable flow control\n" \ "-count number of expected messages\n"; +static bool fetchCompleteCalled = false; +static bool subCompleteCalled = false; + static void onMsg(natsConnection *nc, natsSubscription *sub, natsMsg *msg, void *closure) { if (print) - printf("Received msg: %s - %.*s\n", + { + printf("Received msg: %s - '%.*s'\n", 
natsMsg_GetSubject(msg), natsMsg_GetDataLength(msg), natsMsg_GetData(msg)); - + } if (start == 0) start = nats_Now(); @@ -50,6 +55,34 @@ asyncCb(natsConnection *nc, natsSubscription *sub, natsStatus err, void *closure natsSubscription_GetDropped(sub, (int64_t*) &dropped); } +static void +_completeFetchCb(natsConnection *nc, natsSubscription *sub, natsStatus s, void *closure) +{ + fetchCompleteCalled = true; + + if (print) + printf("Fetch completed with status: %u - %s\n", s, natsStatus_GetText(s)); +} + +static void +_completeSubCb(void *closure) +{ + subCompleteCalled = true; + if (print) + printf("Subscription completed\n"); +} + +static bool +nextFetchCb(jsFetchRequest *req, natsSubscription *sub, void *closure) +{ + if (print) + printf("NextFetch: always ask for 1 message, 0 MaxBytes\n"); + + req->Batch = 1; + req->MaxBytes = 0; + return true; +} + int main(int argc, char **argv) { natsConnection *conn = NULL; @@ -66,11 +99,19 @@ int main(int argc, char **argv) opts = parseArgs(argc, argv, usage); - printf("Created %s subscription on '%s'.\n", - (pull ? "pull" : (async ? "asynchronous" : "synchronous")), subj); + printf("Creating %s%s subscription on '%s'\n", + async ? "an asynchronous" : "a synchronous", + pull ? " pull" : "", + subj); s = natsOptions_SetErrorHandler(opts, asyncCb, NULL); + // Uncomment to use the global thread pool for message delivery. + // if (s == NATS_OK) + // s = natsOptions_UseGlobalMessageDelivery(opts, true); + // if (s == NATS_OK) + // s = nats_SetMessageDeliveryPoolSize(1); // 1 thread for all subscriptions. + if (s == NATS_OK) s = natsConnection_Connect(&conn, opts); @@ -130,21 +171,49 @@ int main(int argc, char **argv) if (s == NATS_OK) { - if (pull) + if (pull && async) + { + jsOpts.PullSubscribeAsync.MaxMessages = (int) total; + + // Defalut values, change as needed. 
+ jsOpts.PullSubscribeAsync.FetchSize = 128; // ask for 128 messages at a time + jsOpts.PullSubscribeAsync.NoWait = false; + jsOpts.PullSubscribeAsync.Timeout = 0; // for the entire subscription, in milliseconds + jsOpts.PullSubscribeAsync.KeepAhead = 0; + jsOpts.PullSubscribeAsync.Heartbeat = 0; // in milliseconds + + jsOpts.PullSubscribeAsync.CompleteHandler = _completeFetchCb; + jsOpts.PullSubscribeAsync.CompleteHandlerClosure = NULL; + + // Uncomment to provide custom control over next fetch size. + // jsOpts.PullSubscribeAsync.NextHandler = nextFetchCb; + + // Uncomment to turn off AutoACK on delivered messages. + // so.ManualAck = true; + + s = js_PullSubscribeAsync(&sub, js, subj, durable, onMsg, NULL, &jsOpts, &so, &jerr); + } + else if (pull) s = js_PullSubscribe(&sub, js, subj, durable, &jsOpts, &so, &jerr); else if (async) s = js_Subscribe(&sub, js, subj, onMsg, NULL, &jsOpts, &so, &jerr); else s = js_SubscribeSync(&sub, js, subj, &jsOpts, &so, &jerr); } + + if ((s == NATS_OK) && async) + s = natsSubscription_SetOnCompleteCB(sub, _completeSubCb, NULL); + if ((s == NATS_OK) && async) + s = natsSubscription_AutoUnsubscribe(sub, (int) total); // to get the sub closed callback if (s == NATS_OK) s = natsSubscription_SetPendingLimits(sub, -1, -1); if (s == NATS_OK) s = natsStatistics_Create(&stats); - if ((s == NATS_OK) && pull) + if ((s == NATS_OK) && pull && !async) { + // Pull mode, simple "Fetch" loop natsMsgList list; int i; @@ -166,16 +235,25 @@ int main(int argc, char **argv) } else if ((s == NATS_OK) && async) { + // All async modes (push and pull) while (s == NATS_OK) { - if (count + dropped == total) - break; + bool end = (count + dropped >= total); + + if (end && subCompleteCalled) + { + if (!pull) + break; + else if (fetchCompleteCalled) + break; + } - nats_Sleep(1000); + nats_Sleep(500); } } else if (s == NATS_OK) { + // Sync mode for (count = 0; (s == NATS_OK) && (count < total); count++) { s = natsSubscription_NextMsg(&msg, sub, 5000); diff 
--git a/examples/micro-stats.c b/examples/micro-stats.c index c50e07648..b487ff261 100644 --- a/examples/micro-stats.c +++ b/examples/micro-stats.c @@ -51,16 +51,16 @@ handle_stats(microRequest *req) microServiceStats *stats = NULL; char buf[2048]; service_state_t *service_state = microRequest_GetServiceState(req); - int total, custom, len; + int totalReq, custom, len; err = microService_GetStats(&stats, microRequest_GetService(req)); if (err != NULL) return err; - total = (int) stats->Endpoints[0].NumRequests; + totalReq = (int) stats->Endpoints[0].NumRequests; custom = service_state->odd_count; len = snprintf(buf, sizeof(buf), - "{\"total\":%d,\"odd\":%d}", total, custom); + "{\"total\":%d,\"odd\":%d}", totalReq, custom); return microRequest_Respond(req, buf, len); } diff --git a/src/dispatch.c b/src/dispatch.c index 7284ae5ef..bf9f8c0f7 100644 --- a/src/dispatch.c +++ b/src/dispatch.c @@ -19,13 +19,15 @@ #include "mem.h" #include "conn.h" #include "sub.h" +#include "js.h" #include "glib/glib.h" // sub and dispatcher locks must be held. -void natsSub_enqueueMessage(natsSubscription *sub, natsMsg *msg) +void +natsSub_enqueueMessage(natsSubscription *sub, natsMsg *msg) { - bool signal = false; - natsDispatchQueue *q = &sub->dispatcher->queue; + bool signal = false; + natsDispatchQueue *q = &sub->dispatcher->queue; if (q->head == NULL) { @@ -46,12 +48,13 @@ void natsSub_enqueueMessage(natsSubscription *sub, natsMsg *msg) } // sub and dispatcher locks must be held. 
-natsStatus natsSub_enqueueUserMessage(natsSubscription *sub, natsMsg *msg) +natsStatus +natsSub_enqueueUserMessage(natsSubscription *sub, natsMsg *msg) { - natsDispatchQueue *toQ = &sub->dispatcher->queue; - natsDispatchQueue *statsQ = &sub->ownDispatcher.queue; - int newMsgs = statsQ->msgs + 1; - int newBytes = statsQ->bytes + natsMsg_dataAndHdrLen(msg); + natsDispatchQueue *toQ = &sub->dispatcher->queue; + natsDispatchQueue *statsQ = &sub->ownDispatcher.queue; + int newMsgs = statsQ->msgs + 1; + int newBytes = statsQ->bytes + natsMsg_dataAndHdrLen(msg); msg->sub = sub; @@ -67,8 +70,23 @@ natsStatus natsSub_enqueueUserMessage(natsSubscription *sub, natsMsg *msg) if (newBytes > sub->bytesMax) sub->bytesMax = newBytes; - if ((sub->jsi != NULL) && sub->jsi->ackNone) - natsMsg_setAcked(msg); + if (sub->jsi != NULL) + { + if (sub->jsi->ackNone) + natsMsg_setAcked(msg); + + if (sub->jsi->fetch != NULL) + { + // Just a quick check to see if this is a user message, ignore everything else. + bool isUserMessage = false; + js_checkFetchedMsg(sub, msg, 0, false, &isUserMessage); + if (isUserMessage) + { + sub->jsi->fetch->receivedMsgs++; + sub->jsi->fetch->receivedBytes += natsMsg_dataAndHdrLen(msg); + } + } + } // Update the subscription stats if separate, the queue stats will be // updated below. @@ -82,3 +100,493 @@ natsStatus natsSub_enqueueUserMessage(natsSubscription *sub, natsMsg *msg) return NATS_OK; } +// Sub/dispatch locks must be held. +static inline void +_removeHeadMsg(natsDispatcher *d, natsMsg *msg) +{ + d->queue.head = msg->next; + if (d->queue.tail == msg) + d->queue.tail = NULL; + msg->next = NULL; +} + +// Returns fetch status, sub/dispatch locks must be held. 
+static inline natsStatus +_preProcessUserMessage( + natsSubscription *sub, jsSub *jsi, jsFetch *fetch, natsMsg *msg, + bool *userMsg, bool *overLimit, bool *lastMessageInSub, bool *lastMessageInFetch, char **fcReply) +{ + natsStatus fetchStatus = NATS_OK; + *userMsg = true; + + // Is this a real message? If so, account for having processed it. + bool isRealMessage = (msg->subject[0] != '\0'); + if (isRealMessage) + { + sub->ownDispatcher.queue.msgs--; + sub->ownDispatcher.queue.bytes -= natsMsg_dataAndHdrLen(msg); + } + + // Fetch-specific handling of synthetic and header-only messages + if ((jsi != NULL) && (fetch != NULL)) + fetchStatus = js_checkFetchedMsg(sub, msg, jsi->fetchID, true, userMsg); + + // Is it another kind of synthetic message? + *userMsg = *userMsg && (msg->subject[0] != '\0'); + + // Check the limits. + if (*userMsg) + { + if (sub->max > 0) + { + *overLimit = (sub->delivered == sub->max); + *lastMessageInSub = (sub->delivered == (sub->max - 1)); + } + + if (fetch) + { + bool overMaxBytes = ((fetch->opts.MaxBytes > 0) && ((fetch->deliveredBytes) > fetch->opts.MaxBytes)); + bool overMaxFetch = overMaxBytes; + *lastMessageInFetch = overMaxBytes; + if (fetch->opts.MaxMessages > 0) + { + overMaxFetch |= (fetch->deliveredMsgs >= fetch->opts.MaxMessages); + *lastMessageInFetch |= (fetch->deliveredMsgs == (fetch->opts.MaxMessages - 1)); + } + // See if we want to override fetch status based on our own data. + if (fetchStatus == NATS_OK) + { + if (*lastMessageInFetch || overMaxFetch) + { + fetchStatus = NATS_MAX_DELIVERED_MSGS; + } + else if (overMaxBytes) + { + fetchStatus = NATS_LIMIT_REACHED; + } + } + *overLimit = (*overLimit || overMaxFetch || overMaxBytes); + } + + if (!*overLimit) + { + sub->delivered++; + if (fetch) + { + fetch->deliveredMsgs++; + fetch->deliveredBytes += natsMsg_dataAndHdrLen(msg); + } + } + + *fcReply = (jsi == NULL ? 
NULL : jsSub_checkForFlowControlResponse(sub)); + } + + return fetchStatus; +} + +// Thread main function for a thread pool of dispatchers. +void +nats_dispatchThreadPool(void *arg) +{ + natsDispatcher *d = (natsDispatcher *)arg; + + nats_lockDispatcher(d); + + while (true) + { + natsMsg *msg = NULL; + char *fcReply = NULL; + bool timerNeedReset = false; + bool userMsg = true; + bool timeout = false; + bool overLimit = false; + bool lastMessageInSub = false; + bool lastMessageInFetch = false; + natsStatus fetchStatus = NATS_OK; + + while (((msg = d->queue.head) == NULL) && !d->shutdown) + natsCondition_Wait(d->cond, d->mu); + + // Break out only when list is empty + if ((msg == NULL) && d->shutdown) + { + break; + } + + _removeHeadMsg(d, msg); + + // Get subscription reference from message and capture values we need + // while under lock. + natsSubscription *sub = msg->sub; + natsConnection *nc = sub->conn; + jsSub *jsi = sub->jsi; + jsFetch *fetch = (jsi != NULL) ? jsi->fetch : NULL; + natsMsgHandler messageCB = sub->msgCb; + void *messageClosure = sub->msgCbClosure; + natsOnCompleteCB completeCB = sub->onCompleteCB; + void *completeCBClosure = sub->onCompleteCBClosure; + natsSubscriptionControlMessages *ctrl = sub->control; + bool draining = sub->draining; + bool connClosed = sub->connClosed; + + fetchStatus = _preProcessUserMessage( + sub, jsi, fetch, msg, + &userMsg, &overLimit, &lastMessageInSub, &lastMessageInFetch, &fcReply); + + // Check the timeout timer. + timerNeedReset = false; + if (userMsg || (msg == sub->control->sub.timeout)) + { + sub->timeoutSuspended = true; + // Need to reset the timer after the user callback returns, but only + // if we are already in a timeout, or there are no more messages in + // the queue. 
+ if (!sub->draining && !sub->closed && (sub->timeout > 0)) + if (timeout || (sub->ownDispatcher.queue.msgs == 0)) + { + timerNeedReset = true; // after the callbacks return + } + } + + // Process synthetic messages + if (msg == ctrl->sub.drain) + { + // Subscription is draining, we are past the last message, + // remove the subscription. This will schedule another + // control message for the close. + nats_unlockDispatcher(d); + natsSub_setDrainCompleteState(sub); + natsConn_removeSubscription(nc, sub); + nats_lockDispatcher(d); + continue; + } + else if (msg == ctrl->sub.close) + { + nats_unlockDispatcher(d); + + // Call this in case the subscription was draining. + natsSub_setDrainCompleteState(sub); + + // It's ok to access fetch->status without locking since it's only + // modified in this thread. completeCB and completeCBClosure are + // also safe to access. + if ((fetch != NULL) && (fetch->opts.CompleteHandler != NULL)) + { + fetchStatus = fetch->status; + if ((fetchStatus == NATS_OK) && connClosed) + fetchStatus = NATS_CONNECTION_CLOSED; + (*fetch->opts.CompleteHandler)(nc, sub, fetchStatus, fetch->opts.CompleteHandlerClosure); + } + + if (completeCB != NULL) + (*completeCB)(completeCBClosure); + + // Subscription closed, just release + natsSub_release(sub); + + nats_lockDispatcher(d); + continue; + } + else if (msg == ctrl->sub.timeout) + { + nats_unlockDispatcher(d); + + // Invoke the callback with a NULL message. + (*messageCB)(nc, sub, NULL, messageClosure); + + nats_lockDispatcher(d); + + if (!sub->draining && !sub->closed) + { + // Reset the timedOut boolean to allow for the + // subscription to timeout again, and reset the + // timer to fire again starting from now. + sub->timedOut = false; + natsTimer_Reset(sub->timeoutTimer, sub->timeout); + } + continue; + } + + // Fetch control messages + else if ((fetchStatus != NATS_OK) && !lastMessageInFetch) + { + // Finalize the fetch and the sub now. 
Need to store the fetch + // status, will call the user callback on close message. Override + // any prior (fetch request error?) value, since this is an explicit + // termination event. + fetch->status = fetchStatus; + + // TODO: future: options for handling missed heartbeat, for now + // treat it as any other error and terminate. + nats_unlockDispatcher(d); + + // Call this blindly, it will be a no-op if the subscription + // was not draining. + natsSubscription_Unsubscribe(sub); + natsMsg_Destroy(msg); // may be an actual headers-only message + nats_lockDispatcher(d); + continue; + } + else if ((fetch != NULL) && (fetchStatus == NATS_OK) && !userMsg) + { + // Fetch heartbeat. Need to set the active bit to prevent the missed + // heartbeat condition when the timer fires. + jsi->active = true; + natsMsg_Destroy(msg); + continue; + } + + // Need to check for closed subscription again here. The subscription + // could have been unsubscribed from a callback but there were already + // pending messages. The control message is queued up. Until it is + // processed, we need to simply discard the message and continue. + // + // Other invalid states: same handling, discard the message and + // continue. + else if ((sub->closed) || + (msg->sub == NULL) || (msg->subject == NULL) || (strcmp(msg->subject, "") == 0)) + { + natsMsg_Destroy(msg); + continue; + } + + // --- Normal user message delivery. --- + + // Is this a subscription that can timeout? + if (!sub->draining && (sub->timeout != 0)) + { + // Prevent the timer from posting a timeout synthetic message. + sub->timeoutSuspended = true; + + // If we are dealing with the last pending message for this sub, + // we will reset the timer after the user callback returns. 
+ if (sub->ownDispatcher.queue.msgs == 0) + timerNeedReset = true; + } + + if (lastMessageInFetch) + fetch->status = fetchStatus; + + nats_unlockDispatcher(d); + + if (!overLimit) + (*messageCB)(nc, sub, msg, messageClosure); + else + natsMsg_Destroy(msg); + + if ((fetch != NULL) && !lastMessageInFetch && !draining) + { + fetch->status = js_maybeFetchMore(sub, fetch); + // If we failed to request more during a fetch, deliver whatever is + // already received. + if (fetch->status != NATS_OK) + natsSubscription_Drain(sub); + } + + + if (fcReply != NULL) + { + natsConnection_Publish(nc, fcReply, NULL, 0); + NATS_FREE(fcReply); + } + + if (lastMessageInFetch || lastMessageInSub) + { + // Call this blindly, it will be a no-op if the subscription + // was not draining. + natsSub_setDrainCompleteState(sub); + + // If we have reached the fetch limit, we need to send an + // unsubscribe to the server. Conversely, for the sub limit it has + // already been sent, so we just need to remove the sub from the + // connection's hash. + if (lastMessageInFetch) + natsSubscription_Unsubscribe(sub); + else + natsConn_removeSubscription(nc, sub); + } + + nats_lockDispatcher(d); + + // Check if timer need to be reset for subscriptions that can timeout. + if (!sub->closed && (sub->timeout != 0) && timerNeedReset) + { + timerNeedReset = false; + + // Do this only on timer reset instead of after each return + // from callback. The reason is that if there are still pending + // messages for this subscription (this is the case otherwise + // timerNeedReset would be false), we should prevent + // the subscription to timeout anyway. + sub->timeoutSuspended = false; + + // Reset the timer to fire in `timeout` from now. + natsTimer_Reset(sub->timeoutTimer, sub->timeout); + } + } + + nats_destroyQueuedMessages(&d->queue); + nats_unlockDispatcher(d); + + natsLib_Release(); +} + +// Thread main function for a subscription-owned dispatcher. 
+void +nats_dispatchThreadOwn(void *arg) +{ + natsSubscription *sub = (natsSubscription *)arg; + bool rmSub = false; + bool unsub = false; + bool connClosed = false; + + // These are set at sub creation time and never change, no need to lock. + natsConnection *nc = sub->conn; + natsMsgHandler messageCB = sub->msgCb; + void *messageClosure = sub->msgCbClosure; + natsOnCompleteCB completeCB = NULL; + void *completeCBClosure = NULL; + jsFetch *fetch = NULL; + + // This just serves as a barrier for the creation of this thread. + natsConn_Lock(nc); + natsConn_Unlock(nc); + + while (true) + { + natsStatus s = NATS_OK; + natsMsg *msg = NULL; + bool userMsg = true; + bool overLimit = false; + bool lastMessageInSub = false; + bool lastMessageInFetch = false; + + natsSub_Lock(sub); + int64_t timeout = sub->timeout; + + while (((msg = sub->ownDispatcher.queue.head) == NULL) && !(sub->closed) && !(sub->draining) && (s != NATS_TIMEOUT)) + { + if (timeout != 0) + s = natsCondition_TimedWait(sub->ownDispatcher.cond, sub->mu, timeout); + else + natsCondition_Wait(sub->ownDispatcher.cond, sub->mu); + } + + bool draining = sub->draining; + completeCB = sub->onCompleteCB; + completeCBClosure = sub->onCompleteCBClosure; + jsSub *jsi = sub->jsi; + connClosed = sub->connClosed; + + fetch = (jsi != NULL) ? jsi->fetch : NULL; + if (sub->closed) + { + natsSub_Unlock(sub); + break; + } + + // Will happen with timeout subscription + if (msg == NULL) + { + natsSub_Unlock(sub); + if (draining) + { + rmSub = true; + break; + } + // If subscription timed-out, invoke callback with NULL message. 
+ if (s == NATS_TIMEOUT) + (*messageCB)(nc, sub, NULL, messageClosure); + continue; + } + + _removeHeadMsg(&sub->ownDispatcher, msg); + + char *fcReply = NULL; + natsStatus fetchStatus = _preProcessUserMessage( + sub, jsi, fetch, msg, + &userMsg, &overLimit, &lastMessageInSub, &lastMessageInFetch, &fcReply); + + // Fetch control messages + if ((fetchStatus != NATS_OK) && !lastMessageInFetch) + { + // We drop here only if this is not already marked as last message + // in fetch. The last message will be delivered first. fetch can not + // be NULL here since fetchStatus is set. + fetch->status = fetchStatus; + natsSub_Unlock(sub); + natsMsg_Destroy(msg); // may be an actual headers-only message + unsub = true; + break; + } + else if ((fetch != NULL) && (fetchStatus == NATS_OK) && !userMsg) + { + // Fetch heartbeat. Need to set the active bit to prevent the missed + // heartbeat condition when the timer fires. + jsi->active = true; + natsSub_Unlock(sub); + natsMsg_Destroy(msg); + continue; + } + + if (lastMessageInFetch) + fetch->status = fetchStatus; + + natsSub_Unlock(sub); + + if (!overLimit) + (*messageCB)(nc, sub, msg, messageClosure); + else + natsMsg_Destroy(msg); + + if ((fetch != NULL) && !lastMessageInFetch && !draining) + { + fetch->status = js_maybeFetchMore(sub, fetch); + // If we failed to request more during a fetch, deliver whatever is + // already received. + if (fetch->status != NATS_OK) + natsSubscription_Drain(sub); + } + + if (fcReply != NULL) + { + natsConnection_Publish(nc, fcReply, NULL, 0); + NATS_FREE(fcReply); + } + + if (lastMessageInFetch) + { + // If we hit the fetch limit, send unsubscribe to the server. + unsub = true; + break; + } + if (lastMessageInSub) + { + // If we have hit the max for delivered msgs, just remove sub. 
+ rmSub = true; + break; + } + } + + natsSub_setDrainCompleteState(sub); + + if (unsub) + natsSubscription_Unsubscribe(sub); + else if (rmSub) + natsConn_removeSubscription(nc, sub); + + // It's ok to access fetch->status without locking since it's only modified + // in this thread. completeCB and completeCBClosure are also safe to access. + if ((fetch != NULL) && (fetch->opts.CompleteHandler != NULL)) + { + natsStatus fetchStatus = fetch->status; + if ((fetchStatus == NATS_OK) && connClosed) + fetchStatus = NATS_CONNECTION_CLOSED; + (*fetch->opts.CompleteHandler)(nc, sub, fetchStatus, fetch->opts.CompleteHandlerClosure); + } + + if (completeCB != NULL) + (*completeCB)(completeCBClosure); + + natsSub_release(sub); +} diff --git a/src/dispatch.h b/src/dispatch.h index 9662a5533..8c08b6630 100644 --- a/src/dispatch.h +++ b/src/dispatch.h @@ -25,9 +25,8 @@ typedef struct __natsDispatchQueue_s typedef struct __natsDispatcher_s { // When created as a dedicated dispatcher for a subscription, we use the - // sub's mutex (for performance? TODO: benchmack), so there is special - // handling for mu in the code. - natsSubscription *dedicatedTo; + // sub's mutex, so there is special handling for mu in the code. + natsSubscription *ownedBy; natsMutex *mu; natsThread *thread; diff --git a/src/glib/glib_dispatch_pool.c b/src/glib/glib_dispatch_pool.c index 5f4be28e4..3baf8c922 100644 --- a/src/glib/glib_dispatch_pool.c +++ b/src/glib/glib_dispatch_pool.c @@ -17,23 +17,23 @@ static inline void _destroyDispatcher(natsDispatcher *d) { - if ((d == NULL) || !d->running) + if (d == NULL) return; natsThread_Destroy(d->thread); nats_destroyQueuedMessages(&d->queue); // there's NEVER anything there, remove? 
natsCondition_Destroy(d->cond); natsMutex_Destroy(d->mu); - memset(d, 0, sizeof(*d)); + NATS_FREE(d); } static inline natsStatus -_startDispatcher(natsDispatcher *d, void (*threadf)(void *)) +_newDispatcher(natsDispatcher **newDispatcher, void (*threadf)(void *)) { natsStatus s = NATS_OK; - - if (d->running) - return NATS_OK; + natsDispatcher *d = NATS_CALLOC(1, sizeof(natsDispatcher)); + if (d == NULL) + return nats_setDefaultError(NATS_NO_MEMORY); s = natsMutex_Create(&d->mu); if (s != NATS_OK) @@ -52,8 +52,11 @@ _startDispatcher(natsDispatcher *d, void (*threadf)(void *)) { _destroyDispatcher(d); natsLib_Release(); + return NATS_UPDATE_ERR_STACK(s); } - return NATS_UPDATE_ERR_STACK(s); + + *newDispatcher = d; + return NATS_OK; } static natsStatus @@ -68,7 +71,7 @@ _growPool(natsDispatcherPool *pool, int cap) // the pool in the future. Make it a no-op for now. if (cap > pool->cap) { - natsDispatcher *newDispatchers = NATS_CALLOC(cap, sizeof(natsDispatcher)); + natsDispatcher **newDispatchers = NATS_CALLOC(cap, sizeof(natsDispatcher*)); if (newDispatchers == NULL) s = nats_setDefaultError(NATS_NO_MEMORY); if (s == NATS_OK) @@ -76,7 +79,7 @@ _growPool(natsDispatcherPool *pool, int cap) memcpy( newDispatchers, pool->dispatchers, - pool->cap * sizeof(*newDispatchers)); + pool->cap * sizeof(natsDispatcher*)); NATS_FREE(pool->dispatchers); pool->dispatchers = newDispatchers; pool->cap = cap; @@ -88,7 +91,7 @@ _growPool(natsDispatcherPool *pool, int cap) void nats_freeDispatcherPool(natsDispatcherPool *pool) { for (int i = 0; i < pool->cap; i++) - _destroyDispatcher(&pool->dispatchers[i]); + _destroyDispatcher(pool->dispatchers[i]); natsMutex_Destroy(pool->lock); NATS_FREE(pool->dispatchers); memset(pool, 0, sizeof(*pool)); @@ -112,18 +115,17 @@ nats_initDispatcherPool(natsDispatcherPool *pool, int cap) void nats_signalDispatcherPoolToShutdown(natsDispatcherPool *pool) { - natsCondition *cond = NULL; - for (int i = 0; i < pool->cap; i++) { - // These are no-ops for 
empty slots - nats_lockDispatcher(&pool->dispatchers[i]); - pool->dispatchers[i].shutdown = true; - cond = pool->dispatchers[i].cond; - if (cond != NULL) - natsCondition_Signal(cond); - - nats_unlockDispatcher(&pool->dispatchers[i]); + natsDispatcher *d = pool->dispatchers[i]; + if (d == NULL) + continue; + + nats_lockDispatcher(d); + d->shutdown = true; + if (d->cond != NULL) + natsCondition_Signal(d->cond); + nats_unlockDispatcher(d); } } @@ -131,8 +133,8 @@ void nats_waitForDispatcherPoolShutdown(natsDispatcherPool *pool) { for (int i = 0; i < pool->cap; i++) { - if (pool->dispatchers[i].thread != NULL) - natsThread_Join(pool->dispatchers[i].thread); + if (pool->dispatchers[i] != NULL) + natsThread_Join(pool->dispatchers[i]->thread); } } @@ -153,7 +155,6 @@ nats_assignSubToDispatch(natsSubscription *sub) { natsLib *lib = nats_lib(); natsStatus s = NATS_OK; - natsDispatcher *d = NULL; natsDispatcherPool *pool = &lib->messageDispatchers; natsMutex_Lock(pool->lock); @@ -161,18 +162,15 @@ nats_assignSubToDispatch(natsSubscription *sub) if (pool->cap == 0) s = nats_setError(NATS_FAILED_TO_INITIALIZE, "%s", "No message dispatchers available, the pool is empty."); - if (s == NATS_OK) - { - // Get the next dispatcher - d = &pool->dispatchers[pool->useNext]; - pool->useNext = (pool->useNext + 1) % pool->cap; - } - if ((s == NATS_OK) && (d->thread == NULL)) - s = _startDispatcher(d, nats_deliverMsgsPoolf); + // Get the next dispatcher + if (pool->dispatchers[pool->useNext] == NULL) + s = _newDispatcher(&pool->dispatchers[pool->useNext], nats_dispatchThreadPool); // Assign it to the sub. 
if (s == NATS_OK) - sub->dispatcher = d; + sub->dispatcher = pool->dispatchers[pool->useNext]; + + pool->useNext = (pool->useNext + 1) % pool->cap; natsMutex_Unlock(pool->lock); diff --git a/src/glib/glibp.h b/src/glib/glibp.h index c6c4a8bb2..9babc60d6 100644 --- a/src/glib/glibp.h +++ b/src/glib/glibp.h @@ -81,7 +81,7 @@ struct __natsDispatcherPool int useNext; // index of next dispatcher to use int cap; // maximum number of concurrent dispatchers allowed - natsDispatcher *dispatchers; + natsDispatcher **dispatchers; }; diff --git a/src/js.c b/src/js.c index 42a404275..dd0d5c4cf 100644 --- a/src/js.c +++ b/src/js.c @@ -12,6 +12,7 @@ // limitations under the License. #include +#include #include "js.h" #include "mem.h" @@ -53,7 +54,7 @@ const int64_t jsOrderedHBInterval = NATS_SECONDS_TO_NANOS(5); // Forward declarations static void _hbTimerFired(natsTimer *timer, void* closure); -static void _hbTimerStopped(natsTimer *timer, void* closure); +static void _releaseSubWhenStopped(natsTimer *timer, void* closure); typedef struct __jsOrderedConsInfo { @@ -1218,6 +1219,18 @@ _lookupStreamBySubject(const char **stream, natsConnection *nc, const char *subj return NATS_UPDATE_ERR_STACK(s); } +static void +_destroyFetch(jsFetch *fetch) +{ + if (fetch == NULL) + return; + + if (fetch->expiresTimer != NULL) + natsTimer_Destroy(fetch->expiresTimer); + + NATS_FREE(fetch); +} + void jsSub_free(jsSub *jsi) { @@ -1226,6 +1239,8 @@ jsSub_free(jsSub *jsi) if (jsi == NULL) return; + _destroyFetch(jsi->fetch); + js = jsi->js; natsTimer_Destroy(jsi->hbTimer); @@ -1683,34 +1698,77 @@ jsSub_scheduleFlowControlResponse(jsSub *jsi, const char *reply) return NATS_OK; } -static natsStatus -_checkMsg(natsMsg *msg, bool checkSts, bool *usrMsg, natsMsg *mhMsg, const char* reqSubj) +// returns the fetchID from subj, or -1 if not the fetch status subject for the +// sub. 
+static inline int64_t +_fetchIDFromSubject(natsSubscription *sub, const char *subj) { - natsStatus s = NATS_OK; - const char *val = NULL; - const char *desc= NULL; + int len = NATS_DEFAULT_INBOX_PRE_LEN + NUID_BUFFER_LEN + 1; // {INBOX}. but without the * + int64_t id = 0; - // Check for missed heartbeat special message - if (msg == mhMsg) + if (strncmp(sub->subject, subj, len) != 0) + return -1; + + subj += len; + if (*subj == '\0') + return -1; + + for (; *subj != '\0'; subj++) { - *usrMsg = false; - return NATS_MISSED_HEARTBEAT; + if ((*subj < '0') || (*subj > '9')) + return -1; + id = (id * 10) + (*subj - '0'); } + return id; +} - *usrMsg = true; +// returns the fetch status OK to continue, or an error to stop. Some errors +// like NATS_TIMEOUT are valid exit codes. +natsStatus +js_checkFetchedMsg(natsSubscription *sub, natsMsg *msg, uint64_t fetchID, bool checkSts, bool *usrMsg) +{ + natsStatus s = NATS_OK; + const char *val = NULL; + const char *desc = NULL; + + // Check for synthetic fetch event messages. + if (sub->control != NULL) + { + if (msg == sub->control->fetch.missedHeartbeat) + { + *usrMsg = false; + return NATS_MISSED_HEARTBEAT; + } + else if (msg == sub->control->fetch.expired) + { + *usrMsg = false; + return NATS_TIMEOUT; + } + } if ((msg->dataLen > 0) || (msg->hdrLen <= 0)) + { + // If we have data, or no header - user's + *usrMsg = true; return NATS_OK; + } s = natsMsgHeader_Get(msg, STATUS_HDR, &val); - // If no status header, this is still considered a user message, so OK. if (s == NATS_NOT_FOUND) + { + // If no status header, this is still considered a user message, so OK. + *usrMsg = true; return NATS_OK; - // If serious error, return it. + } else if (s != NATS_OK) + { + // If serious error, return it. + *usrMsg = false; return NATS_UPDATE_ERR_STACK(s); + } - // At this point, this is known to be a status message, not a user message. 
+ // At this point, this is known to be a status message, not a user message, + // even if we don't recognize the status here. *usrMsg = false; // If we don't care about status, we are done. @@ -1718,26 +1776,38 @@ _checkMsg(natsMsg *msg, bool checkSts, bool *usrMsg, natsMsg *mhMsg, const char* return NATS_OK; // 100 Idle hearbeat, return OK - if (strncmp(val, CTRL_STATUS, HDR_STATUS_LEN) == 0) + if (strncmp(val, HDR_STATUS_CTRL_100, HDR_STATUS_LEN) == 0) return NATS_OK; // Before checking for "errors", if the incoming status message is // for a previous request (message's subject is not reqSubj), then // simply return NATS_OK. The caller will destroy the message and // proceed as if nothing was received. - if (strcmp(natsMsg_GetSubject(msg), reqSubj) != 0) + int64_t id = _fetchIDFromSubject(sub, natsMsg_GetSubject(msg)); + if (id != (int64_t) fetchID) return NATS_OK; // 404 indicating that there are no messages. - if (strncmp(val, NOT_FOUND_STATUS, HDR_STATUS_LEN) == 0) + if (strncmp(val, HDR_STATUS_NOT_FOUND_404, HDR_STATUS_LEN) == 0) return NATS_NOT_FOUND; // 408 indicating request timeout - if (strncmp(val, REQ_TIMEOUT, HDR_STATUS_LEN) == 0) + if (strncmp(val, HDR_STATUS_TIMEOUT_408, HDR_STATUS_LEN) == 0) return NATS_TIMEOUT; - // The possible 503 is handled directly in natsSub_nextMsg(), so we - // would never get it here in this function. + // 409 indicating that MaxBytes has been reached, but it can come as other + // errors (e.g. "Exceeded MaxWaiting"), so set the last error. + if (strncmp(val, HDR_STATUS_MAX_BYTES_409, HDR_STATUS_LEN) == 0) + { + natsMsgHeader_Get(msg, DESCRIPTION_HDR, &desc); + return nats_setError(NATS_LIMIT_REACHED, "%s", (desc == NULL ? "error checking pull subscribe message" : desc)); + } + + // The possible 503 is handled directly in natsSub_nextMsg(), so we would + // never get it here in this function, but in PullSubscribeAsync. There, we + // want to use it as the exit code (not NATS_ERR). 
+ if (strncmp(val, HDR_STATUS_NO_RESP_503, HDR_STATUS_LEN) == 0) + return NATS_NO_RESPONDERS; natsMsgHeader_Get(msg, DESCRIPTION_HDR, &desc); return nats_setError(NATS_ERR, "%s", (desc == NULL ? "error checking pull subscribe message" : desc)); @@ -1783,8 +1853,8 @@ _fetch(natsMsgList *list, natsSubscription *sub, jsFetchRequest *req, bool simpl int batch = 0; natsConnection *nc = NULL; const char *subj = NULL; - const char *rply = NULL; - int pmc = 0; + char rply[NATS_DEFAULT_INBOX_PRE_LEN + NUID_BUFFER_LEN + 32]; + int pmc = 0; char buffer[64]; natsBuffer buf; int64_t start = 0; @@ -1793,9 +1863,8 @@ _fetch(natsMsgList *list, natsSubscription *sub, jsFetchRequest *req, bool simpl int size = 0; bool sendReq = true; jsSub *jsi = NULL; - natsMsg *mhMsg = NULL; - char *reqSubj = NULL; bool noWait = false; + uint64_t fetchID = 0; if (list == NULL) return nats_setDefaultError(NATS_INVALID_ARG); @@ -1832,10 +1901,8 @@ _fetch(natsMsgList *list, natsSubscription *sub, jsFetchRequest *req, bool simpl pmc = (sub->ownDispatcher.queue.msgs > 0); jsi->inFetch = true; jsi->fetchID++; - if (nats_asprintf(&reqSubj, "%.*s%" PRIu64, (int) strlen(sub->subject)-1, sub->subject, jsi->fetchID) < 0) - s = nats_setDefaultError(NATS_NO_MEMORY); - else - rply = (const char*) reqSubj; + fetchID = jsi->fetchID; + snprintf(rply, sizeof(rply), "%.*s%" PRIu64, (int)strlen(sub->subject) - 1, sub->subject, fetchID); if ((s == NATS_OK) && req->Heartbeat) { s = nats_createControlMessages(sub); @@ -1845,14 +1912,12 @@ _fetch(natsMsgList *list, natsSubscription *sub, jsFetchRequest *req, bool simpl sub->refs++; if (jsi->hbTimer == NULL) { - s = natsTimer_Create(&jsi->hbTimer, _hbTimerFired, _hbTimerStopped, hbi * 2, (void *)sub); + s = natsTimer_Create(&jsi->hbTimer, _hbTimerFired, _releaseSubWhenStopped, hbi * 2, (void *)sub); if (s != NATS_OK) sub->refs--; } else natsTimer_Reset(jsi->hbTimer, hbi); - - mhMsg = sub->control->batch.missedHeartbeat; } } natsSub_Unlock(sub); @@ -1884,7 +1949,7 @@ 
_fetch(natsMsgList *list, natsSubscription *sub, jsFetchRequest *req, bool simpl { // Here we care only about user messages. We don't need to pass // the request subject since it is not even checked in this case. - s = _checkMsg(msg, false, &usrMsg, mhMsg, NULL); + s = js_checkFetchedMsg(sub, msg, fetchID, false, &usrMsg); if ((s == NATS_OK) && usrMsg) { msgs[count++] = msg; @@ -1930,7 +1995,7 @@ _fetch(natsMsgList *list, natsSubscription *sub, jsFetchRequest *req, bool simpl IFOK(s, natsSub_nextMsg(&msg, sub, timeout, true)); if (s == NATS_OK) { - s = _checkMsg(msg, true, &usrMsg, mhMsg, rply); + s = js_checkFetchedMsg(sub, msg, fetchID, true, &usrMsg); if ((s == NATS_OK) && usrMsg) { msgs[count++] = msg; @@ -1980,8 +2045,6 @@ _fetch(natsMsgList *list, natsSubscription *sub, jsFetchRequest *req, bool simpl natsTimer_Stop(jsi->hbTimer); natsSub_Unlock(sub); - NATS_FREE(reqSubj); - return NATS_UPDATE_ERR_STACK(s); } @@ -2021,6 +2084,19 @@ natsSubscription_FetchRequest(natsMsgList *list, natsSubscription *sub, jsFetchR return NATS_UPDATE_ERR_STACK(s); } +static void +_fetchExpiredFired(natsTimer *timer, void *closure) +{ + natsSubscription *sub = (natsSubscription *)closure; + + // Let the dispatcher know that the fetch has expired. It will deliver all + // queued up messages, then do the right termination. + nats_lockSubAndDispatcher(sub); + natsSub_enqueueMessage(sub, sub->control->fetch.expired); + nats_unlockSubAndDispatcher(sub); + natsTimer_Stop(timer); +} + static void _hbTimerFired(natsTimer *timer, void* closure) { @@ -2042,7 +2118,7 @@ _hbTimerFired(natsTimer *timer, void* closure) // we will check missed HBs again. 
if (sub->ownDispatcher.queue.msgs == 0) { - natsSub_enqueueMessage(sub, sub->control->batch.missedHeartbeat); + natsSub_enqueueMessage(sub, sub->control->fetch.missedHeartbeat); natsTimer_Stop(timer); } nats_unlockSubAndDispatcher(sub); @@ -2081,7 +2157,7 @@ _hbTimerFired(natsTimer *timer, void* closure) // client, timers will automatically fire again, so this callback is // invoked when the timer has been stopped (and we are ready to destroy it). static void -_hbTimerStopped(natsTimer *timer, void* closure) +_releaseSubWhenStopped(natsTimer *timer, void* closure) { natsSubscription *sub = (natsSubscription*) closure; @@ -2634,7 +2710,7 @@ _subscribeMulti(natsSubscription **new_sub, jsCtx *js, const char **subjects, in { natsSub_Lock(sub); sub->refs++; - s = natsTimer_Create(&jsi->hbTimer, _hbTimerFired, _hbTimerStopped, hbi*2, (void*) sub); + s = natsTimer_Create(&jsi->hbTimer, _hbTimerFired, _releaseSubWhenStopped, hbi*2, (void*) sub); if (s != NATS_OK) sub->refs--; natsSub_Unlock(sub); @@ -2827,6 +2903,206 @@ js_PullSubscribe(natsSubscription **sub, jsCtx *js, const char *subject, const c return NATS_UPDATE_ERR_STACK(s); } +// Neither sub's nor dispatcher's lock must be held. +natsStatus +js_maybeFetchMore(natsSubscription *sub, jsFetch *fetch) +{ + jsFetchRequest req = {.Expires = 0}; + if (fetch->opts.NextHandler == NULL) + return NATS_OK; + + // Prepare the next fetch request + if (!fetch->opts.NextHandler(&req.Batch, &req.MaxBytes, sub, fetch->opts.NextHandlerClosure)) + return NATS_OK; + + // These are not changeable by the callback, only Batch and MaxBytes can be updated. 
+ int64_t now = nats_Now(); + if (fetch->opts.Timeout != 0) + req.Expires = (fetch->opts.Timeout - (now - fetch->startTimeMillis)) * 1000 * 1000; // ns, go time.Duration + req.NoWait = fetch->opts.NoWait; + req.Heartbeat = fetch->opts.Heartbeat * 1000 * 1000; // ns, go time.Duration + + char buffer[128]; + natsBuffer buf; + natsBuf_InitWithBackend(&buf, buffer, 0, sizeof(buffer)); + + nats_lockSubAndDispatcher(sub); + + jsSub *jsi = sub->jsi; + jsi->inFetch = true; + jsi->fetchID++; + snprintf(fetch->replySubject, sizeof(fetch->replySubject), "%.*s%" PRIu64, + (int)strlen(sub->subject) - 1, sub->subject, // exclude the last '*' + jsi->fetchID); + natsStatus s = _sendPullRequest(sub->conn, jsi->nxtMsgSubj, fetch->replySubject, &buf, &req); + if (s == NATS_OK) + { + fetch->requestedMsgs += req.Batch; + } + + nats_unlockSubAndDispatcher(sub); + + natsBuf_Destroy(&buf); + return NATS_UPDATE_ERR_STACK(s); +} + +// Sets Batch and MaxBytes for the next fetch request. +static bool +_autoNextFetchRequest(int *messages, int64_t *maxBytes, natsSubscription *sub, void *closure) +{ + jsFetch *fetch = (jsFetch *)closure; + int remainingUnrequested = INT_MAX; + int64_t remainingBytes = 0; + int want = 0; + bool maybeMore = true; + + nats_lockSubAndDispatcher(sub); + + int isAhead = fetch->requestedMsgs - fetch->deliveredMsgs; + int wantAhead = fetch->opts.KeepAhead; + if (isAhead > wantAhead) + maybeMore = false; + + if (maybeMore && (fetch->opts.MaxMessages > 0)) + { + remainingUnrequested = fetch->opts.MaxMessages - fetch->requestedMsgs; + if (remainingUnrequested <= 0) + maybeMore = false; + } + + if (maybeMore && (fetch->opts.MaxBytes > 0)) + { + remainingBytes = fetch->opts.MaxBytes - fetch->receivedBytes; + if (remainingBytes <= 0) + maybeMore = false; + } + + if (maybeMore) + { + want = remainingUnrequested; + if (want > fetch->opts.FetchSize) + want = fetch->opts.FetchSize; + + maybeMore = (want > 0); + } + + nats_unlockSubAndDispatcher(sub); + + if (!maybeMore) + return 
false; + + // Since we do not allow keepAhead with MaxBytes, this is an accurate count + // of how many more bytes we expect. + *maxBytes = remainingBytes; + *messages = want; + return true; +} + +natsStatus +js_PullSubscribeAsync(natsSubscription **newsub, jsCtx *js, const char *subject, const char *durable, + natsMsgHandler msgCB, void *msgCBClosure, + jsOptions *jsOpts, jsSubOptions *opts, jsErrCode *errCode) +{ + natsStatus s = NATS_OK; + natsSubscription *sub = NULL; + jsSub *jsi = NULL; + jsFetch *fetch = NULL; + + if ((newsub == NULL) || (msgCB == NULL)) + return nats_setDefaultError(NATS_INVALID_ARG); + + if ((jsOpts != NULL) && (jsOpts->PullSubscribeAsync.KeepAhead > 0)) + { + if (jsOpts->PullSubscribeAsync.MaxBytes > 0) + return nats_setError(NATS_INVALID_ARG, "%s", "Can not use MaxBytes and KeepAhead together"); + if (jsOpts->PullSubscribeAsync.NoWait) + return nats_setError(NATS_INVALID_ARG, "%s", "Can not use NoWait with KeepAhead together"); + } + + if (errCode != NULL) + *errCode = 0; + + // Do a basic pull subscribe first, but with a callback so it is treated as + // "async" and assigned to a dispatcher. Since we don't fetch anything, it + // will not be active yet. + s = _subscribe(&sub, js, subject, durable, msgCB, msgCBClosure, true, jsOpts, opts, errCode); + if(s == NATS_OK) + { + fetch = NATS_CALLOC(1, sizeof(jsFetch)); + if (fetch == NULL) + s = nats_setDefaultError(NATS_NO_MEMORY); + } + if (s != NATS_OK) + { + NATS_FREE(fetch); + natsSubscription_Destroy(sub); + return NATS_UPDATE_ERR_STACK(s); + } + + // Initialize fetch parameters. 
+ fetch->status = NATS_OK; + fetch->startTimeMillis = nats_Now(); + + if (jsOpts != NULL) + fetch->opts = jsOpts->PullSubscribeAsync; + if (fetch->opts.FetchSize == 0) + fetch->opts.FetchSize = NATS_DEFAULT_ASYNC_FETCH_SIZE; + if (fetch->opts.NextHandler == NULL) + { + fetch->opts.NextHandler = _autoNextFetchRequest; + fetch->opts.NextHandlerClosure = (void *)fetch; + } + + nats_lockSubAndDispatcher(sub); + jsi = sub->jsi; + + // Set up the fetch options + jsi->fetch = fetch; + jsi->inFetch = true; + + // Start the timers. They will live for the entire length of the + // subscription (the missed heartbeat timer may be reset as needed). + if (fetch->opts.Timeout > 0) + { + sub->refs++; + s = natsTimer_Create(&fetch->expiresTimer, _fetchExpiredFired, _releaseSubWhenStopped, + fetch->opts.Timeout, (void *)sub); + if (s != NATS_OK) + sub->refs--; + } + + if ((s == NATS_OK) && (fetch->opts.Heartbeat > 0)) + { + int64_t dur = fetch->opts.Heartbeat * 2; + sub->refs++; + if (jsi->hbTimer == NULL) + { + s = natsTimer_Create(&jsi->hbTimer, _hbTimerFired, _releaseSubWhenStopped, dur, (void *)sub); + if (s != NATS_OK) + sub->refs--; + } + else + natsTimer_Reset(jsi->hbTimer, dur); + } + + if (s == NATS_OK) + { + // Send the first fetch request. 
+ s = js_maybeFetchMore(sub, fetch); + } + + nats_unlockSubAndDispatcher(sub); + + if (s != NATS_OK) + { + natsSubscription_Destroy(sub); + return NATS_UPDATE_ERR_STACK(s); + } + + *newsub = sub; + return NATS_OK; +} + typedef struct __ackOpts { const char *ackType; @@ -3043,7 +3319,7 @@ natsMsg_isJSCtrl(natsMsg *msg, int *ctrlType) if ((*p == '\r') || (*p == '\n') || (*p == '\0')) return false; - if (strstr(p, CTRL_STATUS) != p) + if (strstr(p, HDR_STATUS_CTRL_100) != p) return false; p += HDR_STATUS_LEN; diff --git a/src/js.h b/src/js.h index 0503849a7..5c8fbd4f9 100644 --- a/src/js.h +++ b/src/js.h @@ -1,4 +1,4 @@ -// Copyright 2021 The NATS Authors +// Copyright 2021-2024 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -28,6 +28,7 @@ void js_unlock(jsCtx *js); #endif // DEV_MODE +#define NATS_DEFAULT_ASYNC_FETCH_SIZE 128 // messages extern const char* jsDefaultAPIPrefix; extern const int64_t jsDefaultRequestWait; @@ -276,3 +277,9 @@ js_cloneConsumerConfig(jsConsumerConfig *org, jsConsumerConfig **clone); void js_destroyConsumerConfig(jsConsumerConfig *cc); + +natsStatus +js_checkFetchedMsg(natsSubscription *sub, natsMsg *msg, uint64_t fetchID, bool checkSts, bool *usrMsg); + +natsStatus +js_maybeFetchMore(natsSubscription *sub, jsFetch *fetch); diff --git a/src/jsm.c b/src/jsm.c index 7c2201231..355733c0e 100644 --- a/src/jsm.c +++ b/src/jsm.c @@ -1978,7 +1978,7 @@ js_directGetMsgToJSMsg(const char *stream, natsMsg *msg) if ((natsMsg_GetDataLength(msg) == 0) && (natsMsgHeader_Get(msg, STATUS_HDR, &val) == NATS_OK)) { - if (strcmp(val, NOT_FOUND_STATUS) == 0) + if (strcmp(val, HDR_STATUS_NOT_FOUND_404) == 0) return nats_setDefaultError(NATS_NOT_FOUND); else { diff --git a/src/msg.c b/src/msg.c index 44e434e32..740b4abf5 100644 --- a/src/msg.c +++ b/src/msg.c @@ -898,7 +898,7 @@ natsMsg_IsNoResponders(natsMsg *m) && 
(natsMsg_GetDataLength(m) == 0) && (natsMsgHeader_Get(m, STATUS_HDR, &val) == NATS_OK) && (val != NULL) - && (strncmp(val, NO_RESP_STATUS, HDR_STATUS_LEN) == 0)); + && (strncmp(val, HDR_STATUS_NO_RESP_503, HDR_STATUS_LEN) == 0)); } void diff --git a/src/msg.h b/src/msg.h index 4f56611d5..6ff2477fa 100644 --- a/src/msg.h +++ b/src/msg.h @@ -17,17 +17,18 @@ #include "status.h" #include "gc.h" -#define HDR_LINE_PRE "NATS/1.0" -#define HDR_LINE_PRE_LEN (8) -#define HDR_LINE HDR_LINE_PRE _CRLF_ -#define HDR_LINE_LEN (10) -#define STATUS_HDR "Status" -#define DESCRIPTION_HDR "Description" -#define NO_RESP_STATUS "503" -#define NOT_FOUND_STATUS "404" -#define REQ_TIMEOUT "408" -#define CTRL_STATUS "100" -#define HDR_STATUS_LEN (3) +#define HDR_LINE_PRE "NATS/1.0" +#define HDR_LINE_PRE_LEN (8) +#define HDR_LINE HDR_LINE_PRE _CRLF_ +#define HDR_LINE_LEN (10) +#define STATUS_HDR "Status" +#define DESCRIPTION_HDR "Description" +#define HDR_STATUS_NO_RESP_503 "503" +#define HDR_STATUS_NOT_FOUND_404 "404" +#define HDR_STATUS_TIMEOUT_408 "408" +#define HDR_STATUS_MAX_BYTES_409 "409" +#define HDR_STATUS_CTRL_100 "100" +#define HDR_STATUS_LEN (3) #define natsMsg_setNeedsLift(m) ((m)->flags |= (1 << 0)) #define natsMsg_needsLift(m) (((m)->flags & (1 << 0)) != 0) diff --git a/src/nats.c b/src/nats.c index 06e46c5b9..dda51cd8d 100644 --- a/src/nats.c +++ b/src/nats.c @@ -200,198 +200,6 @@ nats_CheckCompatibilityImpl(uint32_t headerReqVerNumber, uint32_t headerVerNumbe return true; } -void -nats_deliverMsgsPoolf(void *arg) -{ - natsDispatcher *d = (natsDispatcher *)arg; - natsConnection *nc; - natsSubscription *sub; - natsMsgHandler mcb; - void *mcbClosure; - uint64_t delivered; - uint64_t max; - natsMsg *msg; - bool timerNeedReset = false; - jsSub *jsi; - char *fcReply; - - natsMutex_Lock(d->mu); - - while (true) - { - while (((msg = d->queue.head) == NULL) && !d->shutdown) - natsCondition_Wait(d->cond, d->mu); - - // Break out only when list is empty - if ((msg == NULL) && 
d->shutdown) - { - break; - } - - // Remove message from list now... - d->queue.head = msg->next; - if (d->queue.tail == msg) - d->queue.tail = NULL; - msg->next = NULL; - - // Get subscription reference from message - sub = msg->sub; - - // Capture these under lock - nc = sub->conn; - mcb = sub->msgCb; - mcbClosure = sub->msgCbClosure; - max = sub->max; - - // Is this a control message? - if (msg->subject[0] == '\0') - { - bool closed = (msg == sub->control->sub.close); - bool timedOut = (msg == sub->control->sub.timeout); - bool draining = (msg == sub->control->sub.drain); - - // We need to release this lock... - natsMutex_Unlock(d->mu); - - // Release the message - natsMsg_Destroy(msg); - - if (draining) - { - // Subscription is draining, we are past the last message, - // remove the subscription. This will schedule another - // control message for the close. - natsSub_setDrainCompleteState(sub); - natsConn_removeSubscription(nc, sub); - } - else if (closed) - { - natsOnCompleteCB cb = NULL; - void *closure = NULL; - - // Call this in case the subscription was draining. - natsSub_setDrainCompleteState(sub); - - // Check for completion callback - natsSub_Lock(sub); - cb = sub->onCompleteCB; - closure = sub->onCompleteCBClosure; - natsSub_Unlock(sub); - - if (cb != NULL) - (*cb)(closure); - - // Subscription closed, just release - natsSub_release(sub); - } - else if (timedOut) - { - // Invoke the callback with a NULL message. - (*mcb)(nc, sub, NULL, mcbClosure); - } - - // Grab the lock, we go back to beginning of loop. - natsMutex_Lock(d->mu); - - if (!draining && !closed && timedOut) - { - // Reset the timedOut boolean to allow for the - // subscription to timeout again, and reset the - // timer to fire again starting from now. - sub->timedOut = false; - natsTimer_Reset(sub->timeoutTimer, sub->timeout); - } - - // Go back to top of loop. - continue; - } - - // Update the sub's stats before checking closed state. 
(We now post - // control messages to the sub's queue, because hbTimer processing is - // expecting it, so need to clear the stats for them, too) - sub->ownDispatcher.queue.msgs--; - sub->ownDispatcher.queue.bytes -= natsMsg_dataAndHdrLen(msg); - - // Need to check for closed subscription again here. - // The subscription could have been unsubscribed from a callback - // but there were already pending messages. The control message - // is queued up. Until it is processed, we need to simply - // discard the message and continue. - if (sub->closed) - { - natsMsg_Destroy(msg); - continue; - } - - delivered = ++(sub->delivered); - - jsi = sub->jsi; - fcReply = (jsi == NULL ? NULL : jsSub_checkForFlowControlResponse(sub)); - - // Is this a subscription that can timeout? - if (!sub->draining && (sub->timeout != 0)) - { - // Prevent the timer to post a timeout control message - sub->timeoutSuspended = true; - - // If we are dealing with the last pending message for this sub, - // we will reset the timer after the user callback returns. - if (sub->ownDispatcher.queue.msgs == 0) - timerNeedReset = true; - } - - natsMutex_Unlock(d->mu); - - if ((max == 0) || (delivered <= max)) - { - (*mcb)(nc, sub, msg, mcbClosure); - } - else - { - // We need to destroy the message since the user can't do it - natsMsg_Destroy(msg); - } - - if (fcReply != NULL) - { - natsConnection_Publish(nc, fcReply, NULL, 0); - NATS_FREE(fcReply); - } - - // Don't do 'else' because we need to remove when we have hit - // the max (after the callback returns). - if ((max > 0) && (delivered >= max)) - { - // Call this blindly, it will be a no-op if the subscription was not draining. - natsSub_setDrainCompleteState(sub); - // If we have hit the max for delivered msgs, remove sub. - natsConn_removeSubscription(nc, sub); - } - - natsMutex_Lock(d->mu); - - // Check if timer need to be reset for subscriptions that can timeout. 
- if (!sub->closed && (sub->timeout != 0) && timerNeedReset) - { - timerNeedReset = false; - - // Do this only on timer reset instead of after each return - // from callback. The reason is that if there are still pending - // messages for this subscription (this is the case otherwise - // timerNeedReset would be false), we should prevent - // the subscription to timeout anyway. - sub->timeoutSuspended = false; - - // Reset the timer to fire in `timeout` from now. - natsTimer_Reset(sub->timeoutTimer, sub->timeout); - } - } - - natsMutex_Unlock(d->mu); - - natsLib_Release(); -} - natsStatus nats_SetMessageDeliveryPoolSize(int max) { diff --git a/src/nats.h b/src/nats.h index 9436412ed..e1961231b 100644 --- a/src/nats.h +++ b/src/nats.h @@ -1206,6 +1206,41 @@ typedef struct jsFetchRequest } jsFetchRequest; +/** \brief Callback used to indicate that the work of js_PullSubscribeAsync is + * done. + * + * @param nc - Connection to the NATS server + * @param sub - Subscription being used + * @param s - Completion status code + * - `NATS_OK` - should never happen here! + * - `NATS_TIMEOUT` indicates that the fetch has reached its lifetime expiration + * time, or had NoWait set and there are no more messages. + * - `NATS_NOT_FOUND` is returned when the server has no messages to deliver at + * the beginning of a specific request. It may be returned for NoWait + * subscriptions, effectively the same meaning as NATS_TIMEOUT - early + * termination for NoWait. + * - `NATS_MAX_DELIVERED_MSGS` indicates that lifetime `Batch` message limit has + * been reached. + * - `NATS_LIMIT_REACHED` is returned when the lifetime byte limit is reached. + * - Other status values represent error conditions. 
+ * @param closure completeClosure that was passed to js_PullSubscribeAsync + * + * @see js_PullSubscribeAsync + */ +typedef void (*natsFetchCompleteHandler)(natsConnection *nc, natsSubscription *sub, natsStatus s, void *closure); + +/** \brief Callback used to customize flow control for js_PullSubscribeAsync. + * + * The library will invoke this callback when it may be time to request more + * messages from the server. + * + * @return true to fetch more, false to skip. If true, @messages and @maxBytes + * should be set to the number of messages and max bytes to fetch. + * + * @see js_PullSubscribeAsync + */ +typedef bool (*natsFetchNextHandler)(int *messages, int64_t *bytes, natsSubscription *sub, void *closure); + /** * JetStream context options. * @@ -1243,6 +1278,51 @@ typedef struct jsOptions } PublishAsync; + struct jsOptionsPullSubscribeAsync + { + // Lifetime of the subscription (completes when any one of the + // targets is reached). + int64_t Timeout; // in milliseconds + int MaxMessages; + int64_t MaxBytes; + + // If NoWait is set, the subscription will receive the messages + // already stored on the server subject to the limits, but will + // not wait for more messages. + // + // Note that if Timeout is set we would still wait for first + // message to become available, even if there are currently any + // on the server + bool NoWait; + + // Fetch complete handler that receives the exit status code, + // the subscription's Complete handler is also invoked, but does + // not have the status code. + natsFetchCompleteHandler CompleteHandler; + void *CompleteHandlerClosure; + + // Have server sends heartbeats at this interval to help detect + // communication failures. + int64_t Heartbeat; // in milliseconds + + // Options to control automatic Fetch flow control. The number + // of messages to ask for in a single request, and if we should + // try to fetch ahead, KeepAhead more than we need to finish the + // current request. 
Fetch this many messages ahead of time. + // + // KeepAhead can not be used in conjunction with MaxBytes or + // NoWait. + int FetchSize; + int KeepAhead; + + // Manual fetch flow control. If provided gets called before + // each message is deliverered to msgCB, and overrides the + // default algorithm for sending Next requests. + natsFetchNextHandler NextHandler; + void *NextHandlerClosure; + + } PullSubscribeAsync; + /** * Advanced stream options * @@ -6519,6 +6599,34 @@ natsSubscription_Fetch(natsMsgList *list, natsSubscription *sub, int batch, int6 NATS_EXTERN natsStatus jsFetchRequest_Init(jsFetchRequest *request); +/** \brief Starts a Pull based JetStream subscription, and delivers messages to + * a user callback asynchronously. + * + * The subscription can be set up to run indefinitely, and issue pull requests + * as needed, or it can be set up to auto-terminate when certain conditions + * (like max messages, or a time-based expiration) are met. `lifetime` is used + * to control the basic limits, and whether to use server Heartbeats to detect + * connection failures. jsOpts->PullSubscribeAsync is used to control the pulling + * parameters, provide extra event callbacks and hooks, and to tune the handling + * of missing heartbets. + * + * @param newsub the location where to store the pointer to the newly created + * #natsSubscription object. + * @param js the pointer to the #jsCtx object. + * @param subject the subject this subscription is created for. + * @param durable the optional durable name. + * @param msgCB the #natsMsgHandler callback. + * @param msgCBClosure a pointer to an user defined object (can be `NULL`). + * @param jsOpts the pointer to the #jsOptions object, possibly `NULL`. + * @param opts the subscribe options, possibly `NULL`. + * @param errCode the location where to store the JetStream specific error code, + * or `NULL` if not needed. 
+ */ +NATS_EXTERN natsStatus +js_PullSubscribeAsync(natsSubscription **newsub, jsCtx *js, const char *subject, const char *durable, + natsMsgHandler msgCB, void *msgCBClosure, + jsOptions *jsOpts, jsSubOptions *opts, jsErrCode *errCode); + /** \brief Fetches messages for a pull subscription with a complete request configuration * * Similar to #natsSubscription_Fetch but a full #jsFetchRequest configuration is provided diff --git a/src/natsp.h b/src/natsp.h index 84bf00612..2f99de066 100644 --- a/src/natsp.h +++ b/src/natsp.h @@ -274,7 +274,7 @@ struct __natsOptions void *evLoop; natsEvLoopCallbacks evCbs; - // If set to false, the client will start a per-subscription dedicated + // If set to false, the client will start a per-subscription "own" // thread to deliver messages to the user callbacks. If true, a shared // thread out of a thread pool is used. natsClientConfig controls the pool // size. @@ -372,6 +372,28 @@ struct __jsCtx bool closed; }; +typedef struct __jsFetch +{ + struct jsOptionsPullSubscribeAsync opts; + + natsStatus status; + + // Stats + int64_t startTimeMillis; + int receivedMsgs; + int64_t receivedBytes; + int deliveredMsgs; + int64_t deliveredBytes; + int requestedMsgs; + + // Timer for the fetch expiration. We leverage the existing jsi->hbTimer for + // checking missed heartbeats. + natsTimer *expiresTimer; + + // Matches jsi->fetchID + char replySubject[NATS_DEFAULT_INBOX_PRE_LEN + NUID_BUFFER_LEN + 32]; // big enough for {INBOX}.number +} jsFetch; + typedef struct __jsSub { jsCtx *js; @@ -385,6 +407,7 @@ typedef struct __jsSub bool dc; // delete JS consumer in Unsub()/Drain() bool ackNone; uint64_t fetchID; + jsFetch *fetch; // This is ConsumerInfo's Pending+Consumer.Delivered that we get from the // add consumer response. 
Note that some versions of the server gather the @@ -498,7 +521,7 @@ typedef struct __natsSubscriptionControlMessages { natsMsg *expired; natsMsg *missedHeartbeat; - } batch; + } fetch; } natsSubscriptionControlMessages; struct __natsSubscription @@ -512,8 +535,8 @@ struct __natsSubscription // We always have a dispatcher to keep track of things, even if the // subscription is sync. The dispatcher is set up at the subscription - // creation time, and may point to a dedicated thread using sub's own - // dispatchQueue, or a shared worker using its own dispatch queue, which + // creation time, and may point to a dedicated thread that uses sub's own + // dispatchQueue, or a shared worker with a shared queue, which // dispatcher->queue then points to. natsDispatcher *dispatcher; natsDispatcher ownDispatcher; @@ -896,6 +919,7 @@ static inline void nats_unlockDispatcher(natsDispatcher *d) natsMutex_Unlock(d->mu); } -void nats_deliverMsgsPoolf(void *arg); +void nats_dispatchThreadPool(void *arg); +void nats_dispatchThreadOwn(void *arg); #endif /* NATSP_H_ */ diff --git a/src/status.c b/src/status.c index 1339bf4b4..d52b07cb7 100644 --- a/src/status.c +++ b/src/status.c @@ -71,6 +71,7 @@ static const char *statusText[] = { "Mismatch", "Missed Server Heartbeat", + "Limit reached", }; const char* diff --git a/src/status.h b/src/status.h index ee8bfe703..e8f90c7ed 100644 --- a/src/status.h +++ b/src/status.h @@ -130,6 +130,9 @@ typedef enum NATS_MISSED_HEARTBEAT, ///< For JetStream subscriptions, it means that the library detected that server heartbeats have been missed. + NATS_LIMIT_REACHED, ///< Attempt to receive messages than allowed by the byte limit, for + /// instance in js_PullSubscribeAsync(). 
+ } natsStatus; typedef enum { diff --git a/src/sub.c b/src/sub.c index 561867b2b..d5ffe33e9 100644 --- a/src/sub.c +++ b/src/sub.c @@ -56,8 +56,8 @@ static inline void _freeControlMessages(natsSubscription *sub) _destroyControlMessage(sub->control->sub.timeout); _destroyControlMessage(sub->control->sub.close); _destroyControlMessage(sub->control->sub.drain); - _destroyControlMessage(sub->control->batch.expired); - _destroyControlMessage(sub->control->batch.missedHeartbeat); + _destroyControlMessage(sub->control->fetch.expired); + _destroyControlMessage(sub->control->fetch.missedHeartbeat); NATS_FREE(sub->control); } @@ -80,10 +80,10 @@ _initOwnDispatcher(natsSubscription *sub) { natsStatus s = NATS_OK; - if (sub->ownDispatcher.dedicatedTo != NULL) + if (sub->ownDispatcher.ownedBy != NULL) return nats_setDefaultError(NATS_ILLEGAL_STATE); - sub->ownDispatcher.dedicatedTo = sub; + sub->ownDispatcher.ownedBy = sub; sub->ownDispatcher.mu = sub->mu; s = natsCondition_Create(&sub->ownDispatcher.cond); return NATS_UPDATE_ERR_STACK(s); @@ -200,141 +200,17 @@ void natsSub_setDrainCompleteState(natsSubscription *sub) natsSub_Unlock(sub); } -// _deliverMsgs is used to deliver messages to asynchronous subscribers. -void natsSub_deliverMsgs(void *arg) -{ - natsSubscription *sub = (natsSubscription *)arg; - natsConnection *nc = sub->conn; - natsMsgHandler mcb = sub->msgCb; - void *mcbClosure = sub->msgCbClosure; - uint64_t delivered; - uint64_t max; - natsMsg *msg; - int64_t timeout; - natsStatus s = NATS_OK; - bool draining = false; - bool rmSub = false; - natsOnCompleteCB onCompleteCB = NULL; - void *onCompleteCBClosure = NULL; - char *fcReply = NULL; - jsSub *jsi = NULL; - - // This just serves as a barrier for the creation of this thread. 
- natsConn_Lock(nc); - natsConn_Unlock(nc); - - natsSub_Lock(sub); - timeout = sub->timeout; - jsi = sub->jsi; - natsSub_Unlock(sub); - - while (true) - { - natsSub_Lock(sub); - - s = NATS_OK; - while (((msg = sub->ownDispatcher.queue.head) == NULL) && !(sub->closed) && !(sub->draining) && (s != NATS_TIMEOUT)) - { - if (timeout != 0) - s = natsCondition_TimedWait(sub->ownDispatcher.cond, sub->mu, timeout); - else - natsCondition_Wait(sub->ownDispatcher.cond, sub->mu); - } - - if (sub->closed) - { - natsSub_Unlock(sub); - break; - } - draining = sub->draining; - - // Will happen with timeout subscription - if (msg == NULL) - { - natsSub_Unlock(sub); - if (draining) - { - rmSub = true; - break; - } - // If subscription timed-out, invoke callback with NULL message. - if (s == NATS_TIMEOUT) - (*mcb)(nc, sub, NULL, mcbClosure); - continue; - } - - delivered = ++(sub->delivered); - - sub->ownDispatcher.queue.head = msg->next; - - if (sub->ownDispatcher.queue.tail == msg) - sub->ownDispatcher.queue.tail = NULL; - - sub->ownDispatcher.queue.msgs--; - sub->ownDispatcher.queue.bytes -= natsMsg_dataAndHdrLen(msg); - - msg->next = NULL; - - // Capture this under lock. - max = sub->max; - - // Check for JS flow control - fcReply = (jsi == NULL ? NULL : jsSub_checkForFlowControlResponse(sub)); - - natsSub_Unlock(sub); - - if ((max == 0) || (delivered <= max)) - { - (*mcb)(nc, sub, msg, mcbClosure); - } - else - { - // We need to destroy the message since the user can't do it - natsMsg_Destroy(msg); - } - - if (fcReply != NULL) - { - natsConnection_Publish(nc, fcReply, NULL, 0); - NATS_FREE(fcReply); - } - - // Don't do 'else' because we need to remove when we have hit - // the max (after the callback returns). - if ((max > 0) && (delivered >= max)) - { - // If we have hit the max for delivered msgs, remove sub. 
- rmSub = true; - break; - } - } - - natsSub_Lock(sub); - onCompleteCB = sub->onCompleteCB; - onCompleteCBClosure = sub->onCompleteCBClosure; - _setDrainCompleteState(sub); - natsSub_Unlock(sub); - - if (rmSub) - natsConn_removeSubscription(nc, sub); - - if (onCompleteCB != NULL) - (*onCompleteCB)(onCompleteCBClosure); - - natsSub_release(sub); -} - // Should be called only during the subscription creation process, no need to lock static inline natsStatus _runOwnDispatcher(natsSubscription *sub, bool forReplies) { natsStatus s = NATS_OK; if (sub->ownDispatcher.thread != NULL) - return nats_setDefaultError(NATS_ILLEGAL_STATE); // already running + return NATS_ILLEGAL_STATE; // already running sub->dispatcher = &sub->ownDispatcher; - s = natsThread_Create(&sub->ownDispatcher.thread, natsSub_deliverMsgs, (void *) sub); - return NATS_UPDATE_ERR_STACK(s); + s = natsThread_Create(&sub->ownDispatcher.thread, nats_dispatchThreadOwn, (void *) sub); + return s; } bool natsSub_setMax(natsSubscription *sub, uint64_t max) @@ -378,8 +254,13 @@ void natsSub_close(natsSubscription *sub, bool connectionClosed) sub->closed = true; sub->connClosed = connectionClosed; - if ((sub->jsi != NULL) && (sub->jsi->hbTimer != NULL)) - natsTimer_Stop(sub->jsi->hbTimer); + if (sub->jsi != NULL) + { + if (sub->jsi->hbTimer != NULL) + natsTimer_Stop(sub->jsi->hbTimer); + if ((sub->jsi->fetch != NULL) && (sub->jsi->fetch->expiresTimer != NULL)) + natsTimer_Stop(sub->jsi->fetch->expiresTimer); + } // If this is a subscription with timeout, stop the timer. 
if (sub->timeout != 0) @@ -451,8 +332,8 @@ natsStatus nats_createControlMessages(natsSubscription *sub) IFOK(s, _createControlMessage(&(sub->control->sub.timeout), sub)); IFOK(s, _createControlMessage(&sub->control->sub.close, sub)); IFOK(s, _createControlMessage(&sub->control->sub.drain, sub)); - IFOK(s, _createControlMessage(&sub->control->batch.expired, sub)); - IFOK(s, _createControlMessage(&sub->control->batch.missedHeartbeat, sub)); + IFOK(s, _createControlMessage(&sub->control->fetch.expired, sub)); + IFOK(s, _createControlMessage(&sub->control->fetch.missedHeartbeat, sub)); // no need to free on failure, sub's free will clean it up. return NATS_UPDATE_ERR_STACK(s); @@ -863,10 +744,12 @@ _unsubscribe(natsSubscription *sub, int max, bool drainMode, int64_t timeout) nc = sub->conn; _retain(sub); - if ((jsi = sub->jsi) != NULL) + if ((max == 0) && (jsi = sub->jsi) != NULL) { if (jsi->hbTimer != NULL) natsTimer_Stop(jsi->hbTimer); + if ((jsi->fetch != NULL) && (jsi->fetch->expiresTimer != NULL)) + natsTimer_Stop(jsi->fetch->expiresTimer); dc = jsi->dc; } @@ -1213,8 +1096,8 @@ natsSubscription_GetSubject(natsSubscription *sub) return subject; } -// This works for both shared and dedicated dispatchers since we maintain the -// per-sub stats. +// This works for both shared and own dispatchers since we maintain the per-sub +// stats. 
natsStatus natsSubscription_GetPending(natsSubscription *sub, int *msgs, int *bytes) { diff --git a/test/list_test.txt b/test/list_test.txt index 8a8e82926..04e9e930d 100644 --- a/test/list_test.txt +++ b/test/list_test.txt @@ -102,7 +102,13 @@ _test(JetStreamSubscribeConfigCheck) _test(JetStreamSubscribeFlowControl) _test(JetStreamSubscribeHeadersOnly) _test(JetStreamSubscribeIdleHearbeat) +_test(JetStreamSubscribePull_Reconnect) _test(JetStreamSubscribePull) +_test(JetStreamSubscribePullAsync_Disconnect) +_test(JetStreamSubscribePullAsync_MissedHB) +_test(JetStreamSubscribePullAsync_Reconnect) +_test(JetStreamSubscribePullAsync_Unsubscribe) +_test(JetStreamSubscribePullAsync) _test(JetStreamSubscribeSync) _test(JetStreamSubscribeWithFWC) _test(JetStreamUnmarshalAccountInfo) diff --git a/test/test.c b/test/test.c index 11c850171..d2bd91666 100644 --- a/test/test.c +++ b/test/test.c @@ -86,8 +86,8 @@ static const char *natsStreamingServerExe = "nats-streaming-server"; natsMutex *slMu = NULL; natsHash *slMap = NULL; -#define test(s) { printf("#%02d ", ++tests); printf("%s\n", (s)); fflush(stdout); } -#define testf(s, ...) { printf("#%02d ", ++tests); printf((s "\n"), __VA_ARGS__); fflush(stdout); } +#define test(s) { printf("#%02d ", ++tests); printf("%s", (s)); fflush(stdout); } +#define testf(s, ...) 
{ printf("#%02d ", ++tests); printf((s), __VA_ARGS__); fflush(stdout); } #ifdef _WIN32 #define testCond(c) if(c) { printf("PASSED\n"); fflush(stdout); } else { printf("FAILED\n"); nats_PrintLastErrorStack(stdout); fflush(stdout); failed=true; return; } @@ -124,6 +124,7 @@ struct threadArg int timerStopped; natsStrHash *inboxes; natsStatus status; + char lastErrorBuf[256]; const char* string; int N; bool connected; @@ -7699,7 +7700,7 @@ static int _numRunningThreads(natsDispatcherPool *pool) { int i, n; for (n = 0, i = 0; i < pool->cap; i++) - n += (pool->dispatchers[i].thread != NULL); + n += (pool->dispatchers[i] != NULL); return n; } @@ -7726,7 +7727,7 @@ void test_AssignSubToDispatch(void) char pubSubj[32]; int i=0, n=0; - const int numMsgs = 5; + const int numMsgs = 100; typedef struct { @@ -7849,7 +7850,7 @@ void test_AssignSubToDispatch(void) for (i = 0; (s == NATS_OK) && (i < tc->numSubs); i++) { natsSub_Lock(subs[i]); - if (subs[i]->dispatcher != &pool->dispatchers[i % tc->expectedDispatchers]) + if (subs[i]->dispatcher != pool->dispatchers[i % tc->expectedDispatchers]) s = NATS_ERR; natsSub_Unlock(subs[i]); } @@ -22727,7 +22728,7 @@ if (!serverVersionAtLeast((major), (minor), (update))) \ return; \ } -#define JS_SETUP(major, minor, update) \ +#define JS_SETUP_WITH_OPTS(major, minor, update, opts) \ natsConnection *nc = NULL; \ jsCtx *js = NULL; \ natsPid pid = NATS_INVALID_PID; \ @@ -22744,13 +22745,15 @@ CHECK_SERVER_STARTED(pid); \ testCond(true); \ \ test("Connect: "); \ -s = natsConnection_ConnectTo(&nc, NATS_DEFAULT_URL); \ +s = natsConnection_Connect(&nc, opts); \ testCond(s == NATS_OK); \ \ test("Get context: "); \ s = natsConnection_JetStream(&js, nc, NULL); \ testCond(s == NATS_OK); +#define JS_SETUP(major, minor, update) JS_SETUP_WITH_OPTS((major), (minor), (update), NULL) + #define JS_TEARDOWN \ jsCtx_Destroy(js); \ natsConnection_Destroy(nc); \ @@ -28541,7 +28544,7 @@ void test_JetStreamSubscribePull(void) test("Max waiting error: "); s = 
natsSubscription_Fetch(&list, sub, 1, 1000, &jerr); - testCond((s == NATS_ERR) && (strstr(nats_GetLastError(NULL), "Exceeded") != NULL)); + testCond((s == NATS_LIMIT_REACHED) && (strstr(nats_GetLastError(NULL), "Exceeded") != NULL)); nats_clearLastError(); natsSubscription_Destroy(sub2); @@ -28684,7 +28687,7 @@ void test_JetStreamSubscribePull(void) fr.MaxBytes = 2048; fr.Expires = NATS_SECONDS_TO_NANOS(1); s = natsSubscription_FetchRequest(&list, sub, &fr); - testCond((s == NATS_ERR) && (list.Count == 0) && (list.Msgs == NULL) + testCond((s == NATS_LIMIT_REACHED) && (list.Count == 0) && (list.Msgs == NULL) && (strstr(nats_GetLastError(NULL), "Exceeded MaxRequestMaxBytes") != NULL)); nats_clearLastError(); @@ -28778,6 +28781,794 @@ void test_JetStreamSubscribePull(void) _destroyDefaultThreadArgs(&args); } +static void +_jsPubReconnectThread(void *closure) +{ + jsCtx *js = (jsCtx*) closure; + + nats_Sleep(5); + js_Publish(NULL, js, "foo", "hello", 5, NULL, NULL); + natsConnection_Flush(js->nc); + + natsConnection_Reconnect(js->nc); + nats_Sleep(500); + + js_Publish(NULL, js, "foo", "hell1", 5, NULL, NULL); + natsConnection_Flush(js->nc); +} + +void test_JetStreamSubscribePull_Reconnect(void) +{ + natsStatus s; + JS_SETUP(2, 9, 2); + + test("Create stream: "); + jsErrCode jerr = 0; + jsStreamConfig sc; + jsStreamConfig_Init(&sc); + sc.Name = "TEST"; + sc.Subjects = (const char *[1]){"foo"}; + sc.SubjectsLen = 1; + s = js_AddStream(NULL, js, &sc, NULL, &jerr); + testCond((s == NATS_OK) && (jerr == 0)); + + test("Subscribe: "); + natsSubscription *sub = NULL; + s = js_PullSubscribe(&sub, js, "foo", "dur", NULL, NULL, &jerr); + testCond(s == NATS_OK); + + test("Start thread to send: "); + natsThread *t = NULL; + s = natsThread_Create(&t, _jsPubReconnectThread, (void*) js); + testCond(s == NATS_OK); + + test("Fetch request succeeds over a reconnect: "); + jsFetchRequest fr = { + .Batch = 2, + .Expires = NATS_SECONDS_TO_NANOS(10), + }; + natsMsgList list; + s = 
natsSubscription_FetchRequest(&list, sub, &fr); + testCond((s == NATS_OK) && (list.Count == 2)); + natsMsgList_Destroy(&list); + + natsThread_Join(t); + natsThread_Destroy(t); + natsSubscription_Destroy(sub); + JS_TEARDOWN; +} + +static void +_recvPullAsync(natsConnection *nc, natsSubscription *sub, natsMsg *msg, + void *closure) +{ + struct threadArg *arg = (struct threadArg *)closure; + + natsMutex_Lock(arg->m); + arg->sum++; + + switch (arg->control) + { + case 1: + arg->msgReceived = true; + natsMsg_Ack(msg, NULL); + natsCondition_Signal(arg->c); + break; + + case 2: + arg->msgReceived = true; + natsCondition_Signal(arg->c); + break; + + case 3: + arg->msgReceived = true; + natsCondition_Signal(arg->c); + natsCondition_Wait(arg->c, arg->m); + break; + } + + natsMsg_Destroy(msg); + natsMutex_Unlock(arg->m); +} + +static void +_completePullAsync(natsConnection *nc, natsSubscription *sub, natsStatus exitStatus, + void *closure) +{ + struct threadArg *arg = (struct threadArg *)closure; + natsMutex_Lock(arg->m); + arg->closed = true; + arg->status = exitStatus; + const char *le = nats_GetLastError(NULL); + if (le != NULL) + strncpy(arg->lastErrorBuf, le, sizeof(arg->lastErrorBuf)-1); + natsCondition_Broadcast(arg->c); + natsMutex_Unlock(arg->m); +} + +static bool +_testBatchCompleted(struct threadArg *args, natsSubscription *sub, natsStatus expectedStatus, int expectedMsgs, bool orFewer) +{ + natsStatus s = NATS_OK; + natsMutex_Lock(args->m); + while ((s != NATS_TIMEOUT) && !args->closed) + s = natsCondition_TimedWait(args->c, args->m, 1000); // 1 second should always be enough + + bool result = ((s == NATS_OK) && + args->closed && + (args->status == expectedStatus)); + + if (orFewer) + result = result && (args->sum <= expectedMsgs); + else + result = result && (args->sum == expectedMsgs); + natsMutex_Unlock(args->m); + + // We may get called before the delivery thread terminates the sub, this + // yields and avoids the race for the purpose of the test. 
+ nats_Sleep(1); + + result = result && !natsSubscription_IsValid(sub); + if (!result) + { + if (s != NATS_OK) + printf("FAILED: timed out: %d\n", s); + if (!args->closed) + printf("FAILED: onComplete has not been called\n"); + if (args->status != expectedStatus) + printf("FAILED: status: %d, expected: %d\n", args->status, expectedStatus); + if (orFewer) + { + if (args->sum > expectedMsgs) + printf("FAILED: msgs: %d, expected at most: %d\n", args->sum, expectedMsgs); + } + else + { + if (args->sum != expectedMsgs) + printf("FAILED: msgs: %d, expected: %d\n", args->sum, expectedMsgs); + } + if (natsSubscription_IsValid(sub)) + printf("FAILED: subscription is still valid\n"); + } + return result; +} + +void test_JetStreamSubscribePullAsync(void) +{ + natsStatus s; + natsSubscription *sub = NULL; + natsMsg *msg = NULL; + jsErrCode jerr = 0; + natsMsgList list; + jsStreamConfig sc; + jsOptions jsOpts; + jsSubOptions so; + struct threadArg args; + jsConsumerConfig cc; + + JS_SETUP(2, 9, 2); + + s = _createDefaultThreadArgsForCbTests(&args); + if (s != NATS_OK) + FAIL("Unable to setup test"); + + test("Create stream: "); + jsStreamConfig_Init(&sc); + sc.Name = "TEST"; + sc.Subjects = (const char *[1]){"foo"}; + sc.SubjectsLen = 1; + s = js_AddStream(NULL, js, &sc, NULL, &jerr); + testCond((s == NATS_OK) && (jerr == 0)); + + // TEST various error conditions. 
+ + test("Create pull sub async (invalid args): "); + s = js_PullSubscribeAsync(NULL, js, "foo", "dur", _recvPullAsync, &args, NULL, NULL, &jerr); + if (s == NATS_INVALID_ARG) + s = js_PullSubscribeAsync(&sub, NULL, "foo", "dur", _recvPullAsync, &args, NULL, NULL, &jerr); + if (s == NATS_INVALID_ARG) + s = js_PullSubscribeAsync(&sub, js, NULL, "dur", _recvPullAsync, &args, NULL, NULL, &jerr); + if (s == NATS_INVALID_ARG) + s = js_PullSubscribeAsync(NULL, js, "", "dur", _recvPullAsync, &args, NULL, NULL, &jerr); + if (s == NATS_INVALID_ARG) + s = js_PullSubscribeAsync(&sub, js, "foo", "dur", NULL, &args, NULL, NULL, &jerr); + testCond((s == NATS_INVALID_ARG) && (sub == NULL) && (jerr == 0)); + nats_clearLastError(); + + test("AckNone ok: "); + jsSubOptions_Init(&so); + so.Config.AckPolicy = js_AckNone; + s = js_PullSubscribeAsync(&sub, js, "foo", "ackNone", _recvPullAsync, &args, NULL, &so, &jerr); + testCond((s == NATS_OK) && (sub != NULL) && (jerr == 0)); + natsSubscription_Unsubscribe(sub); + natsSubscription_Destroy(sub); + sub = NULL; + + test("AckAll ok: "); + jsSubOptions_Init(&so); + so.Config.AckPolicy = js_AckAll; + s = js_PullSubscribeAsync(&sub, js, "foo", "ackAll", _recvPullAsync, &args, NULL, &so, &jerr); + testCond((s == NATS_OK) && (sub != NULL) && (jerr == 0)); + natsSubscription_Unsubscribe(sub); + natsSubscription_Destroy(sub); + sub = NULL; + + test("Create push consumer: "); + jsConsumerConfig_Init(&cc); + cc.Durable = "push_dur"; + cc.DeliverSubject = "push.deliver"; + s = js_AddConsumer(NULL, js, "TEST", &cc, NULL, &jerr); + testCond((s == NATS_OK) && (jerr == 0)); + + test("Try create pull sub from push consumer: "); + s = js_PullSubscribeAsync(&sub, js, "foo", "push_dur", _recvPullAsync, &args, NULL, &so, &jerr); + testCond((s == NATS_ERR) && (sub == NULL) && (jerr == 0) && (strstr(nats_GetLastError(NULL), jsErrPullSubscribeToPushConsumer) != NULL)); + nats_clearLastError(); + + test("Create pull bound failure: "); + jsSubOptions_Init(&so); 
+ so.Stream = "TEST"; + so.Consumer = "bar"; + s = js_PullSubscribeAsync(&sub, js, "foo", "bar", _recvPullAsync, &args, NULL, &so, &jerr); + testCond((s == NATS_NOT_FOUND) && (sub == NULL) && (jerr == JSConsumerNotFoundErr)); + nats_clearLastError(); + + // TEST real subscription, auto-ack. + + test("js_PullAsyncSubscribe for real, manual ack: "); + // Signal as soon as we get the first message. + args.control = 2; // don't ack + + jsSubOptions_Init(&so); + so.Config.MaxAckPending = 10; + so.Config.AckWait = NATS_MILLIS_TO_NANOS(300); + so.ManualAck = true; + s = js_PullSubscribeAsync(&sub, js, "foo", "dur", _recvPullAsync, &args, NULL, &so, &jerr); + testCond((s == NATS_OK) && (sub != NULL) && (jerr == 0)); + + test("Can't call NextMsg: "); + s = natsSubscription_NextMsg(&msg, sub, 1000); + testCond((s == NATS_ILLEGAL_STATE) && (msg == NULL)); + nats_clearLastError(); + + test("Can't call Fetch: "); + s = natsSubscription_Fetch(&list, sub, 1, 500, &jerr); + testCond((s == NATS_ERR) && (msg == NULL) && (strstr(nats_GetLastError(NULL), jsErrConcurrentFetchNotAllowed) != NULL)); + nats_clearLastError(); + + int noMessageTimeout = 80; + int messageArrivesImmediatelyTimeout = 40; + int ackTimeout = (int)(so.Config.AckWait / 1E6) + 100; + testf("No messages yet, timeout in %dms: ", noMessageTimeout); + natsMutex_Lock(args.m); + while ((s != NATS_TIMEOUT) && !args.msgReceived) + s = natsCondition_TimedWait(args.c, args.m, noMessageTimeout); + testCond(s == NATS_TIMEOUT); + natsMutex_Unlock(args.m); + + test("Send a message: "); + s = js_Publish(NULL, js, "foo", "hello", 5, NULL, &jerr); + testCond((s == NATS_OK) && (jerr == 0)); + + testf("Arrives in under %dms: ", messageArrivesImmediatelyTimeout); + natsMutex_Lock(args.m); + while ((s != NATS_TIMEOUT) && !args.msgReceived) + s = natsCondition_TimedWait(args.c, args.m, messageArrivesImmediatelyTimeout); + testCond((s == NATS_OK) && args.msgReceived); + args.msgReceived = false; + natsMutex_Unlock(args.m); + + testf("No 
more messages yet, timeout in %dms: ", noMessageTimeout); + natsMutex_Lock(args.m); + while ((s != NATS_TIMEOUT) && !args.msgReceived) + s = natsCondition_TimedWait(args.c, args.m, noMessageTimeout); + testCond(s == NATS_TIMEOUT); + s = NATS_OK; + args.msgReceived = false; + args.control = 1; // ack next + natsMutex_Unlock(args.m); + + testf("Wait another so.Config.AckWait+100 = %dms and see that the message is re-delivered, ACK it: ", ackTimeout); + natsMutex_Lock(args.m); + while ((s != NATS_TIMEOUT) && !args.msgReceived) + s = natsCondition_TimedWait(args.c, args.m, ackTimeout); + testCond(s == NATS_OK); + args.msgReceived = false; + natsMutex_Unlock(args.m); + + testf("Wait another so.Config.AckWait+100 = %dms and see that the message is not re-delivered: ", ackTimeout); + natsMutex_Lock(args.m); + while ((s != NATS_TIMEOUT) && !args.msgReceived) + s = natsCondition_TimedWait(args.c, args.m, ackTimeout); + testCond((s == NATS_TIMEOUT) && !args.msgReceived); + s = NATS_OK; + natsMutex_Unlock(args.m); + + test("Receive msg with header and no data comes through: "); + s = natsMsg_create(&msg, sub->subject, (int)strlen(sub->subject), NULL, 0, + "NATS/1.0\r\nk:v\r\n\r\n", 17, 17); + IFOK(s, natsConnection_PublishMsg(nc, msg)); + natsMsg_Destroy(msg); + + natsMutex_Lock(args.m); + while ((s != NATS_TIMEOUT) && !args.msgReceived) + s = natsCondition_TimedWait(args.c, args.m, messageArrivesImmediatelyTimeout); + testCond(s == NATS_OK); + args.msgReceived = false; + natsMutex_Unlock(args.m); + + natsSubscription_Destroy(sub); + sub = NULL; + + // TEST exit criteria. 
+ typedef struct + { + const char *name; + bool noWait; + int want; + int expires; // ms + int maxBytes; + int before; + int during; + int fetchSize; + natsStatus expectedStatus; + int expectedN; + bool orFewer; + } nowaitTC_t; + nowaitTC_t nowaitTCs[] = { + { + .name = "single fetch NOWAIT, partially fulfilled NATS_TIMEOUT", + .noWait = true, + .want = 1000, + .before = 2, + .fetchSize = 13, + .expectedStatus = NATS_TIMEOUT, + .expectedN = 2, + }, + { + .name = "multi-fetch NOWAIT, partially fulfilled NATS_TIMEOUT", + .noWait = true, + .want = 1000, + .before = 117, + .fetchSize = 20, // 117 is not divisible by 20 + .expectedStatus = NATS_TIMEOUT, + .expectedN = 117, + }, + { + .name = "fetch NOWAIT can get a NATS_NOT_FOUND immediately", + .noWait = true, + .want = 1000, + .before = 0, + .fetchSize = 5, + .expectedStatus = NATS_NOT_FOUND, // on the second request, since there are no messages waiting. + .expectedN = 0, + }, + { + .name = "fetch NOWAIT can get a NATS_NOT_FOUND on Nth request", + .noWait = true, + .want = 1000, + .before = 10, + .fetchSize = 5, + .expectedStatus = NATS_NOT_FOUND, // on the second request, since there are no messages waiting. 
+ .expectedN = 10, + }, + { + .name = "single fetch NOWAIT, fulfilled msgs NATS_MAX_DELIVERED_MSGS", + .noWait = true, + .want = 2, + .before = 20, + .fetchSize = 13, + .expectedStatus = NATS_MAX_DELIVERED_MSGS, + .expectedN = 2, + }, + { + .name = "multi-fetch NOWAIT, fulfilled msgs NATS_MAX_DELIVERED_MSGS", + .noWait = true, + .want = 100, + .before = 117, + .fetchSize = 13, + .expectedStatus = NATS_MAX_DELIVERED_MSGS, + .expectedN = 100, + }, + { + .name = "single fetch, fulfilled msgs NATS_MAX_DELIVERED_MSGS", + .want = 2, + .before = 20, + .fetchSize = 13, + .expectedStatus = NATS_MAX_DELIVERED_MSGS, + .expectedN = 2, + }, + { + .name = "multi-fetch WAIT, fulfilled msgs NATS_MAX_DELIVERED_MSGS", + .want = 100, + .before = 117, + .fetchSize = 13, + .expectedStatus = NATS_MAX_DELIVERED_MSGS, + .expectedN = 100, + }, + { + .name = "MaxBytes", + .want = 1000, + .maxBytes = 100, + .before = 20, + .expectedStatus = NATS_LIMIT_REACHED, + .expectedN = 1, + }, + { + .name = "Fetch with expiration is fulfilled NATS_MAX_DELIVERED_MSGS", + .want = 30, + .expires = 100, + .before = 20, + .during = 10, + .expectedStatus = NATS_MAX_DELIVERED_MSGS, + .expectedN = 30, + }, + { + .name = "Fetch with a short expiration is partially fulfilled NATS_TIMEOUT", + .fetchSize = 11, // just to slow things down + .want = 200, + .expires = 5, + .during = 200, + .expectedStatus = NATS_TIMEOUT, + .expectedN = 200, + .orFewer = true, + }, + { + .name = NULL, + }, + }; + for (nowaitTC_t *tc = nowaitTCs; tc->name != NULL; tc++) + { + for (int i = 0; (s == NATS_OK) && (i < tc->before); i++) + s = js_Publish(NULL, js, "foo", "hello", 5, NULL, &jerr); + if (s != NATS_OK) + FAIL("Failed to publish, unusual"); + + natsMutex_Lock(args.m); + args.control = 0; // don't ack, will be auto-ack + args.status = NATS_OK; // batch exit status will be here + args.msgReceived = false; + args.closed = false; + args.sum = 0; + natsMutex_Unlock(args.m); + + jsSubOptions_Init(&so); + so.Config.MaxAckPending = 10; 
+ so.Config.AckWait = NATS_MILLIS_TO_NANOS(300); + so.Config.MaxRequestMaxBytes = 777; + so.ManualAck = false; + + jsOptions_Init(&jsOpts); + jsOpts.PullSubscribeAsync.CompleteHandler = _completePullAsync; + jsOpts.PullSubscribeAsync.CompleteHandlerClosure = &args; + jsOpts.PullSubscribeAsync.FetchSize = tc->fetchSize; + jsOpts.PullSubscribeAsync.MaxMessages = tc->want; + jsOpts.PullSubscribeAsync.MaxBytes = tc->maxBytes; + jsOpts.PullSubscribeAsync.NoWait = tc->noWait; + jsOpts.PullSubscribeAsync.Timeout = tc->expires; + s = js_PullSubscribeAsync(&sub, js, "foo", "dur", _recvPullAsync, &args, &jsOpts, &so, &jerr); + if ((s != NATS_OK) && (sub == NULL) && (jerr != 0)) + FAIL("Failed to create pull subscription, unusual"); + + for (int i = 0; (s == NATS_OK) && (i < tc->during); i++) + s = js_Publish(NULL, js, "foo", "hello", 5, NULL, &jerr); + if (s != NATS_OK) + FAIL("Failed to publish, unusual"); + + testf("%s: ", tc->name); + testCond(_testBatchCompleted( + &args, sub, tc->expectedStatus, tc->expectedN, tc->orFewer)); + + natsSubscription_Destroy(sub); + sub = NULL; + } + + JS_TEARDOWN; + _destroyDefaultThreadArgs(&args); +} + +void test_JetStreamSubscribePullAsync_MissedHB(void) +{ + natsStatus s; + natsSubscription *sub = NULL; + jsErrCode jerr = 0; + jsStreamConfig sc; + jsOptions jsOpts; + struct threadArg args; + + JS_SETUP(2, 9, 2); + + s = _createDefaultThreadArgsForCbTests(&args); + if (s != NATS_OK) + FAIL("Unable to setup test"); + + test("Create stream: "); + jsStreamConfig_Init(&sc); + sc.Name = "TEST"; + sc.Subjects = (const char *[1]){"foo"}; + sc.SubjectsLen = 1; + s = js_AddStream(NULL, js, &sc, NULL, &jerr); + testCond((s == NATS_OK) && (jerr == 0)); + + test("Check invalid heartbeat : "); + natsMutex_Lock(args.m); + args.control = 0; // don't ack, will be auto-ack + args.status = NATS_OK; // batch exit status will be here + args.msgReceived = false; + args.closed = false; + args.sum = 0; + natsMutex_Unlock(args.m); + + // Heartbeat too large for 
the timeout + jsOptions_Init(&jsOpts); + jsOpts.PullSubscribeAsync.CompleteHandler = _completePullAsync; + jsOpts.PullSubscribeAsync.CompleteHandlerClosure = &args; + jsOpts.PullSubscribeAsync.MaxMessages = 100; + jsOpts.PullSubscribeAsync.Timeout = 100; + jsOpts.PullSubscribeAsync.Heartbeat = 200; + + s = js_PullSubscribeAsync(&sub, js, "foo", "dur", _recvPullAsync, &args, &jsOpts, NULL, &jerr); + testCond((s == NATS_OK) && _testBatchCompleted(&args, sub, NATS_ERR, 0, false)); + + test("Check the error to be 'heartbeat value too large': "); + natsMutex_Lock(args.m); + testCond(strstr(args.lastErrorBuf, "heartbeat value too large") != NULL); + natsMutex_Unlock(args.m); + natsSubscription_Destroy(sub); + sub = NULL; + + test("Subscribe with 50ms idle hearbeat: "); + natsMutex_Lock(args.m); + args.control = 0; // don't ack, will be auto-ack + args.status = NATS_OK; // batch exit status will be here + args.msgReceived = false; + args.closed = false; + args.sum = 0; + natsMutex_Unlock(args.m); + + // Let's make it wait for 20 seconds, and have HBs every 50ms + jsOptions_Init(&jsOpts); + jsOpts.PullSubscribeAsync.CompleteHandler = _completePullAsync; + jsOpts.PullSubscribeAsync.CompleteHandlerClosure = &args; + jsOpts.PullSubscribeAsync.MaxMessages = 100; + jsOpts.PullSubscribeAsync.Timeout = 20 * 1000; + jsOpts.PullSubscribeAsync.Heartbeat = 50; + + s = js_PullSubscribeAsync(&sub, js, "foo", "dur", _recvPullAsync, &args, &jsOpts, NULL, &jerr); + testCond(s == NATS_OK); + + test("Subscription is active after 2 heartbeats: "); + nats_Sleep(100); + natsMutex_Lock(args.m); + testCond(!args.closed) + natsMutex_Unlock(args.m); + + // Set a message filter that will drop subsequent server's heartbeat + // messages. 
+ test("Drop heartbeats and see the sub terminate: "); + natsConn_setFilter(nc, _dropIdleHBs); + testCond((s == NATS_OK) && + _testBatchCompleted(&args, sub,NATS_MISSED_HEARTBEAT, 0, false)); + + natsSubscription_Destroy(sub); + JS_TEARDOWN; + _destroyDefaultThreadArgs(&args); +} + +void test_JetStreamSubscribePullAsync_Unsubscribe(void) +{ + natsStatus s; + natsSubscription *sub = NULL; + jsErrCode jerr = 0; + jsStreamConfig sc; + jsOptions jsOpts; + struct threadArg args; + + JS_SETUP(2, 9, 2); + + s = _createDefaultThreadArgsForCbTests(&args); + if (s != NATS_OK) + FAIL("Unable to setup test"); + + test("Create stream: "); + jsStreamConfig_Init(&sc); + sc.Name = "TEST"; + sc.Subjects = (const char *[1]){"foo"}; + sc.SubjectsLen = 1; + s = js_AddStream(NULL, js, &sc, NULL, &jerr); + testCond((s == NATS_OK) && (jerr == 0)); + + test("Publish a few messages: "); + s = js_Publish(NULL, js, "foo", "hell0", 5, NULL, &jerr); + if (s == NATS_OK) + s = js_Publish(NULL, js, "foo", "hell1", 5, NULL, &jerr); + if (s == NATS_OK) + s = js_Publish(NULL, js, "foo", "hell2", 5, NULL, &jerr); + testCond(s == NATS_OK); + + test("Subscribe: "); + natsMutex_Lock(args.m); + args.control = 3; // wait for signal back + args.status = NATS_OK; // batch exit status will be here + args.msgReceived = false; + args.closed = false; + args.sum = 0; + natsMutex_Unlock(args.m); + + jsOptions_Init(&jsOpts); + jsOpts.PullSubscribeAsync.CompleteHandler = _completePullAsync; + jsOpts.PullSubscribeAsync.CompleteHandlerClosure = &args; + jsOpts.PullSubscribeAsync.MaxMessages = 100; + jsOpts.PullSubscribeAsync.Timeout = 1000; + + s = js_PullSubscribeAsync(&sub, js, "foo", "dur", _recvPullAsync, &args, &jsOpts, NULL, &jerr); + testCond(s == NATS_OK); + + test("Receive 1 message: "); + natsMutex_Lock(args.m); + while ((s != NATS_TIMEOUT) && !args.msgReceived) + s = natsCondition_TimedWait(args.c, args.m, 100); + testCond((s == NATS_OK) && args.msgReceived && (args.closed == false)); + args.msgReceived = 
false; + + test("Unsubscribe: "); + s = natsSubscription_Unsubscribe(sub); + testCond(s == NATS_OK); + natsCondition_Signal(args.c); // release the callback + natsMutex_Unlock(args.m); + + testCond((s == NATS_OK) && + _testBatchCompleted(&args, sub, NATS_OK, 1, false)); + + natsSubscription_Destroy(sub); + sub = NULL; + JS_TEARDOWN; + _destroyDefaultThreadArgs(&args); +} + +void test_JetStreamSubscribePullAsync_Reconnect(void) +{ + natsStatus s; + JS_SETUP(2, 9, 2); + + test("Create stream: "); + jsErrCode jerr = 0; + jsStreamConfig sc; + jsStreamConfig_Init(&sc); + sc.Name = "TEST"; + sc.Subjects = (const char *[1]){"foo"}; + sc.SubjectsLen = 1; + s = js_AddStream(NULL, js, &sc, NULL, &jerr); + testCond((s == NATS_OK) && (jerr == 0)); + + test("Subscribe, expect 3 messsages, fetch size 2: "); + natsSubscription *sub = NULL; + jsOptions jsOpts; + struct threadArg args; + + s = _createDefaultThreadArgsForCbTests(&args); + if (s != NATS_OK) + FAIL("Unable to setup test"); + + natsMutex_Lock(args.m); + args.control = 2; // no ack + args.status = NATS_OK; // batch exit status will be here + natsMutex_Unlock(args.m); + + jsOptions_Init(&jsOpts); + jsOpts.PullSubscribeAsync.CompleteHandler = _completePullAsync; + jsOpts.PullSubscribeAsync.CompleteHandlerClosure = &args; + jsOpts.PullSubscribeAsync.FetchSize = 2; + jsOpts.PullSubscribeAsync.MaxMessages = 3; + jsOpts.PullSubscribeAsync.Timeout = 10 * 1000; + + s = js_PullSubscribeAsync(&sub, js, "foo", "dur", _recvPullAsync, &args, &jsOpts, NULL, &jerr); + testCond(s == NATS_OK); + + test("Send 1 message: "); + s = js_Publish(NULL, js, "foo", "hell0", 5, NULL, &jerr); + testCond(s == NATS_OK); + + test("Receive 1 message: "); + natsMutex_Lock(args.m); + while ((s != NATS_TIMEOUT) && !args.msgReceived) + s = natsCondition_TimedWait(args.c, args.m, 20); + testCond((s == NATS_OK) && args.msgReceived && (args.closed == false)); + args.msgReceived = false; + natsMutex_Unlock(args.m); + + test("Reconnect: "); + s = 
natsConnection_Flush(nc); + if (s == NATS_OK) + s = natsConnection_Reconnect(nc); + nats_Sleep(100); + testCond(s == NATS_OK); + + test("Send 2 more messages: "); + s = js_Publish(NULL, js, "foo", "hell1", 5, NULL, &jerr); + if (s == NATS_OK) + s = js_Publish(NULL, js, "foo", "hell2", 5, NULL, &jerr); + testCond(s == NATS_OK); + + test("Receive all expected messages: "); + testCond(_testBatchCompleted(&args, sub, NATS_MAX_DELIVERED_MSGS, 3, false)); + + natsSubscription_Destroy(sub); + JS_TEARDOWN; + _destroyDefaultThreadArgs(&args); +} + +void test_JetStreamSubscribePullAsync_Disconnect(void) +{ + natsStatus s; + natsOptions *opts = NULL; + s = natsOptions_Create(&opts); + if (s != NATS_OK) + FAIL("Unable to create options"); + natsOptions_SetAllowReconnect(opts, false); + JS_SETUP_WITH_OPTS(2, 9, 2, opts); + + test("Create stream: "); + jsErrCode jerr = 0; + jsStreamConfig sc; + jsStreamConfig_Init(&sc); + sc.Name = "TEST"; + sc.Subjects = (const char *[1]){"foo"}; + sc.SubjectsLen = 1; + s = js_AddStream(NULL, js, &sc, NULL, &jerr); + testCond((s == NATS_OK) && (jerr == 0)); + + test("Subscribe, expect 2 messsages, fetch size 2: "); + natsSubscription *sub = NULL; + jsOptions jsOpts; + struct threadArg args; + + s = _createDefaultThreadArgsForCbTests(&args); + if (s != NATS_OK) + FAIL("Unable to setup test"); + + natsMutex_Lock(args.m); + args.control = 2; // no ack + args.status = NATS_OK; // batch exit status will be here + natsMutex_Unlock(args.m); + + jsOptions_Init(&jsOpts); + jsOpts.PullSubscribeAsync.CompleteHandler = _completePullAsync; + jsOpts.PullSubscribeAsync.CompleteHandlerClosure = &args; + jsOpts.PullSubscribeAsync.FetchSize = 2; + jsOpts.PullSubscribeAsync.MaxMessages = 2; + jsOpts.PullSubscribeAsync.Timeout = 10 * 1000; + + s = js_PullSubscribeAsync(&sub, js, "foo", "dur", _recvPullAsync, &args, &jsOpts, NULL, &jerr); + testCond(s == NATS_OK); + + test("Send 1 message: "); + s = js_Publish(NULL, js, "foo", "hell0", 5, NULL, &jerr); + testCond(s 
== NATS_OK); + + test("Receive 1 message: "); + natsMutex_Lock(args.m); + while ((s != NATS_TIMEOUT) && !args.msgReceived) + s = natsCondition_TimedWait(args.c, args.m, 20); + testCond((s == NATS_OK) && args.msgReceived && (args.closed == false)); + args.msgReceived = false; + natsMutex_Unlock(args.m); + + test("Disconnect: "); + natsConn_Lock(nc); + natsSock_Shutdown(nc->sockCtx.fd); + natsConn_Unlock(nc); + testCond(s == NATS_OK); + + test("Check fetch completion, expect NATS_OK: "); + testCond(_testBatchCompleted(&args, sub, NATS_CONNECTION_CLOSED, 1, false)); + + natsSubscription_Destroy(sub); + JS_TEARDOWN; + _destroyDefaultThreadArgs(&args); + natsOptions_Destroy(opts); +} + void test_JetStreamSubscribeHeadersOnly(void) { natsStatus s; @@ -29944,14 +30735,14 @@ void test_JetStreamConvertDirectMsg(void) test("Bad request: "); s = natsMsg_Create(&msg, "inbox", NULL, NULL, 0); - IFOK(s, natsMsgHeader_Set(msg, STATUS_HDR, REQ_TIMEOUT)); + IFOK(s, natsMsgHeader_Set(msg, STATUS_HDR, HDR_STATUS_TIMEOUT_408)); IFOK(s, natsMsgHeader_Set(msg, DESCRIPTION_HDR, "Bad Request")); IFOK(s, js_directGetMsgToJSMsg("test", msg)); testCond((s == NATS_ERR) && (strstr(nats_GetLastError(NULL), "Bad Request") != NULL)); nats_clearLastError(); test("Not found: "); - s = natsMsgHeader_Set(msg, STATUS_HDR, NOT_FOUND_STATUS); + s = natsMsgHeader_Set(msg, STATUS_HDR, HDR_STATUS_NOT_FOUND_404); IFOK(s, natsMsgHeader_Set(msg, DESCRIPTION_HDR, "Message Not Found")); IFOK(s, js_directGetMsgToJSMsg("test", msg)); testCond((s == NATS_NOT_FOUND) && (strstr(nats_GetLastError(NULL), natsStatus_GetText(NATS_NOT_FOUND)) != NULL)); @@ -35773,7 +36564,7 @@ _subDlvThreadPooled(natsSubscription *sub) { bool pooled; natsSub_Lock(sub); - pooled = (sub->dispatcher->dedicatedTo == NULL); + pooled = (sub->dispatcher->ownedBy == NULL); natsSub_Unlock(sub); return pooled; } From 87d7472f38b23339d76aadae94f4106f3855eed6 Mon Sep 17 00:00:00 2001 From: Lev <1187448+levb@users.noreply.github.com> Date: Wed, 25 
Sep 2024 12:04:27 -0700 Subject: [PATCH 04/16] [ADDED, BREAKING] Services: queue group now configurable and can be disabled (#800) * [ADDED, BREAKING] Services: queue group now configurable and can be disabled. * Fixed the test It appears possible that the client exits before the (own) dispatcher thread finishes and quits. Waiting until the "Done" callback is invoked fixes it. Also, since a sub is (often? always?) freed from the own dispatcher thread, use natsThread_Detach rather than natsThread_Join when cleaning up own dispatcher. * PR feedback --- examples/micro-arithmetics.c | 43 +++++---- examples/micro-func.c | 45 +++++----- src/micro.c | 167 ++++++++++++++++++++++++++--------- src/micro_endpoint.c | 49 +++++++--- src/micro_monitoring.c | 10 ++- src/microp.h | 10 ++- src/nats.h | 82 +++++++++++++++-- src/sub.c | 8 +- test/list_test.txt | 3 +- test/test.c | 127 ++++++++++++++++++++++++-- 10 files changed, 420 insertions(+), 124 deletions(-) diff --git a/examples/micro-arithmetics.c b/examples/micro-arithmetics.c index c3e8f5ee4..6bc94522a 100644 --- a/examples/micro-arithmetics.c +++ b/examples/micro-arithmetics.c @@ -88,24 +88,6 @@ int main(int argc, char **argv) microGroup *g = NULL; char errorbuf[1024]; - microServiceConfig cfg = { - .Description = "Arithmetic operations - NATS microservice example in C", - .Name = "c-arithmetics", - .Version = "1.0.0", - }; - microEndpointConfig add_cfg = { - .Name = "add", - .Handler = handle_add, - }; - microEndpointConfig divide_cfg = { - .Name = "divide", - .Handler = handle_divide, - }; - microEndpointConfig multiply_cfg = { - .Name = "multiply", - .Handler = handle_multiply, - }; - // Connect to NATS server opts = parseArgs(argc, argv, ""); s = natsConnection_Connect(&conn, opts); @@ -118,17 +100,34 @@ int main(int argc, char **argv) } // Create the Microservice that listens on nc. 
+ microServiceConfig cfg = { + .Description = "Arithmetic operations - NATS microservice example in C", + .Name = "c-arithmetics", + .Version = "1.0.0", + }; err = micro_AddService(&m, conn, &cfg); // Add the endpoints for the functions. if (err == NULL) - microService_AddGroup(&g, m, "op"); + { + microGroupConfig groupConfig = { .Prefix = "op" }; + err = microService_AddGroup(&g, m, &groupConfig); + } if (err == NULL) - err = microGroup_AddEndpoint(g, &add_cfg); + { + microEndpointConfig addConfig = { .Name = "add", .Handler = handle_add }; + err = microGroup_AddEndpoint(g, &addConfig); + } if (err == NULL) - err = microGroup_AddEndpoint(g, &multiply_cfg); + { + microEndpointConfig multiplyConfig = { .Name = "multiply", .Handler = handle_multiply }; + err = microGroup_AddEndpoint(g, &multiplyConfig); + } if (err == NULL) - err = microGroup_AddEndpoint(g, ÷_cfg); + { + microEndpointConfig divideConfig = { .Name = "divide", .Handler = handle_divide }; + err = microGroup_AddEndpoint(g, ÷Config); + } // Run the service, until stopped. if (err == NULL) diff --git a/examples/micro-func.c b/examples/micro-func.c index dfac1dad6..97b899999 100644 --- a/examples/micro-func.c +++ b/examples/micro-func.c @@ -170,24 +170,6 @@ int main(int argc, char **argv) microGroup *g = NULL; char errorbuf[1024]; - microServiceConfig cfg = { - .Description = "Functions - NATS microservice example in C", - .Name = "c-functions", - .Version = "1.0.0", - }; - microEndpointConfig factorial_cfg = { - .Name = "factorial", - .Handler = handle_factorial, - }; - microEndpointConfig fibonacci_cfg = { - .Name = "fibonacci", - .Handler = handle_fibonacci, - }; - microEndpointConfig power2_cfg = { - .Name = "power2", - .Handler = handle_power2, - }; - // Connect to NATS server opts = parseArgs(argc, argv, ""); s = natsConnection_Connect(&conn, opts); @@ -200,17 +182,34 @@ int main(int argc, char **argv) } // Create the Microservice that listens on nc. 
- err = micro_AddService(&m, conn, &cfg); + microServiceConfig serviceConfig = { + .Description = "Functions - NATS microservice example in C", + .Name = "c-functions", + .Version = "1.0.0", + }; + err = micro_AddService(&m, conn, &serviceConfig); // Add the endpoints for the functions. if (err == NULL) - err = microService_AddGroup(&g, m, "f"); + { + microGroupConfig groupConfig = { .Prefix = "f" }; + err = microService_AddGroup(&g, m, &groupConfig); + } if (err == NULL) - err = microGroup_AddEndpoint(g, &factorial_cfg); + { + microEndpointConfig factorialConfig = { .Name = "factorial", .Handler = handle_factorial }; + err = microGroup_AddEndpoint(g, &factorialConfig); + } if (err == NULL) - err = microGroup_AddEndpoint(g, &fibonacci_cfg); + { + microEndpointConfig fibonacciConfig = { .Name = "fibonacci", .Handler = handle_fibonacci }; + err = microGroup_AddEndpoint(g, &fibonacciConfig); + } if (err == NULL) - err = microGroup_AddEndpoint(g, &power2_cfg); + { + microEndpointConfig power2Config = { .Name = "power2", .Handler = handle_power2 }; + err = microGroup_AddEndpoint(g, &power2Config); + } // Run the service, until stopped. if (err == NULL) diff --git a/src/micro.c b/src/micro.c index 86a572739..354824184 100644 --- a/src/micro.c +++ b/src/micro.c @@ -39,6 +39,8 @@ micro_AddService(microService **new_m, natsConnection *nc, microServiceConfig *c if ((new_m == NULL) || (nc == NULL) || (cfg == NULL) || !micro_is_valid_name(cfg->Name) || nats_IsStringEmpty(cfg->Version)) return micro_ErrorInvalidArg; + if ((cfg->QueueGroup != NULL) && nats_IsStringEmpty(cfg->QueueGroup)) + return micro_ErrorInvalidArg; // Make a microservice object, with a reference to a natsConnection. 
err = _new_service(&m, nc); @@ -68,7 +70,7 @@ micro_AddService(microService **new_m, natsConnection *nc, microServiceConfig *c } microError * -micro_add_endpoint(microEndpoint **new_ep, microService *m, const char *prefix, microEndpointConfig *cfg, bool is_internal) +micro_add_endpoint(microEndpoint **new_ep, microService *m, microGroup *g, microEndpointConfig *cfg, bool is_internal) { microError *err = NULL; microEndpoint *ptr = NULL; @@ -81,7 +83,7 @@ micro_add_endpoint(microEndpoint **new_ep, microService *m, const char *prefix, if (cfg == NULL) return NULL; - err = micro_new_endpoint(&ep, m, prefix, cfg, is_internal); + err = micro_new_endpoint(&ep, m, g, cfg, is_internal); if (err != NULL) return microError_Wrapf(err, "failed to create endpoint %s", cfg->Name); @@ -129,9 +131,9 @@ micro_add_endpoint(microEndpoint **new_ep, microService *m, const char *prefix, if (prev_ep != NULL) { - // Rid of the previous endpoint with the same name, if any. If this + // Rid of the previous endpoint with the same subject, if any. If this // fails we can return the error, leave the newly added endpoint in the - // list, not started. A retry with the same name will clean it up. + // list, not started. A retry with the same subject will clean it up. if (err = micro_stop_endpoint(prev_ep), err != NULL) return err; micro_release_endpoint(prev_ep); @@ -165,7 +167,7 @@ microGroup_AddEndpoint(microGroup *g, microEndpointConfig *cfg) if (g == NULL) return micro_ErrorInvalidArg; - return micro_add_endpoint(NULL, g->m, g->prefix, cfg, false); + return micro_add_endpoint(NULL, g->m, g, cfg, false); } microError * @@ -406,23 +408,44 @@ _release_service(microService *m) _free_service(m); } -static void -_free_service(microService *m) +static inline void +_free_cloned_group_config(microGroupConfig *cfg) { - microGroup *next = NULL; + if (cfg == NULL) + return; + + // the strings are declared const for the public, but in a clone these need + // to be freed. 
+ NATS_FREE((char *)cfg->Prefix); + NATS_FREE((char *)cfg->QueueGroup); + NATS_FREE(cfg); +} +static inline void +_free_group(microGroup *g) +{ + if (g == NULL) + return; + + _free_cloned_group_config(g->config); + NATS_FREE(g); +} + +static inline void +_free_service(microService *m) +{ if (m == NULL) return; // destroy all groups. if (m->groups != NULL) { - microGroup *g = m->groups; - while (g != NULL) + microGroup *next = NULL; + microGroup *g; + for (g = m->groups; g != NULL; g = next) { next = g->next; - NATS_FREE(g); - g = next; + _free_group(g); } } @@ -445,7 +468,7 @@ _clone_service_config(microServiceConfig **out, microServiceConfig *cfg) microError *err = NULL; microServiceConfig *new_cfg = NULL; - if (out == NULL || cfg == NULL) + if ((out == NULL) || (cfg == NULL)) return micro_ErrorInvalidArg; err = _new_service_config(&new_cfg); @@ -458,6 +481,7 @@ _clone_service_config(microServiceConfig **out, microServiceConfig *cfg) MICRO_CALL(err, micro_strdup((char **)&new_cfg->Name, cfg->Name)); MICRO_CALL(err, micro_strdup((char **)&new_cfg->Version, cfg->Version)); MICRO_CALL(err, micro_strdup((char **)&new_cfg->Description, cfg->Description)); + MICRO_CALL(err, micro_strdup((char **)&new_cfg->QueueGroup, cfg->QueueGroup)); MICRO_CALL(err, micro_ErrorFromStatus( nats_cloneMetadata(&new_cfg->Metadata, cfg->Metadata))); MICRO_CALL(err, micro_clone_endpoint_config(&new_cfg->Endpoint, cfg->Endpoint)); @@ -482,6 +506,7 @@ _free_cloned_service_config(microServiceConfig *cfg) NATS_FREE((char *)cfg->Name); NATS_FREE((char *)cfg->Version); NATS_FREE((char *)cfg->Description); + NATS_FREE((char *)cfg->QueueGroup); nats_freeMetadata(&cfg->Metadata); micro_free_cloned_endpoint_config(cfg->Endpoint); NATS_FREE(cfg); @@ -663,20 +688,86 @@ _wrap_connection_event_callbacks(microService *m) return microError_Wrapf(err, "failed to wrap connection event callbacks"); } -microError * -microService_AddGroup(microGroup **new_group, microService *m, const char *prefix) +static 
inline microError * +_new_group_config(microGroupConfig **ptr) { - if ((m == NULL) || (new_group == NULL) || (prefix == NULL)) + *ptr = NATS_CALLOC(1, sizeof(microGroupConfig)); + return (*ptr == NULL) ? micro_ErrorOutOfMemory : NULL; +} + +static inline microError * +_clone_group_config(microGroupConfig **out, microGroupConfig *in, microGroup *parent) +{ + microError *err = NULL; + microGroupConfig *new_cfg = NULL; + + if ((out == NULL) || (in == NULL)) return micro_ErrorInvalidArg; - *new_group = NATS_CALLOC(1, sizeof(microGroup) + - strlen(prefix) + 1); // "prefix\0" - if (new_group == NULL) + err = _new_group_config(&new_cfg); + if (err == NULL) + { + memcpy(new_cfg, in, sizeof(microGroupConfig)); + } + + // If the queue group is not explicitly set, copy from the parent. + if (err == NULL) { + if (in->NoQueueGroup) + new_cfg->QueueGroup = NULL; + else if (!nats_IsStringEmpty(in->QueueGroup)) + err = micro_strdup((char **)&new_cfg->QueueGroup, in->QueueGroup); + else if (parent != NULL) + { + new_cfg->NoQueueGroup = parent->config->NoQueueGroup; + err = micro_strdup((char **)&new_cfg->QueueGroup, parent->config->QueueGroup); + } + } + + // prefix = parent_prefix.prefix + if (err == NULL) + { + size_t prefixSize = strlen(in->Prefix) + 1; + if (parent != NULL) + prefixSize += strlen(parent->config->Prefix) + 1; + new_cfg->Prefix = NATS_CALLOC(1, prefixSize); + if (new_cfg->Prefix != NULL) + { + if (parent != NULL) + snprintf((char *)new_cfg->Prefix, prefixSize, "%s.%s", parent->config->Prefix, in->Prefix); + else + memcpy((char *)new_cfg->Prefix, in->Prefix, prefixSize); + } + else + err = micro_ErrorOutOfMemory; + } + + if (err != NULL) + { + _free_cloned_group_config(new_cfg); + return err; + } + + *out = new_cfg; + return NULL; +} + +static inline microError * +_add_group(microGroup **new_group, microService *m, microGroup *parent, microGroupConfig *config) +{ + + *new_group = NATS_CALLOC(1, sizeof(microGroup)); + if (new_group == NULL) return 
micro_ErrorOutOfMemory; + + microError *err = NULL; + err = _clone_group_config(&(*new_group)->config, config, parent); + if (err != NULL) + { + NATS_FREE(*new_group); + return err; } - memcpy((*new_group)->prefix, prefix, strlen(prefix) + 1); (*new_group)->m = m; (*new_group)->next = m->groups; m->groups = *new_group; @@ -685,33 +776,21 @@ microService_AddGroup(microGroup **new_group, microService *m, const char *prefi } microError * -microGroup_AddGroup(microGroup **new_group, microGroup *parent, const char *prefix) +microService_AddGroup(microGroup **new_group, microService *m, microGroupConfig *config) { - char *p; - size_t len; - - if ((parent == NULL) || (new_group == NULL) || (prefix == NULL)) + if ((m == NULL) || (new_group == NULL) || (config == NULL) || nats_IsStringEmpty(config->Prefix)) return micro_ErrorInvalidArg; - *new_group = NATS_CALLOC(1, sizeof(microGroup) + - strlen(parent->prefix) + 1 + // "parent_prefix." - strlen(prefix) + 1); // "prefix\0" - if (new_group == NULL) - { - return micro_ErrorOutOfMemory; - } + return _add_group(new_group, m, NULL, config); +} - p = (*new_group)->prefix; - len = strlen(parent->prefix); - memcpy(p, parent->prefix, len); - p[len] = '.'; - p += len + 1; - memcpy(p, prefix, strlen(prefix) + 1); - (*new_group)->m = parent->m; - (*new_group)->next = parent->m->groups; - parent->m->groups = *new_group; +microError * +microGroup_AddGroup(microGroup **new_group, microGroup *parent, microGroupConfig *config) +{ + if ((parent == NULL) || (new_group == NULL) || (config == NULL) || nats_IsStringEmpty(config->Prefix)) + return micro_ErrorInvalidArg; - return NULL; + return _add_group(new_group, parent->m, parent, config); } natsConnection * @@ -771,6 +850,7 @@ microService_GetInfo(microServiceInfo **new_info, microService *m) { MICRO_CALL(err, micro_strdup((char **)&info->Endpoints[len].Name, ep->name)); MICRO_CALL(err, micro_strdup((char **)&info->Endpoints[len].Subject, ep->subject)); + MICRO_CALL(err, micro_strdup((char 
**)&info->Endpoints[len].QueueGroup, micro_queue_group_for_endpoint(ep))); MICRO_CALL(err, micro_ErrorFromStatus( nats_cloneMetadata(&info->Endpoints[len].Metadata, ep->config->Metadata))); if (err == NULL) @@ -805,6 +885,7 @@ void microServiceInfo_Destroy(microServiceInfo *info) { NATS_FREE((char *)info->Endpoints[i].Name); NATS_FREE((char *)info->Endpoints[i].Subject); + NATS_FREE((char *)info->Endpoints[i].QueueGroup); nats_freeMetadata(&info->Endpoints[i].Metadata); } NATS_FREE((char *)info->Endpoints); @@ -868,6 +949,7 @@ microService_GetStats(microServiceStats **new_stats, microService *m) MICRO_CALL(err, micro_strdup((char **)&stats->Endpoints[len].Name, ep->name)); MICRO_CALL(err, micro_strdup((char **)&stats->Endpoints[len].Subject, ep->subject)); + MICRO_CALL(err, micro_strdup((char **)&stats->Endpoints[len].QueueGroup, micro_queue_group_for_endpoint(ep))); if (err == NULL) { avg = (long double)ep->stats.ProcessingTimeSeconds * 1000000000.0 + (long double)ep->stats.ProcessingTimeNanoseconds; @@ -903,6 +985,7 @@ void microServiceStats_Destroy(microServiceStats *stats) { NATS_FREE((char *)stats->Endpoints[i].Name); NATS_FREE((char *)stats->Endpoints[i].Subject); + NATS_FREE((char *)stats->Endpoints[i].QueueGroup); } NATS_FREE(stats->Endpoints); NATS_FREE((char *)stats->Name); diff --git a/src/micro_endpoint.c b/src/micro_endpoint.c index c5149e57a..efe72f41d 100644 --- a/src/micro_endpoint.c +++ b/src/micro_endpoint.c @@ -16,7 +16,7 @@ #include "microp.h" #include "util.h" -static microError *_dup_with_prefix(char **dst, const char *prefix, const char *src); +static microError *_subjectWithGroupPrefix(char **dst, microGroup *g, const char *src); static void _handle_request(natsConnection *nc, natsSubscription *sub, natsMsg *msg, void *closure); @@ -24,7 +24,7 @@ static void _retain_endpoint(microEndpoint *ep, bool lock); static void _release_endpoint(microEndpoint *ep); microError * -micro_new_endpoint(microEndpoint **new_ep, microService *m, const char 
*prefix, microEndpointConfig *cfg, bool is_internal) +micro_new_endpoint(microEndpoint **new_ep, microService *m, microGroup *g, microEndpointConfig *cfg, bool is_internal) { microError *err = NULL; microEndpoint *ep = NULL; @@ -51,17 +51,42 @@ micro_new_endpoint(microEndpoint **new_ep, microService *m, const char *prefix, MICRO_CALL(err, micro_ErrorFromStatus(natsMutex_Create(&ep->endpoint_mu))); MICRO_CALL(err, micro_clone_endpoint_config(&ep->config, cfg)); MICRO_CALL(err, micro_strdup(&ep->name, cfg->Name)); - MICRO_CALL(err, _dup_with_prefix(&ep->subject, prefix, subj)); + MICRO_CALL(err, _subjectWithGroupPrefix(&ep->subject, g, subj)); if (err != NULL) { micro_free_endpoint(ep); return err; } + ep->group = g; *new_ep = ep; return NULL; } +const char * +micro_queue_group_for_endpoint(microEndpoint *ep) +{ + if (ep->config->NoQueueGroup) + return NULL; + else if (!nats_IsStringEmpty(ep->config->QueueGroup)) + return ep->config->QueueGroup; + + if (ep->group != NULL) + { + if(ep->group->config->NoQueueGroup) + return NULL; + else if (!nats_IsStringEmpty(ep->group->config->QueueGroup)) + return ep->group->config->QueueGroup; + } + + if (ep->m->cfg->NoQueueGroup) + return NULL; + else if(!nats_IsStringEmpty(ep->m->cfg->QueueGroup)) + return ep->m->cfg->QueueGroup; + + return MICRO_DEFAULT_QUEUE_GROUP; +} + microError * micro_start_endpoint(microEndpoint *ep) { @@ -75,10 +100,11 @@ micro_start_endpoint(microEndpoint *ep) // reset the stats. 
memset(&ep->stats, 0, sizeof(ep->stats)); - if (ep->is_monitoring_endpoint) + const char *queueGroup = micro_queue_group_for_endpoint(ep); + if (ep->is_monitoring_endpoint || (queueGroup == NULL)) s = natsConnection_Subscribe(&sub, ep->m->nc, ep->subject, _handle_request, ep); else - s = natsConnection_QueueSubscribe(&sub, ep->m->nc, ep->subject, MICRO_QUEUE_GROUP, _handle_request, ep); + s = natsConnection_QueueSubscribe(&sub, ep->m->nc, ep->subject, queueGroup, _handle_request, ep); if (s == NATS_OK) { @@ -324,6 +350,7 @@ micro_clone_endpoint_config(microEndpointConfig **out, microEndpointConfig *cfg) MICRO_CALL(err, micro_strdup((char **)&new_cfg->Name, cfg->Name)); MICRO_CALL(err, micro_strdup((char **)&new_cfg->Subject, cfg->Subject)); + MICRO_CALL(err, micro_strdup((char **)&new_cfg->QueueGroup, cfg->QueueGroup)); MICRO_CALL(err, micro_ErrorFromStatus( nats_cloneMetadata(&new_cfg->Metadata, cfg->Metadata))); @@ -407,23 +434,23 @@ bool micro_match_endpoint_subject(const char *ep_subject, const char *actual_sub } } -static microError *_dup_with_prefix(char **dst, const char *prefix, const char *src) +static microError *_subjectWithGroupPrefix(char **dst, microGroup *g, const char *src) { size_t len = strlen(src) + 1; char *p; - if (!nats_IsStringEmpty(prefix)) - len += strlen(prefix) + 1; + if (g != NULL) + len += strlen(g->config->Prefix) + 1; *dst = NATS_CALLOC(1, len); if (*dst == NULL) return micro_ErrorOutOfMemory; p = *dst; - if (!nats_IsStringEmpty(prefix)) + if (g != NULL) { - len = strlen(prefix); - memcpy(p, prefix, len); + len = strlen(g->config->Prefix); + memcpy(p, g->config->Prefix, len); p[len] = '.'; p += len + 1; } diff --git a/src/micro_monitoring.c b/src/micro_monitoring.c index 80f1188d6..c8639392b 100644 --- a/src/micro_monitoring.c +++ b/src/micro_monitoring.c @@ -78,7 +78,7 @@ handle_info(microRequest *req) } static microError * -handle_stats_internal(microRequest *req) +handle_stats_default(microRequest *req) { microError *err = NULL; 
microService *m = microRequest_GetService(req); @@ -108,7 +108,7 @@ handle_stats(microRequest *req) if (m->cfg->StatsHandler != NULL) return m->cfg->StatsHandler(req); else - return handle_stats_internal(req); + return handle_stats_default(req); } static microError * @@ -188,7 +188,7 @@ add_internal_handler(microService *m, const char *verb, const char *kind, .Name = name, .Handler = handler, }; - err = micro_add_endpoint(NULL, m, "", &cfg, true); + err = micro_add_endpoint(NULL, m, NULL, &cfg, true); NATS_FREE(subj); return err; } @@ -272,6 +272,8 @@ marshal_info(natsBuffer **new_buf, microServiceInfo *info) IFOK_attr("name", info->Endpoints[i].Name, ""); IFOK(s, nats_marshalMetadata(buf, true, "metadata", info->Endpoints[i].Metadata)); IFOK(s, natsBuf_AppendByte(buf, ',')); + if (!nats_IsStringEmpty(info->Endpoints[i].QueueGroup)) + IFOK_attr("queue_group", info->Endpoints[i].QueueGroup, ","); IFOK_attr("subject", info->Endpoints[i].Subject, ""); IFOK(s, natsBuf_AppendByte(buf, '}')); // end endpoint if (i != info->EndpointsLen - 1) @@ -323,6 +325,8 @@ marshal_stats(natsBuffer **new_buf, microServiceStats *stats) IFOK(s, natsBuf_AppendByte(buf, '{')); IFOK_attr("name", ep->Name, ","); IFOK_attr("subject", ep->Subject, ","); + if (!nats_IsStringEmpty(ep->QueueGroup)) + IFOK_attr("queue_group", ep->QueueGroup, ","); IFOK(s, nats_marshalLong(buf, false, "num_requests", ep->NumRequests)); IFOK(s, nats_marshalLong(buf, true, "num_errors", ep->NumErrors)); IFOK(s, nats_marshalDuration(buf, true, "average_processing_time", ep->AverageProcessingTimeNanoseconds)); diff --git a/src/microp.h b/src/microp.h index 620d7a0e7..8184691c4 100644 --- a/src/microp.h +++ b/src/microp.h @@ -27,7 +27,7 @@ if ((__err) == NULL) \ __block; -#define MICRO_QUEUE_GROUP "q" +#define MICRO_DEFAULT_QUEUE_GROUP "q" #define MICRO_DEFAULT_ENDPOINT_NAME "default" @@ -58,6 +58,7 @@ struct micro_endpoint_s // Retained/released by the service that owns the endpoint to avoid race // conditions. 
microService *m; + microGroup *group; // Monitoring endpoints are different in a few ways. For now, express it as // a single flag but consider unbundling: @@ -88,9 +89,9 @@ struct micro_endpoint_s struct micro_group_s { + struct micro_group_config_s *config; struct micro_service_s *m; struct micro_group_s *next; - char prefix[]; }; struct micro_service_s @@ -138,12 +139,12 @@ struct micro_request_s microEndpoint *Endpoint; }; -microError *micro_add_endpoint(microEndpoint **new_ep, microService *m, const char *prefix, microEndpointConfig *cfg, bool is_internal); +microError *micro_add_endpoint(microEndpoint **new_ep, microService *m, microGroup *g, microEndpointConfig *cfg, bool is_internal); microError *micro_clone_endpoint_config(microEndpointConfig **out, microEndpointConfig *cfg); microError *micro_init_monitoring(microService *m); microError *micro_is_error_message(natsStatus s, natsMsg *msg); microError *micro_new_control_subject(char **newSubject, const char *verb, const char *name, const char *id); -microError *micro_new_endpoint(microEndpoint **new_ep, microService *m, const char *prefix, microEndpointConfig *cfg, bool is_internal); +microError *micro_new_endpoint(microEndpoint **new_ep, microService *m, microGroup *g, microEndpointConfig *cfg, bool is_internal); microError *micro_new_request(microRequest **new_request, microService *m, microEndpoint *ep, natsMsg *msg); microError *micro_start_endpoint(microEndpoint *ep); microError *micro_stop_endpoint(microEndpoint *ep); @@ -155,6 +156,7 @@ void micro_release_endpoint(microEndpoint *ep); void micro_release_on_endpoint_complete(void *closure); void micro_retain_endpoint(microEndpoint *ep); void micro_update_last_error(microEndpoint *ep, microError *err); +const char *micro_queue_group_for_endpoint(microEndpoint *ep); bool micro_is_valid_name(const char *name); bool micro_is_valid_subject(const char *subject); diff --git a/src/nats.h b/src/nats.h index e1961231b..3b0a30f2e 100644 --- a/src/nats.h +++ 
b/src/nats.h @@ -7635,6 +7635,14 @@ typedef struct micro_error_s microError; */ typedef struct micro_group_s microGroup; +/** + * @brief The Microservice endpoint *group* configuration object. + * + * @see micro_group_config_s for descriptions of the fields, + * micro_service_config_s, micro_service_config_s, microService_AddGroup + */ +typedef struct micro_group_config_s microGroupConfig; + /** * @brief a request received by a microservice endpoint. * @@ -7781,7 +7789,19 @@ struct micro_endpoint_config_s const char *Subject; /** - * @briefMetadata for the endpoint, a JSON-encoded user-provided object, + * @brief Overrides the default queue group for the service. + * + */ + const char *QueueGroup; + + /** + * @brief Disables the use of a queue group for the service. + * + */ + bool NoQueueGroup; + + /** + * @brief Metadata for the endpoint, a JSON-encoded user-provided object, * e.g. `{"key":"value"}` */ natsMetadata Metadata; @@ -7813,6 +7833,12 @@ struct micro_endpoint_info_s */ const char *Subject; + /** + * @brief Endpoint's actual queue group (the default "q", or one explicitly + * set by the user), or omitted if NoQueueGroup was applied. + */ + const char *QueueGroup; + /** * @briefMetadata for the endpoint, a JSON-encoded user-provided object, * e.g. `{"key":"value"}` @@ -7828,6 +7854,12 @@ struct micro_endpoint_stats_s const char *Name; const char *Subject; + /** + * @brief Endpoint's actual queue group (the default "q", or one explicitly + * set by the user), or omitted if NoQueueGroup was applied. + */ + const char *QueueGroup; + /** * @brief The number of requests received by the endpoint. */ @@ -7860,6 +7892,29 @@ struct micro_endpoint_stats_s char LastErrorString[2048]; }; +/** + * #brief The Microservice endpoint *group* configuration object. + */ +struct micro_group_config_s +{ + /** + * @brief The subject prefix for the group. + */ + const char *Prefix; + + /** + * @brief Overrides the default queue group for the service. 
+ * + */ + const char *QueueGroup; + + /** + * @brief Disables the use of a queue group for the service. + * + */ + bool NoQueueGroup; +}; + /** * @brief The Microservice top-level configuration object. * @@ -7886,7 +7941,20 @@ struct micro_service_config_s const char *Description; /** - * @brief Metadata for the service, a JSON-encoded user-provided object, e.g. `{"key":"value"}` + * @brief Overrides the default queue group for the service ("q"). + * + */ + const char *QueueGroup; + + /** + * @brief Disables the use of a queue group for the service. + * + */ + bool NoQueueGroup; + + /** + * @brief Immutable metadata for the service, a JSON-encoded user-provided + * object, e.g. `{"key":"value"}` */ natsMetadata Metadata; @@ -8147,15 +8215,14 @@ microService_AddEndpoint(microService *m, microEndpointConfig *config); * @param new_group the location where to store the pointer to the new * #microGroup object. * @param m the #microService that the group will be added to. - * @param prefix a prefix to use on names and subjects of all endpoints in the - * group. + * @param config group parameters. * * @return a #microError if an error occurred. * * @see #microGroup_AddGroup, #microGroup_AddEndpoint */ NATS_EXTERN microError * -microService_AddGroup(microGroup **new_group, microService *m, const char *prefix); +microService_AddGroup(microGroup **new_group, microService *m, microGroupConfig *config); /** @brief Destroys a microservice, stopping it first if needed. * @@ -8283,15 +8350,14 @@ NATS_EXTERN microError *microService_Stop(microService *m); * @param new_group the location where to store the pointer to the new * #microGroup object. * @param parent the #microGroup that the new group will be added to. - * @param prefix a prefix to use on names and subjects of all endpoints in the - * group. + * @param config group parameters. * * @return a #microError if an error occurred. 
* * @see #microGroup_AddGroup, #microGroup_AddEndpoint */ NATS_EXTERN microError * -microGroup_AddGroup(microGroup **new_group, microGroup *parent, const char *prefix); +microGroup_AddGroup(microGroup **new_group, microGroup *parent, microGroupConfig *config); /** @brief Adds an endpoint to a #microGroup and starts listening for messages. * diff --git a/src/sub.c b/src/sub.c index d5ffe33e9..925c27a10 100644 --- a/src/sub.c +++ b/src/sub.c @@ -89,13 +89,14 @@ _initOwnDispatcher(natsSubscription *sub) return NATS_UPDATE_ERR_STACK(s); } -static inline void _cleanupOwnDispatcher(natsSubscription *sub) +static inline void +_cleanupOwnDispatcher(natsSubscription *sub) { nats_destroyQueuedMessages(&sub->ownDispatcher.queue); if (sub->ownDispatcher.thread != NULL) { - natsThread_Join(sub->ownDispatcher.thread); + natsThread_Detach(sub->ownDispatcher.thread); natsThread_Destroy(sub->ownDispatcher.thread); sub->ownDispatcher.thread = NULL; } @@ -103,7 +104,8 @@ static inline void _cleanupOwnDispatcher(natsSubscription *sub) natsCondition_Destroy(sub->ownDispatcher.cond); } -void _freeSub(natsSubscription *sub) +static inline void +_freeSub(natsSubscription *sub) { if (sub == NULL) return; diff --git a/test/list_test.txt b/test/list_test.txt index 04e9e930d..a7a2218a1 100644 --- a/test/list_test.txt +++ b/test/list_test.txt @@ -139,6 +139,7 @@ _test(MicroAsyncErrorHandlerMaxPendingMsgs) _test(MicroBasics) _test(MicroGroups) _test(MicroMatchEndpointSubject) +_test(MicroQueueGroupForEndpoint) _test(MicroServiceStopsOnClosedConn) _test(MicroServiceStopsWhenServerStops) _test(MicroStartStop) @@ -254,10 +255,10 @@ _test(SSLCertAndKeyFromMemory) _test(SSLCiphers) _test(SSLConnectVerboseOption) _test(SSLHandshakeFirst) -_test(SSLServerNameIndication) _test(SSLLoadCAFromMemory) _test(SSLMultithreads) _test(SSLReconnectWithAuthError) +_test(SSLServerNameIndication) _test(SSLSkipServerVerification) _test(SSLSocketLeakWithEventLoop) _test(SSLVerify) diff --git a/test/test.c 
b/test/test.c index d2bd91666..770eb8b22 100644 --- a/test/test.c +++ b/test/test.c @@ -33876,10 +33876,6 @@ void test_MicroGroups(void) microServiceInfo *info = NULL; int i; - microEndpointConfig ep1_cfg = { - .Name = "ep1", - .Handler = _microHandleRequest42, - }; microEndpointConfig ep2_cfg = { .Name = "ep2", .Handler = _microHandleRequest42, @@ -33917,16 +33913,19 @@ void test_MicroGroups(void) _startMicroservice(&m, nc, &cfg, NULL, 0, &arg); test("AddEndpoint 1 to service: "); + microEndpointConfig ep1_cfg = { .Name = "ep1", .Handler = _microHandleRequest42 }; testCond(NULL == microService_AddEndpoint(m, &ep1_cfg)); test("AddGroup g1: "); - testCond(NULL == microService_AddGroup(&g1, m, "g1")); + microGroupConfig g1_cfg = { .Prefix = "g1" }; + testCond(NULL == microService_AddGroup(&g1, m, &g1_cfg)); test("AddEndpoint 1 to g1: "); testCond(NULL == microGroup_AddEndpoint(g1, &ep1_cfg)); test("Add sub-Group g2: "); - testCond(NULL == microGroup_AddGroup(&g2, g1, "g2")); + microGroupConfig g2_cfg = { .Prefix = "g2" }; + testCond(NULL == microGroup_AddGroup(&g2, g1, &g2_cfg)); test("AddEndpoint 1 to g2: "); testCond(NULL == microGroup_AddEndpoint(g2, &ep1_cfg)); @@ -33969,6 +33968,118 @@ void test_MicroGroups(void) _stopServer(serverPid); } +void test_MicroQueueGroupForEndpoint(void) +{ + microError *err = NULL; + natsPid serverPid = NATS_INVALID_PID; + natsOptions *opts = NULL; + natsConnection *nc = NULL; + + microServiceConfig serviceConfig = { .Name = "testService", .Version = "1.0.0" }; + microGroupConfig group1Config = { .Prefix = "testGroup1" }; + microGroupConfig group2Config = { .Prefix = "testGroup2" }; + microEndpointConfig epConfig = { .Name = "testEP", .Handler = _microHandleRequest42 }; + + typedef struct { + const char *name; + + bool serviceNoQueueGroup; + const char *serviceQueueGroup; + bool group1NoQueueGroup; + const char *group1QueueGroup; + bool group2NoQueueGroup; + const char *group2QueueGroup; + bool epNoQueueGroup; + const char 
*epQueueGroup; + + const char *expectedServiceLevel; + const char *expectedGroup1Level; + const char *expectedGroup2Level; + } TC; + TC tcs[] = { + {.name="default", + .expectedServiceLevel=MICRO_DEFAULT_QUEUE_GROUP, .expectedGroup1Level=MICRO_DEFAULT_QUEUE_GROUP, .expectedGroup2Level=MICRO_DEFAULT_QUEUE_GROUP}, + {.name="service value override", .serviceQueueGroup="test", + .expectedServiceLevel="test", .expectedGroup1Level="test", .expectedGroup2Level="test"}, + {.name="G1 value override", .group1QueueGroup="G1", + .expectedServiceLevel=MICRO_DEFAULT_QUEUE_GROUP, .expectedGroup1Level="G1", .expectedGroup2Level="G1"}, + {.name="service and G1 value overrides", .serviceQueueGroup="S", .group1QueueGroup="G1", + .expectedServiceLevel="S", .expectedGroup1Level="G1", .expectedGroup2Level="G1"}, + {.name="service and G2 value overrides", .serviceQueueGroup="S", .group2QueueGroup="G2", + .expectedServiceLevel="S", .expectedGroup1Level="S", .expectedGroup2Level="G2"}, + {.name="disabled", .serviceNoQueueGroup=true, + .expectedServiceLevel=NULL, .expectedGroup1Level=NULL, .expectedGroup2Level=NULL}, + {.name="disabled for S, set for G1", .serviceNoQueueGroup=true, .group1QueueGroup="G1", + .expectedServiceLevel=NULL, .expectedGroup1Level="G1", .expectedGroup2Level="G1"}, + {.name="disabled for G1", .group1NoQueueGroup=true, + .expectedServiceLevel=MICRO_DEFAULT_QUEUE_GROUP, .expectedGroup1Level=NULL, .expectedGroup2Level=NULL}, + {.name="disabled for G1, set for G2", .group1NoQueueGroup=true, .group2QueueGroup="G2", + .expectedServiceLevel=MICRO_DEFAULT_QUEUE_GROUP, .expectedGroup1Level=NULL, .expectedGroup2Level="G2"}, + }; + + serverPid = _startServer("nats://127.0.0.1:4222", NULL, true); + CHECK_SERVER_STARTED(serverPid); + + test("Connect to server: "); + testCond(NATS_OK == natsConnection_Connect(&nc, opts)); + + for (int i = 0; i < (int)(sizeof(tcs) / sizeof(tcs[0])); i++) + { + TC tc = tcs[i]; + struct threadArg arg; + microService *service = NULL; + microGroup 
*group1 = NULL, *group2 = NULL; + microServiceInfo *info = NULL; + microServiceStats *stats = NULL; + + if(_createDefaultThreadArgsForCbTests(&arg) != NATS_OK) + FAIL("Unable to setup test args"); + + testf("%s: ", tc.name); + + serviceConfig.NoQueueGroup = tc.serviceNoQueueGroup; + serviceConfig.QueueGroup = tc.serviceQueueGroup; + epConfig.NoQueueGroup = tc.epNoQueueGroup; + epConfig.QueueGroup = tc.epQueueGroup; + group1Config.NoQueueGroup = tc.group1NoQueueGroup; + group1Config.QueueGroup = tc.group1QueueGroup; + group2Config.NoQueueGroup = tc.group2NoQueueGroup; + group2Config.QueueGroup = tc.group2QueueGroup; + + err = _startMicroservice(&service, nc, &serviceConfig, NULL, 0, &arg); + MICRO_CALL(err, microService_AddEndpoint(service, &epConfig)); + MICRO_CALL(err, microService_AddGroup(&group1, service, &group1Config)); + MICRO_CALL(err, microGroup_AddEndpoint(group1, &epConfig)); + MICRO_CALL(err, microGroup_AddGroup(&group2, group1, &group2Config)); + MICRO_CALL(err, microGroup_AddEndpoint(group2, &epConfig)); + MICRO_CALL(err, microService_GetInfo(&info, service)); + MICRO_CALL(err, microService_GetStats(&stats, service)); + +#define _testQueueGroup(_expected, _actual) \ + (_expected) == NULL ? 
(_actual) == NULL : strcmp((_expected), (_actual)) == 0 + + testCond((err == NULL) && + (info != NULL) && (info->EndpointsLen == 3) && + (stats != NULL) && (stats->EndpointsLen == 3) && + (_testQueueGroup(tc.expectedServiceLevel, info->Endpoints[0].QueueGroup)) && + (_testQueueGroup(tc.expectedServiceLevel, stats->Endpoints[0].QueueGroup)) && + (_testQueueGroup(tc.expectedGroup1Level, stats->Endpoints[1].QueueGroup)) && + (_testQueueGroup(tc.expectedGroup1Level, info->Endpoints[1].QueueGroup)) && + (_testQueueGroup(tc.expectedGroup2Level, info->Endpoints[2].QueueGroup)) && + (_testQueueGroup(tc.expectedGroup2Level, stats->Endpoints[2].QueueGroup))); + + microService_Destroy(service); + _waitForMicroservicesAllDone(&arg); + microServiceInfo_Destroy(info); + microServiceStats_Destroy(stats); + _destroyDefaultThreadArgs(&arg); + } + + natsConnection_Destroy(nc); + natsOptions_Destroy(opts); + _stopServer(serverPid); +} + #define NUM_MICRO_SERVICES 5 void test_MicroBasics(void) @@ -34127,6 +34238,7 @@ void test_MicroBasics(void) testCond( (NATS_OK == nats_JSONGetStrPtr(array[0], "name", &str)) && (strcmp(str, "do") == 0) && (NATS_OK == nats_JSONGetStrPtr(array[0], "subject", &str)) && (strcmp(str, "svc.do") == 0) + && (NATS_OK == nats_JSONGetStrPtr(array[0], "queue_group", &str)) && (strcmp(str, MICRO_DEFAULT_QUEUE_GROUP) == 0) && (NATS_OK == nats_JSONGetObject(array[0], "metadata", &md)) && (md == NULL) ); @@ -34135,6 +34247,7 @@ testCond( (NATS_OK == nats_JSONGetStrPtr(array[1], "name", &str)) && (strcmp(str, "unused") == 0) && (NATS_OK == nats_JSONGetStrPtr(array[1], "subject", &str)) && (strcmp(str, "svc.unused") == 0) + && (NATS_OK == nats_JSONGetStrPtr(array[1], "queue_group", &str)) && (strcmp(str, MICRO_DEFAULT_QUEUE_GROUP) == 0) && (NATS_OK == nats_JSONGetObject(array[1], "metadata", &md)) && (NATS_OK == nats_JSONGetStrPtr(md, "key1", &str)) && (strcmp(str, "value1") == 0) && (NATS_OK == nats_JSONGetStrPtr(md, "key2", &str)) &&
(strcmp(str, "value2") == 0) @@ -34434,8 +34547,8 @@ void test_MicroServiceStopsWhenServerStops(void) natsMutex_Lock(arg.m); while ((s != NATS_TIMEOUT) && !arg.microAllDone) s = natsCondition_TimedWait(arg.c, arg.m, 1000); - natsMutex_Unlock(arg.m); testCond(arg.microAllDone); + natsMutex_Unlock(arg.m); test("Test microservice is not running: "); testCond(microService_IsStopped(m)) From f5a7e6034a616e92137c55030916c4579498a4c8 Mon Sep 17 00:00:00 2001 From: Lev <1187448+levb@users.noreply.github.com> Date: Mon, 30 Sep 2024 13:17:31 -0700 Subject: [PATCH 05/16] FIXED: nats_GetJWTOrSeed to understand Windows \r\n lines (#801) * FIXED: nats_GetJWTOrSeed to understand Windows \r\n lites * Adjusted the test * Adjusted the test, +1 * PR feedback --- src/util.c | 95 ++++++++++++++++++++++++++++------------------------- test/test.c | 3 +- 2 files changed, 52 insertions(+), 46 deletions(-) diff --git a/src/util.c b/src/util.c index 2d33b3d23..c93fe6a1e 100644 --- a/src/util.c +++ b/src/util.c @@ -2064,10 +2064,9 @@ nats_HostIsIP(const char *host) } static bool -_isLineAnHeader(const char *ptr) +_isLineAnHeader(const char *ptr, int len) { char *last = NULL; - int len = 0; int count = 0; bool done = false; @@ -2075,7 +2074,6 @@ _isLineAnHeader(const char *ptr) // the strict requirement is that it ends with at least 3 consecutive // `-` characters. It must also have 3 consecutive `-` before that. // So the minimum size would be 6. - len = (int) strlen(ptr); if (len < 6) return false; @@ -2118,61 +2116,72 @@ _isLineAnHeader(const char *ptr) return false; } +// Finds the next entire line in next, sets start to the beginning of the line, +// updates next to point to the remaining bytes, and returns the line's length. +// If there are no more non-empty lines, returns 0. 
+static inline int +_scan_line(const char **start, const char **next) +{ + const char *ptr = *next; + int n = 0; + + // skip empty lines in the beginning + while (*ptr == '\r' || *ptr == '\n') + ptr++; + *start = ptr; + + // Consume until we reach the end of the line + while (*ptr != '\r' && *ptr != '\n' && *ptr != '\0') + { + ptr++; + n++; + } + + if (n == 0) + return 0; + + // skip the subsequent empty lines, 'cause why not? + while (*ptr == '\r' || *ptr == '\n') + ptr++; + *next = ptr; + + return n; +} + natsStatus nats_GetJWTOrSeed(char **val, const char *content, int item) { - natsStatus s = NATS_OK; - char *pch = NULL; - char *str = NULL; - char *saved = NULL; - int curItem = 0; - int orgLen = 0; - char *nt = NULL; - - // First, make a copy of the original content since - // we are going to call strtok on it, which alters it. - str = NATS_STRDUP(content); - if (str == NULL) - return nats_setDefaultError(NATS_NO_MEMORY); - - orgLen = (int) strlen(str); + natsStatus s = NATS_OK; + const char *next = content; + const char *line = NULL; + int lineLen = 0; + int curItem = 0; + const char *saved = NULL; + int savedLen = 0; - pch = nats_strtok(str, "\n", &nt); - while (pch != NULL) + while ((lineLen = _scan_line(&line, &next)) > 0) { - if (_isLineAnHeader(pch)) + if (_isLineAnHeader(line, lineLen)) { // We got the start of the section. Save the next line // as the possible returned value if the following line // is a header too. - pch = nats_strtok(NULL, "\n", &nt); - saved = pch; + savedLen = _scan_line(&saved, &next); + if (savedLen == 0) + break; // premature end of file? - while (pch != NULL) - { - pch = nats_strtok(NULL, "\n", &nt); - if (pch == NULL) - break; - - // We tolerate empty string(s). - if (*pch == '\0') - continue; - - break; - } - if (pch == NULL) - break; - - if (_isLineAnHeader(pch)) + lineLen = _scan_line(&line, &next); + if (_isLineAnHeader(line, lineLen)) { // Is this the item we were looking for? 
if (curItem == item) { // Return a copy of the saved line - *val = NATS_STRDUP(saved); + *val = NATS_CALLOC(savedLen + 1, 1); if (*val == NULL) s = nats_setDefaultError(NATS_NO_MEMORY); - + else + memcpy(*val, saved, savedLen); break; } else if (++curItem > 1) @@ -2181,12 +2190,8 @@ nats_GetJWTOrSeed(char **val, const char *content, int item) } } } - pch = nats_strtok(NULL, "\n", &nt); } - memset(str, 0, orgLen); - NATS_FREE(str); - // Nothing was found, return NATS_NOT_FOUND but don't set the stack error. if ((s == NATS_OK) && (*val == NULL)) return NATS_NOT_FOUND; diff --git a/test/test.c b/test/test.c index 770eb8b22..c2792f282 100644 --- a/test/test.c +++ b/test/test.c @@ -4759,10 +4759,11 @@ void test_natsGetJWTOrSeed(void) char buf[256]; const char *valids[] = { "--- START JWT ---\nsome value\n--- END JWT ---\n", + "\r\n\r\n--- START JWT ---\r\nsome value\r\n--- END JWT ---\r\n", "--- ---\nsome value\n--- ---\n", "------\nsome value\n------\n", "---\nabc\n--\n---START---\nsome value\n---END---\n----\ndef\n--- ---\n", - "nothing first\nthen it starts\n --- START ---\nsome value\n--- END ---\n---START---\nof something else\n---END---\n", + "nothing first\nthen it starts\n\r\n --- START ---\r\n\n\n\r\nsome value\n--- END ---\n\n---START---\nof something else\n---END---\n", "--- START ---\nsome value\n\n\n--- END ---\n", }; const char *invalids[] = { From 50eaa67d4d542bd69c0adb35e4c2ea9830df8fad Mon Sep 17 00:00:00 2001 From: Lev <1187448+levb@users.noreply.github.com> Date: Mon, 30 Sep 2024 13:43:02 -0700 Subject: [PATCH 06/16] [DOCS ONLY] fixed documentation changes in preparation for v3.9 release (#802) * [DOCS ONLY] Fixed doxygen warnings * Renamed natsFetchCompleteHandler to jsFetchCompleteHandler * Fixed brief vs description on natsConnection_Reconnect * Fixed the \briefs in jsStreamConfig * Updated natsOptions_SetServers for %-encoding * Updated for js_PullSubscribeAsync --- doc/DoxyFile.NATS.Client | 4 +- src/nats.h | 185 
+++++++++++++++++++-------------------- 2 files changed, 91 insertions(+), 98 deletions(-) diff --git a/doc/DoxyFile.NATS.Client b/doc/DoxyFile.NATS.Client index d6370b8c1..d5f5b6508 100644 --- a/doc/DoxyFile.NATS.Client +++ b/doc/DoxyFile.NATS.Client @@ -1340,7 +1340,7 @@ HTML_COLORSTYLE_GAMMA = 80 # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. -HTML_TIMESTAMP = NO +# HTML_TIMESTAMP = NO # If the HTML_DYNAMIC_MENUS tag is set to YES then the generated HTML # documentation will contain a main index with vertical navigation menus that @@ -2008,7 +2008,7 @@ LATEX_BIB_STYLE = plain # The default value is: NO. # This tag requires that the tag GENERATE_LATEX is set to YES. -LATEX_TIMESTAMP = NO +# LATEX_TIMESTAMP = NO # The LATEX_EMOJI_DIRECTORY tag is used to specify the (relative or absolute) # path from which the emoji images will be read. If a relative path is entered, diff --git a/src/nats.h b/src/nats.h index 3b0a30f2e..1948218ef 100644 --- a/src/nats.h +++ b/src/nats.h @@ -561,48 +561,46 @@ typedef struct jsStreamConfig { bool Sealed; ///< Seal a stream so no messages can get our or in. bool DenyDelete; ///< Restrict the ability to delete messages. bool DenyPurge; ///< Restrict the ability to purge messages. - /** - * Allows messages to be placed into the system and purge - * all older messages using a special message header. - */ + + /// @brief Allow messages to be placed into the system and purge all + /// older messages using a special message header. bool AllowRollup; - // Allow republish of the message after being sequenced and stored. + /// @brief Allow republish of the message after being sequenced and + /// stored. jsRePublish *RePublish; - // Allow higher performance, direct access to get individual messages. E.g. KeyValue + /// @brief Allow higher performance, direct access to get individual + /// messages. E.g. 
KeyValue bool AllowDirect; - // Allow higher performance and unified direct access for mirrors as well. + + /// @brief Allow higher performance and unified direct access for + /// mirrors as well. bool MirrorDirect; - // Allow KV like semantics to also discard new on a per subject basis + /// @brief Allow KV like semantics to also discard new on a per subject + /// basis bool DiscardNewPerSubject; - /** - * @brief Configuration options introduced in 2.10 - * - * - Metadata is a user-provided array of key/value pairs, encoded as a - * string array [n1, v1, n2, v2, ...] representing key/value pairs - * {n1:v1, n2:v2, ...}. - * - * - Compression: js_StorageCompressionNone (default) or - * js_StorageCompressionS2 - * - * - FirstSeq: the starting sequence number for the stream. - * - * - SubjectTransformConfig is for applying a subject transform (to - * matching messages) before doing anything else when a new message is - * received - * - * - ConsumerLimits is for setting the limits on certain options on all - * consumers of the stream. - */ + /// @brief A user-provided array of key/value pairs, encoded as a string + /// array [n1, v1, n2, v2, ...] representing key/value pairs {n1:v1, + /// n2:v2, ...}. + natsMetadata Metadata; + + /// @brief js_StorageCompressionNone (default) or + /// js_StorageCompressionS2. + jsStorageCompression Compression; + + /// @brief the starting sequence number for the stream. + uint64_t FirstSeq; - natsMetadata Metadata; - jsStorageCompression Compression; - uint64_t FirstSeq; + /// @brief Applies a subject transform (to matching messages) before + /// doing anything else when a new message is received. jsSubjectTransformConfig SubjectTransform; - jsStreamConsumerLimits ConsumerLimits; + + /// @brief Sets the limits on certain options on all consumers of the + /// stream. 
+ jsStreamConsumerLimits ConsumerLimits; } jsStreamConfig; /** @@ -1227,19 +1225,19 @@ typedef struct jsFetchRequest * * @see js_PullSubscribeAsync */ -typedef void (*natsFetchCompleteHandler)(natsConnection *nc, natsSubscription *sub, natsStatus s, void *closure); +typedef void (*jsFetchCompleteHandler)(natsConnection *nc, natsSubscription *sub, natsStatus s, void *closure); /** \brief Callback used to customize flow control for js_PullSubscribeAsync. * * The library will invoke this callback when it may be time to request more * messages from the server. * - * @return true to fetch more, false to skip. If true, @messages and @maxBytes - * should be set to the number of messages and max bytes to fetch. + * @return true to fetch more, false to skip. If true, \p messages and \p + * maxBytes should be set to the number of messages and max bytes to fetch. * * @see js_PullSubscribeAsync */ -typedef bool (*natsFetchNextHandler)(int *messages, int64_t *bytes, natsSubscription *sub, void *closure); +typedef bool (*jsFetchNextHandler)(int *messages, int64_t *maxBytes, natsSubscription *sub, void *closure); /** * JetStream context options. @@ -1252,9 +1250,6 @@ typedef struct jsOptions const char *Domain; ///< Domain changes the domain part of JetSteam API prefix. int64_t Wait; ///< Amount of time (in milliseconds) to wait for various JetStream API requests, default is 5000 ms (5 seconds). - /** - * Publish Async options - */ struct jsOptionsPublishAsync { int64_t MaxPending; ///< Maximum outstanding asynchronous publishes that can be inflight at one time. @@ -1276,52 +1271,56 @@ typedef struct jsOptions int64_t StallWait; ///< Amount of time (in milliseconds) to wait in a PublishAsync call when there is MaxPending inflight messages, default is 200 ms. - } PublishAsync; + } PublishAsync; ///< extra options for #js_PublishAsync struct jsOptionsPullSubscribeAsync { - // Lifetime of the subscription (completes when any one of the - // targets is reached). 
- int64_t Timeout; // in milliseconds - int MaxMessages; - int64_t MaxBytes; - - // If NoWait is set, the subscription will receive the messages - // already stored on the server subject to the limits, but will - // not wait for more messages. - // - // Note that if Timeout is set we would still wait for first - // message to become available, even if there are currently any - // on the server + int64_t Timeout; ///< Auto-unsubscribe after this many milliseconds. + int MaxMessages; ///< Auto-unsubscribe after receiving this many messages. + int64_t MaxBytes; ///< Auto-unsubscribe after receiving this many bytes. + + /// \brief If NoWait is set, the subscription will receive the + /// messages already stored on the server subject to the limits, + /// but will not wait for more messages. + /// + /// \note that if Timeout is set we would still wait for first + /// message to become available, even if there are currently any + /// on the server bool NoWait; - // Fetch complete handler that receives the exit status code, - // the subscription's Complete handler is also invoked, but does - // not have the status code. - natsFetchCompleteHandler CompleteHandler; + /// \brief Fetch complete handler that receives the exit status + /// code, the subscription's Complete handler is also invoked, + /// but does not have the status code. + jsFetchCompleteHandler CompleteHandler; void *CompleteHandlerClosure; - // Have server sends heartbeats at this interval to help detect - // communication failures. - int64_t Heartbeat; // in milliseconds - - // Options to control automatic Fetch flow control. The number - // of messages to ask for in a single request, and if we should - // try to fetch ahead, KeepAhead more than we need to finish the - // current request. Fetch this many messages ahead of time. - // - // KeepAhead can not be used in conjunction with MaxBytes or - // NoWait.
+ /// \brief Have the server send heartbeats at this interval (in + /// milliseconds) to help detect communication failures. + int64_t Heartbeat; + + /// @brief When using the automatic Fetch flow control (default + /// NextHandler), this is the number of messages to ask for in a + /// single request. int FetchSize; + + /// @brief When using the automatic Fetch flow control (default + /// NextHandler), initiate the next fetch request (this many + /// messages) prior to the fulfillment of the current request. + /// + /// @note KeepAhead can not be used in conjunction with MaxBytes + /// or NoWait. int KeepAhead; - // Manual fetch flow control. If provided gets called before - // each message is deliverered to msgCB, and overrides the - // default algorithm for sending Next requests. - natsFetchNextHandler NextHandler; + /// @brief If set, switches to manual fetch flow control. + /// + /// If provided, this function gets called before each message + /// is delivered to msgCB, and overrides the default algorithm + /// for sending Next fetch requests. jsFetchNextHandler NextHandler; void *NextHandlerClosure; - } PullSubscribeAsync; + } PullSubscribeAsync; ///< extra options for #js_PullSubscribeAsync + /** * Advanced stream options @@ -2277,8 +2276,9 @@ natsOptions_Create(natsOptions **newOpts); /** \brief Sets the URL to connect to. * - * Sets the URL of the `NATS Server` the client should try to connect to. - * The URL can contain optional user name and password. + * Sets the URL of the `NATS Server` the client should try to connect to. The + * URL can contain optional user name and password. %-encoding is supported for + * entering special characters. * * Some valid URLS: * @@ -2291,8 +2291,8 @@ natsOptions_Create(natsOptions **newOpts); * @see natsOptions_SetToken * * @param opts the pointer to the #natsOptions object. - * @param url the string representing the URL the connection should use - * to connect to the server.
+ * @param url the string representing the URL the connection should use to + * connect to the server. * */ /* @@ -4169,12 +4169,12 @@ stanMsg_Destroy(stanMsg *msg); NATS_EXTERN natsStatus natsConnection_Connect(natsConnection **nc, natsOptions *options); -/** \brief Causes the client to drop the connection to the current server and - * perform standard reconnection process. +/** \brief Drops the current connection, reconnects including re-subscribing. * - * This means that all subscriptions and consumers should be resubscribed and - * their work resumed after successful reconnect where all reconnect options are - * respected. + * Causes the client to drop the connection to the current server and to + * initiate the standard reconnection process. This means that all subscriptions + * and consumers will be resubscribed and their work resumed after successful + * reconnect where all reconnect options are respected. * * @param nc the pointer to the #natsConnection object. */ @@ -6493,7 +6493,8 @@ js_Subscribe(natsSubscription **sub, jsCtx *js, const char *subject, * @param sub the location where to store the pointer to the newly created * #natsSubscription object. * @param js the pointer to the #jsCtx object. - * @param subject the subject this subscription is created for. + * @param subjects the subject this subscription is created for. + * @param numSubjects the number of subjects for the subscription. * @param cb the #natsMsgHandler callback. * @param cbClosure a pointer to an user defined object (can be `NULL`). See * the #natsMsgHandler prototype. @@ -6541,7 +6542,7 @@ js_SubscribeSync(natsSubscription **sub, jsCtx *js, const char *subject, */ NATS_EXTERN natsStatus js_SubscribeSyncMulti(natsSubscription **sub, jsCtx *js, const char **subjects, int numSubjects, - jsOptions *jsOpts, jsSubOptions *opts, jsErrCode *errCode); + jsOptions *opts, jsSubOptions *subOpts, jsErrCode *errCode); /** \brief Create a pull subscriber. 
* @@ -7840,7 +7841,7 @@ struct micro_endpoint_info_s const char *QueueGroup; /** - * @briefMetadata for the endpoint, a JSON-encoded user-provided object, + * @brief Metadata for the endpoint, a JSON-encoded user-provided object, * e.g. `{"key":"value"}` */ natsMetadata Metadata; @@ -7893,25 +7894,17 @@ struct micro_endpoint_stats_s }; /** - * #brief The Microservice endpoint *group* configuration object. + * @brief The Microservice endpoint *group* configuration object. */ struct micro_group_config_s { - /** - * @brief The subject prefix for the group. - */ + /// @brief The subject prefix for the group. const char *Prefix; - /** - * @brief Overrides the default queue group for the service. - * - */ + /// @brief Overrides the default queue group for the service. const char *QueueGroup; - /** - * @brief Disables the use of a queue group for the service. - * - */ + /// @brief Disables the use of a queue group for the service. bool NoQueueGroup; }; From 233ca8efc428d5b84b41f1a9aaedcd81441ed6bc Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Mon, 30 Sep 2024 13:48:01 -0700 Subject: [PATCH 07/16] [skip ci] Update docs: 50eaa67: [DOCS ONLY] fixed documentation changes in preparation for v3.9 release (#802) (#782) Co-authored-by: levb <1187448+levb@users.noreply.github.com> --- doc/html/annotated.html | 113 +- doc/html/annotated_dup.js | 2 + doc/html/classes.html | 9 +- doc/html/functions_c.html | 6 + doc/html/functions_d.html | 9 + doc/html/functions_dup.js | 1 + doc/html/functions_f.html | 3 + doc/html/functions_h.html | 1 + doc/html/functions_k.html | 3 + doc/html/functions_l.html | 3 + doc/html/functions_m.html | 4 + doc/html/functions_n.html | 12 + doc/html/functions_p.html | 4 + doc/html/functions_q.html | 7 + doc/html/functions_r.html | 3 + doc/html/functions_t.html | 6 +- doc/html/functions_u.html | 105 + doc/html/functions_vars.js | 1 + doc/html/functions_vars_c.html | 6 + 
doc/html/functions_vars_d.html | 9 + doc/html/functions_vars_f.html | 3 + doc/html/functions_vars_h.html | 1 + doc/html/functions_vars_k.html | 3 + doc/html/functions_vars_l.html | 3 + doc/html/functions_vars_m.html | 4 + doc/html/functions_vars_n.html | 12 + doc/html/functions_vars_p.html | 4 + doc/html/functions_vars_q.html | 7 + doc/html/functions_vars_r.html | 3 + doc/html/functions_vars_t.html | 6 +- doc/html/functions_vars_u.html | 105 + doc/html/globals.html | 11 +- doc/html/globals_eval.html | 3 + doc/html/globals_func.html | 5 +- doc/html/globals_func_k.html | 3 + doc/html/globals_func_m.html | 4 +- doc/html/globals_func_n.html | 6 + doc/html/globals_k.html | 3 + doc/html/globals_m.html | 7 +- doc/html/globals_n.html | 12 + doc/html/globals_type.html | 12 + doc/html/group__conn_mgt_group.html | 4 +- doc/html/group__js_sub_group.html | 105 +- doc/html/group__js_sub_group.js | 3 +- doc/html/group__kv_group.html | 67 + doc/html/group__kv_group.js | 1 + doc/html/group__library_group.html | 30 + doc/html/group__library_group.js | 1 + doc/html/group__micro_group_functions.html | 18 +- doc/html/group__micro_group_functions.js | 2 +- doc/html/group__micro_service_functions.html | 18 +- doc/html/group__micro_service_functions.js | 2 +- doc/html/group__micro_structs.html | 3 + doc/html/group__micro_structs.js | 11 + doc/html/group__micro_types.html | 18 + doc/html/group__micro_types.js | 1 + doc/html/group__opts_group.html | 31 +- doc/html/group__opts_group.js | 1 + doc/html/group__types_group.html | 76 + doc/html/group__types_group.js | 26 + doc/html/menudata.js | 2 + doc/html/nats_8h.html | 52 +- doc/html/nats_8h.js | 15 +- doc/html/nats_8h_source.html | 5107 +++++++++-------- doc/html/navtreedata.js | 12 +- doc/html/navtreeindex0.js | 190 +- doc/html/navtreeindex1.js | 282 +- doc/html/navtreeindex2.js | 318 +- doc/html/navtreeindex3.js | 334 +- doc/html/navtreeindex4.js | 302 +- doc/html/navtreeindex5.js | 346 +- doc/html/navtreeindex6.js | 416 +- 
doc/html/search/all_1.js | 14 +- doc/html/search/all_10.js | 15 +- doc/html/search/all_11.js | 8 +- doc/html/search/all_12.js | 89 +- doc/html/search/all_13.js | 93 +- doc/html/search/all_14.js | 13 +- doc/html/search/all_15.js | 3 +- doc/html/search/all_16.html | 37 + doc/html/search/all_16.js | 4 + doc/html/search/all_17.html | 37 + doc/html/search/all_17.js | 5 + doc/html/search/all_2.js | 18 +- doc/html/search/all_3.js | 18 +- doc/html/search/all_4.js | 36 +- doc/html/search/all_5.js | 43 +- doc/html/search/all_6.js | 26 +- doc/html/search/all_7.js | 12 +- doc/html/search/all_8.js | 10 +- doc/html/search/all_9.js | 277 +- doc/html/search/all_a.js | 340 +- doc/html/search/all_b.js | 82 +- doc/html/search/all_c.js | 130 +- doc/html/search/all_d.js | 381 +- doc/html/search/all_e.js | 276 +- doc/html/search/all_f.js | 18 +- doc/html/search/classes_0.js | 43 +- doc/html/search/classes_1.js | 48 +- doc/html/search/classes_2.js | 11 +- doc/html/search/classes_3.js | 9 +- doc/html/search/classes_4.html | 37 + doc/html/search/classes_4.js | 5 + doc/html/search/defines_0.js | 18 +- doc/html/search/defines_1.js | 4 +- doc/html/search/enums_0.js | 16 +- doc/html/search/enums_1.js | 2 +- doc/html/search/enums_2.js | 4 +- doc/html/search/enumvalues_0.js | 302 +- doc/html/search/enumvalues_1.js | 8 +- doc/html/search/enumvalues_2.js | 89 +- doc/html/search/files_0.js | 4 +- doc/html/search/files_1.js | 2 +- doc/html/search/files_2.js | 2 +- doc/html/search/functions_0.js | 113 +- doc/html/search/functions_1.js | 89 +- doc/html/search/functions_2.js | 88 +- doc/html/search/functions_3.js | 364 +- doc/html/search/functions_4.js | 82 +- doc/html/search/groups_0.js | 4 +- doc/html/search/groups_1.js | 4 +- doc/html/search/groups_2.js | 2 +- doc/html/search/groups_3.js | 2 +- doc/html/search/groups_4.js | 4 +- doc/html/search/groups_5.js | 10 +- doc/html/search/groups_6.js | 6 +- doc/html/search/groups_7.js | 18 +- doc/html/search/groups_8.js | 2 +- doc/html/search/groups_9.js | 6 
+- doc/html/search/groups_a.js | 18 +- doc/html/search/groups_b.js | 2 +- doc/html/search/groups_c.js | 2 +- doc/html/search/pages_0.js | 2 +- doc/html/search/pages_1.js | 2 +- doc/html/search/searchdata.js | 6 +- doc/html/search/typedefs_0.js | 84 +- doc/html/search/typedefs_1.js | 18 +- doc/html/search/typedefs_2.js | 33 +- doc/html/search/typedefs_3.js | 43 +- doc/html/search/typedefs_4.js | 16 +- doc/html/search/variables_0.js | 26 +- doc/html/search/variables_1.js | 10 +- doc/html/search/variables_10.js | 46 +- doc/html/search/variables_11.js | 21 +- doc/html/search/variables_12.js | 2 +- doc/html/search/variables_13.js | 2 +- doc/html/search/variables_14.html | 37 + doc/html/search/variables_14.js | 4 + doc/html/search/variables_2.js | 24 +- doc/html/search/variables_3.js | 43 +- doc/html/search/variables_4.js | 34 +- doc/html/search/variables_5.js | 13 +- doc/html/search/variables_6.js | 8 +- doc/html/search/variables_7.js | 10 +- doc/html/search/variables_8.js | 5 +- doc/html/search/variables_9.js | 21 +- doc/html/search/variables_a.js | 67 +- doc/html/search/variables_b.js | 29 +- doc/html/search/variables_c.js | 8 +- doc/html/search/variables_d.js | 21 +- doc/html/search/variables_e.js | 3 +- doc/html/search/variables_f.js | 13 +- doc/html/status_8h.html | 3 + doc/html/status_8h.js | 3 +- doc/html/status_8h_source.html | 553 +- .../struct____nats_client_config-members.html | 112 + doc/html/struct____nats_client_config.html | 233 + doc/html/struct____nats_client_config.js | 10 + doc/html/structjs_options-members.html | 5 +- doc/html/structjs_options.html | 20 + doc/html/structjs_options.js | 2 + ...s_options_1_1js_options_publish_async.html | 4 +- ..._options_pull_subscribe_async-members.html | 116 + ...ns_1_1js_options_pull_subscribe_async.html | 304 + ...ions_1_1js_options_pull_subscribe_async.js | 14 + doc/html/structjs_stream_config.html | 19 +- ...uctmicro__endpoint__config__s-members.html | 2 + .../structmicro__endpoint__config__s.html | 36 +- 
doc/html/structmicro__endpoint__config__s.js | 2 + ...tructmicro__endpoint__info__s-members.html | 3 +- doc/html/structmicro__endpoint__info__s.html | 19 +- doc/html/structmicro__endpoint__info__s.js | 1 + ...ructmicro__endpoint__stats__s-members.html | 3 +- doc/html/structmicro__endpoint__stats__s.html | 17 + doc/html/structmicro__endpoint__stats__s.js | 1 + ...structmicro__group__config__s-members.html | 108 + doc/html/structmicro__group__config__s.html | 170 + doc/html/structmicro__group__config__s.js | 6 + ...ructmicro__service__config__s-members.html | 2 + doc/html/structmicro__service__config__s.html | 36 +- doc/html/structmicro__service__config__s.js | 2 + 191 files changed, 8287 insertions(+), 5893 deletions(-) create mode 100644 doc/html/functions_u.html create mode 100644 doc/html/functions_vars_u.html create mode 100644 doc/html/search/all_16.html create mode 100644 doc/html/search/all_16.js create mode 100644 doc/html/search/all_17.html create mode 100644 doc/html/search/all_17.js create mode 100644 doc/html/search/classes_4.html create mode 100644 doc/html/search/classes_4.js create mode 100644 doc/html/search/variables_14.html create mode 100644 doc/html/search/variables_14.js create mode 100644 doc/html/struct____nats_client_config-members.html create mode 100644 doc/html/struct____nats_client_config.html create mode 100644 doc/html/struct____nats_client_config.js create mode 100644 doc/html/structjs_options_1_1js_options_pull_subscribe_async-members.html create mode 100644 doc/html/structjs_options_1_1js_options_pull_subscribe_async.html create mode 100644 doc/html/structjs_options_1_1js_options_pull_subscribe_async.js create mode 100644 doc/html/structmicro__group__config__s-members.html create mode 100644 doc/html/structmicro__group__config__s.html create mode 100644 doc/html/structmicro__group__config__s.js diff --git a/doc/html/annotated.html b/doc/html/annotated.html index be1be1390..36f64f706 100644 --- a/doc/html/annotated.html +++ 
b/doc/html/annotated.html @@ -89,61 +89,64 @@
Here are the classes, structs, unions and interfaces with brief descriptions:
[detail level 123]
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
 CjsAccountInfo
 CjsAccountLimits
 CjsAPIStats
 CjsClusterInfo
 CjsConsumerConfig
 CjsConsumerInfo
 CjsConsumerInfoList
 CjsConsumerNamesList
 CjsConsumerPauseResponse
 CjsConsumerSequenceMismatch
 CjsDirectGetMsgOptions
 CjsExternalStream
 CjsFetchRequest
 CjsLostStreamData
 CjsMsgMetaData
 CjsOptions
 CjsOptionsPublishAsync
 CjsOptionsStream
 CjsOptionsStreamInfo
 CjsOptionsStreamPurge
 CjsPeerInfo
 CjsPlacement
 CjsPubAck
 CjsPubAckErr
 CjsPubOptions
 CjsRePublish
 CjsSequenceInfo
 CjsSequencePair
 CjsStreamAlternate
 CjsStreamConfig
 CjsStreamConsumerLimits
 CjsStreamInfo
 CjsStreamInfoList
 CjsStreamNamesList
 CjsStreamSource
 CjsStreamSourceInfo
 CjsStreamState
 CjsStreamStateSubject
 CjsStreamStateSubjects
 CjsSubjectTransformConfig
 CjsSubOptions
 CjsTier
 CkvConfig
 CkvEntryListA list of KeyValue store entries
 CkvKeysListA list of KeyValue store keys
 CkvPurgeOptions
 CkvWatchOptions
 Cmicro_endpoint_config_s
 Cmicro_endpoint_info_s
 Cmicro_endpoint_stats_s
 Cmicro_service_config_sThe Microservice top-level configuration object
 Cmicro_service_info_s
 Cmicro_service_stats_s
 CnatsMetadataA type to represent user-provided metadata, a list of k=v pairs
 CnatsMsgListA list of NATS messages
 C__natsClientConfigAn initial configuration for NATS client. Provides control over the threading model, and sets many default option values
 CjsAccountInfo
 CjsAccountLimits
 CjsAPIStats
 CjsClusterInfo
 CjsConsumerConfig
 CjsConsumerInfo
 CjsConsumerInfoList
 CjsConsumerNamesList
 CjsConsumerPauseResponse
 CjsConsumerSequenceMismatch
 CjsDirectGetMsgOptions
 CjsExternalStream
 CjsFetchRequest
 CjsLostStreamData
 CjsMsgMetaData
 CjsOptions
 CjsOptionsPublishAsync
 CjsOptionsPullSubscribeAsync
 CjsOptionsStream
 CjsOptionsStreamInfo
 CjsOptionsStreamPurge
 CjsPeerInfo
 CjsPlacement
 CjsPubAck
 CjsPubAckErr
 CjsPubOptions
 CjsRePublish
 CjsSequenceInfo
 CjsSequencePair
 CjsStreamAlternate
 CjsStreamConfig
 CjsStreamConsumerLimits
 CjsStreamInfo
 CjsStreamInfoList
 CjsStreamNamesList
 CjsStreamSource
 CjsStreamSourceInfo
 CjsStreamState
 CjsStreamStateSubject
 CjsStreamStateSubjects
 CjsSubjectTransformConfig
 CjsSubOptions
 CjsTier
 CkvConfig
 CkvEntryListA list of KeyValue store entries
 CkvKeysListA list of KeyValue store keys
 CkvPurgeOptions
 CkvWatchOptions
 Cmicro_endpoint_config_s
 Cmicro_endpoint_info_s
 Cmicro_endpoint_stats_s
 Cmicro_group_config_sThe Microservice endpoint group configuration object
 Cmicro_service_config_sThe Microservice top-level configuration object
 Cmicro_service_info_s
 Cmicro_service_stats_s
 CnatsMetadataA type to represent user-provided metadata, a list of k=v pairs
 CnatsMsgListA list of NATS messages
diff --git a/doc/html/annotated_dup.js b/doc/html/annotated_dup.js index 0f2fc63f9..91bc90224 100644 --- a/doc/html/annotated_dup.js +++ b/doc/html/annotated_dup.js @@ -1,5 +1,6 @@ var annotated_dup = [ + [ "__natsClientConfig", "struct____nats_client_config.html", "struct____nats_client_config" ], [ "jsAccountInfo", "structjs_account_info.html", "structjs_account_info" ], [ "jsAccountLimits", "structjs_account_limits.html", "structjs_account_limits" ], [ "jsAPIStats", "structjs_a_p_i_stats.html", "structjs_a_p_i_stats" ], @@ -46,6 +47,7 @@ var annotated_dup = [ "micro_endpoint_config_s", "structmicro__endpoint__config__s.html", "structmicro__endpoint__config__s" ], [ "micro_endpoint_info_s", "structmicro__endpoint__info__s.html", "structmicro__endpoint__info__s" ], [ "micro_endpoint_stats_s", "structmicro__endpoint__stats__s.html", "structmicro__endpoint__stats__s" ], + [ "micro_group_config_s", "structmicro__group__config__s.html", "structmicro__group__config__s" ], [ "micro_service_config_s", "structmicro__service__config__s.html", "structmicro__service__config__s" ], [ "micro_service_info_s", "structmicro__service__info__s.html", "structmicro__service__info__s" ], [ "micro_service_stats_s", "structmicro__service__stats__s.html", "structmicro__service__stats__s" ], diff --git a/doc/html/classes.html b/doc/html/classes.html index 726490a4c..5210c916e 100644 --- a/doc/html/classes.html +++ b/doc/html/classes.html @@ -87,20 +87,23 @@
Class Index
-
J | K | M | N
+
J | K | M | N | _
J
-
jsAccountInfo
jsAccountLimits
jsAPIStats
jsClusterInfo
jsConsumerConfig
jsConsumerInfo
jsConsumerInfoList
jsConsumerNamesList
jsConsumerPauseResponse
jsConsumerSequenceMismatch
jsDirectGetMsgOptions
jsExternalStream
jsFetchRequest
jsLostStreamData
jsMsgMetaData
jsOptions
jsOptions::jsOptionsPublishAsync
jsOptions::jsOptionsStream
jsOptions::jsOptionsStream::jsOptionsStreamInfo
jsOptions::jsOptionsStream::jsOptionsStreamPurge
jsPeerInfo
jsPlacement
jsPubAck
jsPubAckErr
jsPubOptions
jsRePublish
jsSequenceInfo
jsSequencePair
jsStreamAlternate
jsStreamConfig
jsStreamConsumerLimits
jsStreamInfo
jsStreamInfoList
jsStreamNamesList
jsStreamSource
jsStreamSourceInfo
jsStreamState
jsStreamStateSubject
jsStreamStateSubjects
jsSubjectTransformConfig
jsSubOptions
jsTier
+
jsAccountInfo
jsAccountLimits
jsAPIStats
jsClusterInfo
jsConsumerConfig
jsConsumerInfo
jsConsumerInfoList
jsConsumerNamesList
jsConsumerPauseResponse
jsConsumerSequenceMismatch
jsDirectGetMsgOptions
jsExternalStream
jsFetchRequest
jsLostStreamData
jsMsgMetaData
jsOptions
jsOptions::jsOptionsPublishAsync
jsOptions::jsOptionsPullSubscribeAsync
jsOptions::jsOptionsStream
jsOptions::jsOptionsStream::jsOptionsStreamInfo
jsOptions::jsOptionsStream::jsOptionsStreamPurge
jsPeerInfo
jsPlacement
jsPubAck
jsPubAckErr
jsPubOptions
jsRePublish
jsSequenceInfo
jsSequencePair
jsStreamAlternate
jsStreamConfig
jsStreamConsumerLimits
jsStreamInfo
jsStreamInfoList
jsStreamNamesList
jsStreamSource
jsStreamSourceInfo
jsStreamState
jsStreamStateSubject
jsStreamStateSubjects
jsSubjectTransformConfig
jsSubOptions
jsTier
K
kvConfig
kvEntryList
kvKeysList
kvPurgeOptions
kvWatchOptions
M
-
micro_endpoint_config_s
micro_endpoint_info_s
micro_endpoint_stats_s
micro_service_config_s
micro_service_info_s
micro_service_stats_s
+
micro_endpoint_config_s
micro_endpoint_info_s
micro_endpoint_stats_s
micro_group_config_s
micro_service_config_s
micro_service_info_s
micro_service_stats_s
N
natsMetadata
natsMsgList
+
+
_
+
__natsClientConfig
diff --git a/doc/html/functions_c.html b/doc/html/functions_c.html index f46eade78..c6c32c0f5 100644 --- a/doc/html/functions_c.html +++ b/doc/html/functions_c.html @@ -92,6 +92,12 @@

- c -