diff --git a/CHANGELOG.md b/CHANGELOG.md index ec010f23d..5b8865269 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -77,6 +77,7 @@ * [CHANGE] Changed `ShouldLog()` function signature in `middleware.OptionalLogging` interface to `ShouldLog(context.Context) (bool, string)`: the returned `string` contains an optional reason. When reason is valued, `GRPCServerLog` adds `()` suffix to the error. #514 * [CHANGE] Cache: Remove superfluous `cache.RemoteCacheClient` interface and unify all caches using the `cache.Cache` interface. #520 * [CHANGE] Updated the minimum required Go version to 1.21. #540 +* [CHANGE] memberlist: Metric `memberlist_client_messages_in_broadcast_queue` is now split into `queue="local"` and `queue="gossip"` values. #539 * [FEATURE] Cache: Add support for configuring a Redis cache backend. #268 #271 #276 * [FEATURE] Add support for waiting on the rate limiter using the new `WaitN` method. #279 * [FEATURE] Add `log.BufferedLogger` type. #338 @@ -214,6 +215,8 @@ * [ENHANCEMENT] SpanProfiler: do less work on unsampled traces. #528 * [ENHANCEMENT] Log Middleware: if the trace is not sampled, log its ID as `trace_id_unsampled` instead of `trace_id`. #529 * [EHNANCEMENT] httpgrpc: httpgrpc Server can now use error message from special HTTP header when converting HTTP response to an error. This is useful when HTTP response body contains binary data that doesn't form valid utf-8 string, otherwise grpc would fail to marshal returned error. #531 +* [ENHANCEMENT] memberlist: use separate queue for broadcast messages that are result of local updates, and prioritize locally-generated messages when sending broadcasts. On stopping, only wait for queue with locally-generated messages to be empty. #539 +* [ENHANCEMENT] memberlist: Added `-memberlist.broadcast-timeout-for-local-updates-on-shutdown` option to set timeout for sending locally-generated updates on shutdown, instead of previously hardcoded 10s (which is still the default). 
#539 * [ENHANCEMENT] tracing: add ExtractTraceSpanID function. * [CHANGE] Backoff: added `Backoff.ErrCause()` which is like `Backoff.Err()` but returns the context cause if backoff is terminated because the context has been canceled. #538 * [BUGFIX] spanlogger: Support multiple tenant IDs. #59 diff --git a/kv/memberlist/memberlist_client.go b/kv/memberlist/memberlist_client.go index a1b659d40..a7eefe92f 100644 --- a/kv/memberlist/memberlist_client.go +++ b/kv/memberlist/memberlist_client.go @@ -157,7 +157,8 @@ type KVConfig struct { LeftIngestersTimeout time.Duration `yaml:"left_ingesters_timeout" category:"advanced"` // Timeout used when leaving the memberlist cluster. - LeaveTimeout time.Duration `yaml:"leave_timeout" category:"advanced"` + LeaveTimeout time.Duration `yaml:"leave_timeout" category:"advanced"` + BroadcastTimeoutForLocalUpdatesOnShutdown time.Duration `yaml:"broadcast_timeout_for_local_updates_on_shutdown" category:"advanced"` // How much space to use to keep received and sent messages in memory (for troubleshooting). MessageHistoryBufferBytes int `yaml:"message_history_buffer_bytes" category:"advanced"` @@ -198,6 +199,7 @@ func (cfg *KVConfig) RegisterFlagsWithPrefix(f *flag.FlagSet, prefix string) { f.IntVar(&cfg.AdvertisePort, prefix+"memberlist.advertise-port", mlDefaults.AdvertisePort, "Gossip port to advertise to other members in the cluster. Used for NAT traversal.") f.StringVar(&cfg.ClusterLabel, prefix+"memberlist.cluster-label", mlDefaults.Label, "The cluster label is an optional string to include in outbound packets and gossip streams. 
Other members in the memberlist cluster will discard any message whose label doesn't match the configured one, unless the 'cluster-label-verification-disabled' configuration option is set to true.") f.BoolVar(&cfg.ClusterLabelVerificationDisabled, prefix+"memberlist.cluster-label-verification-disabled", mlDefaults.SkipInboundLabelCheck, "When true, memberlist doesn't verify that inbound packets and gossip streams have the cluster label matching the configured one. This verification should be disabled while rolling out the change to the configured cluster label in a live memberlist cluster.") + f.DurationVar(&cfg.BroadcastTimeoutForLocalUpdatesOnShutdown, prefix+"memberlist.broadcast-timeout-for-local-updates-on-shutdown", 10*time.Second, "Timeout for broadcasting all remaining locally-generated updates to other nodes when shutting down. Only used if there are nodes left in the memberlist cluster, and only applies to locally-generated updates, not to broadcast messages that are result of incoming gossip updates. 0 = no timeout, wait until all locally-generated updates are sent.") cfg.TCPTransport.RegisterFlagsWithPrefix(f, prefix) } @@ -231,10 +233,11 @@ type KV struct { // dns discovery provider provider DNSProvider - // Protects access to memberlist and broadcasts fields. - delegateReady atomic.Bool - memberlist *memberlist.Memberlist - broadcasts *memberlist.TransmitLimitedQueue + // Protects access to memberlist and broadcast queues. + delegateReady atomic.Bool + memberlist *memberlist.Memberlist + localBroadcasts *memberlist.TransmitLimitedQueue // queue for messages generated locally + gossipBroadcasts *memberlist.TransmitLimitedQueue // queue for messages that we forward from other nodes // KV Store. 
storeMu sync.Mutex @@ -273,7 +276,8 @@ type KV struct { numberOfPushes prometheus.Counter totalSizeOfPulls prometheus.Counter totalSizeOfPushes prometheus.Counter - numberOfBroadcastMessagesInQueue prometheus.GaugeFunc + numberOfGossipMessagesInQueue prometheus.GaugeFunc + numberOfLocalMessagesInQueue prometheus.GaugeFunc totalSizeOfBroadcastMessagesInQueue prometheus.Gauge numberOfBroadcastMessagesDropped prometheus.Counter casAttempts prometheus.Counter @@ -456,7 +460,11 @@ func (m *KV) starting(ctx context.Context) error { } // Finish delegate initialization. m.memberlist = list - m.broadcasts = &memberlist.TransmitLimitedQueue{ + m.localBroadcasts = &memberlist.TransmitLimitedQueue{ + NumNodes: list.NumMembers, + RetransmitMult: mlCfg.RetransmitMult, + } + m.gossipBroadcasts = &memberlist.TransmitLimitedQueue{ NumNodes: list.NumMembers, RetransmitMult: mlCfg.RetransmitMult, } @@ -719,20 +727,24 @@ func (m *KV) discoverMembers(ctx context.Context, members []string) []string { func (m *KV) stopping(_ error) error { level.Info(m.logger).Log("msg", "leaving memberlist cluster") - // Wait until broadcast queue is empty, but don't wait for too long. + // Wait until queue with locally-generated messages is empty, but don't wait for too long. // Also don't wait if there is just one node left. - // Problem is that broadcast queue is also filled up by state changes received from other nodes, - // so it may never be empty in a busy cluster. However, we generally only care about messages - // generated on this node via CAS, and those are disabled now (via casBroadcastsEnabled), and should be able - // to get out in this timeout. + // Note: Once we enter Stopping state, we don't queue more locally-generated messages. 
- waitTimeout := time.Now().Add(10 * time.Second) - for m.broadcasts.NumQueued() > 0 && m.memberlist.NumMembers() > 1 && time.Now().Before(waitTimeout) { + deadline := time.Now().Add(m.cfg.BroadcastTimeoutForLocalUpdatesOnShutdown) + + msgs := m.localBroadcasts.NumQueued() + nodes := m.memberlist.NumMembers() + for msgs > 0 && nodes > 1 && (m.cfg.BroadcastTimeoutForLocalUpdatesOnShutdown <= 0 || time.Now().Before(deadline)) { + level.Info(m.logger).Log("msg", "waiting for locally-generated broadcast messages to be sent out", "count", msgs, "nodes", nodes) time.Sleep(250 * time.Millisecond) + + msgs = m.localBroadcasts.NumQueued() + nodes = m.memberlist.NumMembers() } - if cnt := m.broadcasts.NumQueued(); cnt > 0 { - level.Warn(m.logger).Log("msg", "broadcast messages left in queue", "count", cnt, "nodes", m.memberlist.NumMembers()) + if msgs > 0 { + level.Warn(m.logger).Log("msg", "locally-generated broadcast messages left in the queue", "count", msgs, "nodes", nodes) } err := m.memberlist.Leave(m.cfg.LeaveTimeout) @@ -972,11 +984,7 @@ outer: m.casSuccesses.Inc() m.notifyWatchers(key) - if m.State() == services.Running { - m.broadcastNewValue(key, change, newver, codec) - } else { - level.Warn(m.logger).Log("msg", "skipped broadcasting CAS update because memberlist KV is shutting down", "key", key) - } + m.broadcastNewValue(key, change, newver, codec, true) } return nil @@ -1034,7 +1042,12 @@ func (m *KV) trySingleCas(key string, codec codec.Codec, f func(in interface{}) return change, newver, retry, nil } -func (m *KV) broadcastNewValue(key string, change Mergeable, version uint, codec codec.Codec) { +func (m *KV) broadcastNewValue(key string, change Mergeable, version uint, codec codec.Codec, locallyGenerated bool) { + if locallyGenerated && m.State() != services.Running { + level.Warn(m.logger).Log("msg", "skipped broadcasting of locally-generated update because memberlist KV is shutting down", "key", key) + return + } + data, err := codec.Encode(change) if err != 
nil { level.Error(m.logger).Log("msg", "failed to encode change", "key", key, "version", version, "err", err) @@ -1058,7 +1071,25 @@ func (m *KV) broadcastNewValue(key string, change Mergeable, version uint, codec Changes: change.MergeContent(), }) - m.queueBroadcast(key, change.MergeContent(), version, pairData) + l := len(pairData) + b := ringBroadcast{ + key: key, + content: change.MergeContent(), + version: version, + msg: pairData, + finished: func(ringBroadcast) { + m.totalSizeOfBroadcastMessagesInQueue.Sub(float64(l)) + }, + logger: m.logger, + } + + m.totalSizeOfBroadcastMessagesInQueue.Add(float64(l)) + + if locallyGenerated { + m.localBroadcasts.QueueBroadcast(b) + } else { + m.gossipBroadcasts.QueueBroadcast(b) + } } // NodeMeta is method from Memberlist Delegate interface @@ -1153,7 +1184,7 @@ func (m *KV) processValueUpdate(workerCh <-chan valueUpdate, key string) { m.notifyWatchers(key) // Don't resend original message, but only changes. - m.broadcastNewValue(key, mod, version, update.codec) + m.broadcastNewValue(key, mod, version, update.codec, false) } case <-m.shutdown: @@ -1163,24 +1194,6 @@ func (m *KV) processValueUpdate(workerCh <-chan valueUpdate, key string) { } } -func (m *KV) queueBroadcast(key string, content []string, version uint, message []byte) { - l := len(message) - - b := ringBroadcast{ - key: key, - content: content, - version: version, - msg: message, - finished: func(ringBroadcast) { - m.totalSizeOfBroadcastMessagesInQueue.Sub(float64(l)) - }, - logger: m.logger, - } - - m.totalSizeOfBroadcastMessagesInQueue.Add(float64(l)) - m.broadcasts.QueueBroadcast(b) -} - // GetBroadcasts is method from Memberlist Delegate interface // It returns all pending broadcasts (within the size limit) func (m *KV) GetBroadcasts(overhead, limit int) [][]byte { @@ -1188,7 +1201,18 @@ func (m *KV) GetBroadcasts(overhead, limit int) [][]byte { return nil } - return m.broadcasts.GetBroadcasts(overhead, limit) + // Prioritize locally-generated messages + 
msgs := m.localBroadcasts.GetBroadcasts(overhead, limit) + + // Decrease limit for each message we got from locally-generated broadcasts. + for _, m := range msgs { + limit -= overhead + len(m) + } + + if limit > 0 { + msgs = append(msgs, m.gossipBroadcasts.GetBroadcasts(overhead, limit)...) + } + return msgs } // LocalState is method from Memberlist Delegate interface @@ -1335,7 +1359,7 @@ func (m *KV) MergeRemoteState(data []byte, _ bool) { level.Error(m.logger).Log("msg", "failed to store received value", "key", kvPair.Key, "err", err) } else if newver > 0 { m.notifyWatchers(kvPair.Key) - m.broadcastNewValue(kvPair.Key, change, newver, codec) + m.broadcastNewValue(kvPair.Key, change, newver, codec, false) } } diff --git a/kv/memberlist/memberlist_client_test.go b/kv/memberlist/memberlist_client_test.go index 383471c5b..1f6d60b7d 100644 --- a/kv/memberlist/memberlist_client_test.go +++ b/kv/memberlist/memberlist_client_test.go @@ -718,7 +718,7 @@ func testMultipleClientsWithConfigGenerator(t *testing.T, members int, configGen startTime := time.Now() firstKv := clients[0] - ctx, cancel := context.WithTimeout(context.Background(), casInterval*3/2) // Watch for 1.5 cas intervals. + ctx, cancel := context.WithTimeout(context.Background(), casInterval*3) // Watch for 3x cas intervals. updates := 0 firstKv.WatchKey(ctx, key, func(in interface{}) bool { updates++ @@ -1647,3 +1647,66 @@ func (p *delayedDNSProviderMock) Resolve(_ context.Context, addrs []string) erro func (p delayedDNSProviderMock) Addresses() []string { return p.resolved } + +func TestGetBroadcastsPrefersLocalUpdates(t *testing.T) { + codec := dataCodec{} + + cfg := KVConfig{ + TCPTransport: TCPTransportConfig{ + BindAddrs: getLocalhostAddrs(), + }, + } + + // We will be checking for number of messages in the broadcast queue, so make sure to use known retransmit factor. 
+ cfg.RetransmitMult = 1 + cfg.Codecs = append(cfg.Codecs, codec) + + reg := prometheus.NewRegistry() + kv := NewKV(cfg, log.NewNopLogger(), &dnsProviderMock{}, reg) + require.NoError(t, services.StartAndAwaitRunning(context.Background(), kv)) + defer services.StopAndAwaitTerminated(context.Background(), kv) //nolint:errcheck + + now := time.Now() + + smallUpdate := &data{Members: map[string]member{"a": {Timestamp: now.Unix(), State: JOINING}}} + bigUpdate := &data{Members: map[string]member{"b": {Timestamp: now.Unix(), State: JOINING}, "c": {Timestamp: now.Unix(), State: JOINING}, "d": {Timestamp: now.Unix(), State: JOINING}}} + mediumUpdate := &data{Members: map[string]member{"d": {Timestamp: now.Unix(), State: JOINING}, "e": {Timestamp: now.Unix(), State: JOINING}}} + + // No broadcast messages from KV at the beginning. + require.Equal(t, 0, len(kv.GetBroadcasts(0, math.MaxInt32))) + + // Check that locally-generated broadcast messages will be prioritized and sent out first, even if they are enqueued later or are smaller than other messages in the queue. 
+ kv.broadcastNewValue("non-local", smallUpdate, 1, codec, false) + kv.broadcastNewValue("non-local", bigUpdate, 2, codec, false) + kv.broadcastNewValue("local", smallUpdate, 1, codec, true) + kv.broadcastNewValue("local", bigUpdate, 2, codec, true) + kv.broadcastNewValue("local", mediumUpdate, 3, codec, true) + + err := testutil.GatherAndCompare(reg, bytes.NewBufferString(` + # HELP memberlist_client_messages_in_broadcast_queue Number of user messages in the broadcast queue + # TYPE memberlist_client_messages_in_broadcast_queue gauge + memberlist_client_messages_in_broadcast_queue{queue="gossip"} 2 + memberlist_client_messages_in_broadcast_queue{queue="local"} 3 + `), "memberlist_client_messages_in_broadcast_queue") + require.NoError(t, err) + + msgs := kv.GetBroadcasts(0, 10000) + require.Len(t, msgs, 5) // we get all 5 messages + require.Equal(t, "local", getKey(t, msgs[0])) + require.Equal(t, "local", getKey(t, msgs[1])) + require.Equal(t, "local", getKey(t, msgs[2])) + require.Equal(t, "non-local", getKey(t, msgs[3])) + require.Equal(t, "non-local", getKey(t, msgs[4])) + + // Check that TransmitLimitedQueue.GetBroadcasts preferred larger messages (it does that). + require.True(t, len(msgs[0]) > len(msgs[1])) // Bigger local message is returned before medium one. + require.True(t, len(msgs[1]) > len(msgs[2])) // Medium local message is returned before small one. 
+ require.True(t, len(msgs[3]) > len(msgs[4])) // Bigger non-local message is returned before smaller one +} + +func getKey(t *testing.T, msg []byte) string { + kvPair := KeyValuePair{} + err := kvPair.Unmarshal(msg) + require.NoError(t, err) + return kvPair.Key +} diff --git a/kv/memberlist/metrics.go b/kv/memberlist/metrics.go index 75a6b2324..0f09c5d71 100644 --- a/kv/memberlist/metrics.go +++ b/kv/memberlist/metrics.go @@ -71,15 +71,33 @@ func (m *KV) createAndRegisterMetrics() { Help: "Total size of pulled state", }) - m.numberOfBroadcastMessagesInQueue = promauto.With(m.registerer).NewGaugeFunc(prometheus.GaugeOpts{ - Namespace: m.cfg.MetricsNamespace, - Subsystem: subsystem, - Name: "messages_in_broadcast_queue", - Help: "Number of user messages in the broadcast queue", + const queueMetricName = "messages_in_broadcast_queue" + const queueMetricHelp = "Number of user messages in the broadcast queue" + + m.numberOfGossipMessagesInQueue = promauto.With(m.registerer).NewGaugeFunc(prometheus.GaugeOpts{ + Namespace: m.cfg.MetricsNamespace, + Subsystem: subsystem, + Name: queueMetricName, + Help: queueMetricHelp, + ConstLabels: map[string]string{"queue": "gossip"}, + }, func() float64 { + // Queues are not set before Starting state + if m.State() == services.Running || m.State() == services.Stopping { + return float64(m.gossipBroadcasts.NumQueued()) + } + return 0 + }) + + m.numberOfLocalMessagesInQueue = promauto.With(m.registerer).NewGaugeFunc(prometheus.GaugeOpts{ + Namespace: m.cfg.MetricsNamespace, + Subsystem: subsystem, + Name: queueMetricName, + Help: queueMetricHelp, + ConstLabels: map[string]string{"queue": "local"}, }, func() float64 { - // m.broadcasts is not set before Starting state + // Queues are not set before Starting state if m.State() == services.Running || m.State() == services.Stopping { - return float64(m.broadcasts.NumQueued()) + return float64(m.localBroadcasts.NumQueued()) } return 0 })