From ce7b2dfcf130a45f44c313d705e7186f5b6124f4 Mon Sep 17 00:00:00 2001 From: buffer <1045931706@qq.com> Date: Thu, 10 Aug 2023 13:33:25 +0800 Subject: [PATCH 1/3] This is an automated cherry-pick of #6919 close tikv/pd#6918 Signed-off-by: ti-chi-bot --- server/cluster/cluster.go | 86 +++++++++++++++++++++++++ server/cluster/cluster_test.go | 111 +++++++++++++++++++++++++++++++++ 2 files changed, 197 insertions(+) diff --git a/server/cluster/cluster.go b/server/cluster/cluster.go index 9fa4591430e..5786cb8776c 100644 --- a/server/cluster/cluster.go +++ b/server/cluster/cluster.go @@ -15,6 +15,7 @@ package cluster import ( + "bytes" "context" "fmt" "math" @@ -323,6 +324,12 @@ func (c *RaftCluster) runSyncConfig() { func syncConfig(manager *config.StoreConfigManager, stores []*core.StoreInfo) bool { for index := 0; index < len(stores); index++ { + select { + case <-c.ctx.Done(): + log.Info("stop sync store config job due to server shutdown") + return + default: + } // filter out the stores that are tiflash store := stores[index] if core.IsStoreContainLabel(store.GetMeta(), core.EngineKey, core.EngineTiFlash) { @@ -335,7 +342,15 @@ func syncConfig(manager *config.StoreConfigManager, stores []*core.StoreInfo) bo } // it will try next store if the current store is failed. address := netutil.ResolveLoopBackAddr(stores[index].GetStatusAddress(), stores[index].GetAddress()) +<<<<<<< HEAD if err := manager.ObserveConfig(address); err != nil { +======= + switchRaftV2, err := c.observeStoreConfig(c.ctx, address) + if err != nil { + // delete the store if it is failed and retry next store. + stores = append(stores[:index], stores[index+1:]...) + index-- +>>>>>>> 38d087fec (config: sync store config in time (#6919)) storeSyncConfigEvent.WithLabelValues(address, "fail").Inc() log.Debug("sync store config failed, it will try next store", zap.Error(err)) continue @@ -344,7 +359,78 @@ func syncConfig(manager *config.StoreConfigManager, stores []*core.StoreInfo) bo // it will only try one store. return true } +<<<<<<< HEAD return false +======= + return false, false +} + +// observeStoreConfig is used to observe the store config changes and +// return whether if the new config changes the engine to raft-kv2. +func (c *RaftCluster) observeStoreConfig(ctx context.Context, address string) (bool, error) { + cfg, err := c.fetchStoreConfigFromTiKV(ctx, address) + if err != nil { + return false, err + } + oldCfg := c.opt.GetStoreConfig() + if cfg == nil || oldCfg.Equal(cfg) { + return false, nil + } + log.Info("sync the store config successful", + zap.String("store-address", address), + zap.String("store-config", cfg.String()), + zap.String("old-config", oldCfg.String())) + return c.updateStoreConfig(oldCfg, cfg) +} + +// updateStoreConfig updates the store config. This is extracted for testing. +func (c *RaftCluster) updateStoreConfig(oldCfg, cfg *config.StoreConfig) (bool, error) { + cfg.Adjust() + c.opt.SetStoreConfig(cfg) + return oldCfg.Storage.Engine != config.RaftstoreV2 && cfg.Storage.Engine == config.RaftstoreV2, nil +} + +// fetchStoreConfigFromTiKV tries to fetch the config from the TiKV store URL. 
+func (c *RaftCluster) fetchStoreConfigFromTiKV(ctx context.Context, statusAddress string) (*config.StoreConfig, error) { + cfg := &config.StoreConfig{} + failpoint.Inject("mockFetchStoreConfigFromTiKV", func(val failpoint.Value) { + if regionMaxSize, ok := val.(string); ok { + cfg.RegionMaxSize = regionMaxSize + cfg.Storage.Engine = config.RaftstoreV2 + } + failpoint.Return(cfg, nil) + }) + if c.httpClient == nil { + return nil, fmt.Errorf("failed to get store config due to nil client") + } + var url string + if netutil.IsEnableHTTPS(c.httpClient) { + url = fmt.Sprintf("%s://%s/config", "https", statusAddress) + } else { + url = fmt.Sprintf("%s://%s/config", "http", statusAddress) + } + ctx, cancel := context.WithTimeout(ctx, clientTimeout) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, bytes.NewBuffer(nil)) + if err != nil { + cancel() + return nil, fmt.Errorf("failed to create store config http request: %w", err) + } + resp, err := c.httpClient.Do(req) + if err != nil { + cancel() + return nil, err + } + defer resp.Body.Close() + body, err := io.ReadAll(resp.Body) + cancel() + if err != nil { + return nil, err + } + if err := json.Unmarshal(body, cfg); err != nil { + return nil, err + } + return cfg, nil +>>>>>>> 38d087fec (config: sync store config in time (#6919)) } // LoadClusterInfo loads cluster related info. diff --git a/server/cluster/cluster_test.go b/server/cluster/cluster_test.go index b19fb71bce1..0d68641aaf2 100644 --- a/server/cluster/cluster_test.go +++ b/server/cluster/cluster_test.go @@ -19,6 +19,8 @@ import ( "fmt" "math" "math/rand" + "net/http" + "net/http/httptest" "sync" "testing" "time" @@ -1288,7 +1290,116 @@ func TestOfflineAndMerge(t *testing.T) { } } +<<<<<<< HEAD func TestSyncConfig(t *testing.T) { +======= +func TestStoreConfigUpdate(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + _, opt, err := newTestScheduleConfig() + re.NoError(err) + tc := newTestCluster(ctx, opt) + stores := newTestStores(5, "2.0.0") + for _, s := range stores { + re.NoError(tc.putStoreLocked(s)) + } + re.Len(tc.getUpStores(), 5) + // Case1: big region. + { + body := `{ "coprocessor": { + "split-region-on-table": false, + "batch-split-limit": 2, + "region-max-size": "15GiB", + "region-split-size": "10GiB", + "region-max-keys": 144000000, + "region-split-keys": 96000000, + "consistency-check-method": "mvcc", + "perf-level": 2 + }}` + var config config.StoreConfig + re.NoError(json.Unmarshal([]byte(body), &config)) + tc.updateStoreConfig(opt.GetStoreConfig(), &config) + re.Equal(uint64(144000000), opt.GetRegionMaxKeys()) + re.Equal(uint64(96000000), opt.GetRegionSplitKeys()) + re.Equal(uint64(15*units.GiB/units.MiB), opt.GetRegionMaxSize()) + re.Equal(uint64(10*units.GiB/units.MiB), opt.GetRegionSplitSize()) + } + // Case2: empty config. + { + body := `{}` + var config config.StoreConfig + re.NoError(json.Unmarshal([]byte(body), &config)) + tc.updateStoreConfig(opt.GetStoreConfig(), &config) + re.Equal(uint64(1440000), opt.GetRegionMaxKeys()) + re.Equal(uint64(960000), opt.GetRegionSplitKeys()) + re.Equal(uint64(144), opt.GetRegionMaxSize()) + re.Equal(uint64(96), opt.GetRegionSplitSize()) + } + // Case3: raft-kv2 config. 
+ { + body := `{ "coprocessor": { + "split-region-on-table":false, + "batch-split-limit":10, + "region-max-size":"384MiB", + "region-split-size":"256MiB", + "region-max-keys":3840000, + "region-split-keys":2560000, + "consistency-check-method":"mvcc", + "enable-region-bucket":true, + "region-bucket-size":"96MiB", + "region-size-threshold-for-approximate":"384MiB", + "region-bucket-merge-size-ratio":0.33 + }, + "storage":{ + "engine":"raft-kv2" + }}` + var config config.StoreConfig + re.NoError(json.Unmarshal([]byte(body), &config)) + tc.updateStoreConfig(opt.GetStoreConfig(), &config) + re.Equal(uint64(96), opt.GetRegionBucketSize()) + re.True(opt.IsRaftKV2()) + } +} + +func TestSyncConfigContext(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + _, opt, err := newTestScheduleConfig() + re.NoError(err) + tc := newTestCluster(ctx, opt) + tc.httpClient = &http.Client{} + + server := httptest.NewServer(http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) { + time.Sleep(time.Second * 100) + cfg := &config.StoreConfig{} + b, err := json.Marshal(cfg) + if err != nil { + res.WriteHeader(http.StatusInternalServerError) + res.Write([]byte(fmt.Sprintf("failed setting up test server: %s", err))) + return + } + + res.WriteHeader(http.StatusOK) + res.Write(b) + })) + stores := newTestStores(1, "2.0.0") + for _, s := range stores { + re.NoError(tc.putStoreLocked(s)) + } + // trip schema header + now := time.Now() + stores[0].GetMeta().StatusAddress = server.URL[7:] + synced, _ := tc.syncStoreConfig(tc.GetStores()) + re.False(synced) + re.Less(time.Since(now), clientTimeout*2) +} + +func TestStoreConfigSync(t *testing.T) { +>>>>>>> 38d087fec (config: sync store config in time (#6919)) re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() From 9dbc9bfcbe0a819302d89b41b7013007eee484f8 Mon Sep 17 00:00:00 2001 From: bufferflies <1045931706@qq.com> Date: Mon, 14 Aug 2023 16:26:38 +0800 Subject: [PATCH 2/3] resolve conflict Signed-off-by: bufferflies <1045931706@qq.com> --- server/cluster/cluster.go | 90 ++-------------------- server/cluster/cluster_test.go | 118 ++++++----------------------- server/config/store_config.go | 21 +++-- server/config/store_config_test.go | 7 +- 4 files changed, 48 insertions(+), 188 deletions(-) diff --git a/server/cluster/cluster.go b/server/cluster/cluster.go index 5786cb8776c..540ebdb653b 100644 --- a/server/cluster/cluster.go +++ b/server/cluster/cluster.go @@ -15,7 +15,6 @@ package cluster import ( - "bytes" "context" "fmt" "math" @@ -308,26 +307,26 @@ func (c *RaftCluster) runSyncConfig() { defer ticker.Stop() stores := c.GetStores() - syncConfig(c.storeConfigManager, stores) + syncConfig(c.ctx, c.storeConfigManager, stores) for { select { case <-c.ctx.Done(): log.Info("sync store config job is stopped") return case <-ticker.C: - if !syncConfig(c.storeConfigManager, stores) { + if !syncConfig(c.ctx, c.storeConfigManager, stores) { stores = c.GetStores() } } } } -func syncConfig(manager *config.StoreConfigManager, stores []*core.StoreInfo) bool { +func syncConfig(ctx context.Context, manager *config.StoreConfigManager, stores []*core.StoreInfo) bool { for index := 0; index < len(stores); index++ { select { - case <-c.ctx.Done(): + case <-ctx.Done(): log.Info("stop sync store config job due to server shutdown") - return + return false default: } // filter out the stores that are tiflash @@ -342,15 +341,9 @@ func syncConfig(manager *config.StoreConfigManager, 
stores []*core.StoreInfo) bo } // it will try next store if the current store is failed. address := netutil.ResolveLoopBackAddr(stores[index].GetStatusAddress(), stores[index].GetAddress()) -<<<<<<< HEAD - if err := manager.ObserveConfig(address); err != nil { -======= - switchRaftV2, err := c.observeStoreConfig(c.ctx, address) - if err != nil { - // delete the store if it is failed and retry next store. + if err := manager.ObserveConfig(ctx, address); err != nil { stores = append(stores[:index], stores[index+1:]...) index-- ->>>>>>> 38d087fec (config: sync store config in time (#6919)) storeSyncConfigEvent.WithLabelValues(address, "fail").Inc() log.Debug("sync store config failed, it will try next store", zap.Error(err)) continue @@ -359,78 +352,7 @@ func syncConfig(manager *config.StoreConfigManager, stores []*core.StoreInfo) bo // it will only try one store. return true } -<<<<<<< HEAD return false -======= - return false, false -} - -// observeStoreConfig is used to observe the store config changes and -// return whether if the new config changes the engine to raft-kv2. -func (c *RaftCluster) observeStoreConfig(ctx context.Context, address string) (bool, error) { - cfg, err := c.fetchStoreConfigFromTiKV(ctx, address) - if err != nil { - return false, err - } - oldCfg := c.opt.GetStoreConfig() - if cfg == nil || oldCfg.Equal(cfg) { - return false, nil - } - log.Info("sync the store config successful", - zap.String("store-address", address), - zap.String("store-config", cfg.String()), - zap.String("old-config", oldCfg.String())) - return c.updateStoreConfig(oldCfg, cfg) -} - -// updateStoreConfig updates the store config. This is extracted for testing. -func (c *RaftCluster) updateStoreConfig(oldCfg, cfg *config.StoreConfig) (bool, error) { - cfg.Adjust() - c.opt.SetStoreConfig(cfg) - return oldCfg.Storage.Engine != config.RaftstoreV2 && cfg.Storage.Engine == config.RaftstoreV2, nil -} - -// fetchStoreConfigFromTiKV tries to fetch the config from the TiKV store URL. -func (c *RaftCluster) fetchStoreConfigFromTiKV(ctx context.Context, statusAddress string) (*config.StoreConfig, error) { - cfg := &config.StoreConfig{} - failpoint.Inject("mockFetchStoreConfigFromTiKV", func(val failpoint.Value) { - if regionMaxSize, ok := val.(string); ok { - cfg.RegionMaxSize = regionMaxSize - cfg.Storage.Engine = config.RaftstoreV2 - } - failpoint.Return(cfg, nil) - }) - if c.httpClient == nil { - return nil, fmt.Errorf("failed to get store config due to nil client") - } - var url string - if netutil.IsEnableHTTPS(c.httpClient) { - url = fmt.Sprintf("%s://%s/config", "https", statusAddress) - } else { - url = fmt.Sprintf("%s://%s/config", "http", statusAddress) - } - ctx, cancel := context.WithTimeout(ctx, clientTimeout) - req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, bytes.NewBuffer(nil)) - if err != nil { - cancel() - return nil, fmt.Errorf("failed to create store config http request: %w", err) - } - resp, err := c.httpClient.Do(req) - if err != nil { - cancel() - return nil, err - } - defer resp.Body.Close() - body, err := io.ReadAll(resp.Body) - cancel() - if err != nil { - return nil, err - } - if err := json.Unmarshal(body, cfg); err != nil { - return nil, err - } - return cfg, nil ->>>>>>> 38d087fec (config: sync store config in time (#6919)) } // LoadClusterInfo loads cluster related info. 
diff --git a/server/cluster/cluster_test.go b/server/cluster/cluster_test.go index 0d68641aaf2..9adce941a06 100644 --- a/server/cluster/cluster_test.go +++ b/server/cluster/cluster_test.go @@ -16,6 +16,7 @@ package cluster import ( "context" + "encoding/json" "fmt" "math" "math/rand" @@ -1290,10 +1291,7 @@ func TestOfflineAndMerge(t *testing.T) { } } -<<<<<<< HEAD func TestSyncConfig(t *testing.T) { -======= -func TestStoreConfigUpdate(t *testing.T) { re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -1306,60 +1304,26 @@ func TestStoreConfigUpdate(t *testing.T) { re.NoError(tc.putStoreLocked(s)) } re.Len(tc.getUpStores(), 5) - // Case1: big region. - { - body := `{ "coprocessor": { - "split-region-on-table": false, - "batch-split-limit": 2, - "region-max-size": "15GiB", - "region-split-size": "10GiB", - "region-max-keys": 144000000, - "region-split-keys": 96000000, - "consistency-check-method": "mvcc", - "perf-level": 2 - }}` - var config config.StoreConfig - re.NoError(json.Unmarshal([]byte(body), &config)) - tc.updateStoreConfig(opt.GetStoreConfig(), &config) - re.Equal(uint64(144000000), opt.GetRegionMaxKeys()) - re.Equal(uint64(96000000), opt.GetRegionSplitKeys()) - re.Equal(uint64(15*units.GiB/units.MiB), opt.GetRegionMaxSize()) - re.Equal(uint64(10*units.GiB/units.MiB), opt.GetRegionSplitSize()) - } - // Case2: empty config. - { - body := `{}` - var config config.StoreConfig - re.NoError(json.Unmarshal([]byte(body), &config)) - tc.updateStoreConfig(opt.GetStoreConfig(), &config) - re.Equal(uint64(1440000), opt.GetRegionMaxKeys()) - re.Equal(uint64(960000), opt.GetRegionSplitKeys()) - re.Equal(uint64(144), opt.GetRegionMaxSize()) - re.Equal(uint64(96), opt.GetRegionSplitSize()) - } - // Case3: raft-kv2 config. 
- { - body := `{ "coprocessor": { - "split-region-on-table":false, - "batch-split-limit":10, - "region-max-size":"384MiB", - "region-split-size":"256MiB", - "region-max-keys":3840000, - "region-split-keys":2560000, - "consistency-check-method":"mvcc", - "enable-region-bucket":true, - "region-bucket-size":"96MiB", - "region-size-threshold-for-approximate":"384MiB", - "region-bucket-merge-size-ratio":0.33 - }, - "storage":{ - "engine":"raft-kv2" - }}` - var config config.StoreConfig - re.NoError(json.Unmarshal([]byte(body), &config)) - tc.updateStoreConfig(opt.GetStoreConfig(), &config) - re.Equal(uint64(96), opt.GetRegionBucketSize()) - re.True(opt.IsRaftKV2()) + + testdata := []struct { + whiteList []string + maxRegionSize uint64 + updated bool + }{{ + whiteList: []string{}, + maxRegionSize: uint64(144), + updated: false, + }, { + whiteList: []string{"127.0.0.1:5"}, + maxRegionSize: uint64(10), + updated: true, + }} + + for _, v := range testdata { + tc.storeConfigManager = config.NewTestStoreConfigManager(v.whiteList) + re.Equal(uint64(144), tc.GetStoreConfig().GetRegionMaxSize()) + re.Equal(v.updated, syncConfig(tc.ctx, tc.storeConfigManager, tc.GetStores())) + re.Equal(v.maxRegionSize, tc.GetStoreConfig().GetRegionMaxSize()) } } @@ -1371,6 +1335,7 @@ func TestSyncConfigContext(t *testing.T) { _, opt, err := newTestScheduleConfig() re.NoError(err) tc := newTestCluster(ctx, opt) + tc.storeConfigManager = config.NewStoreConfigManager(http.DefaultClient) tc.httpClient = &http.Client{} server := httptest.NewServer(http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) { @@ -1393,48 +1358,11 @@ func TestSyncConfigContext(t *testing.T) { // trip schema header now := time.Now() stores[0].GetMeta().StatusAddress = server.URL[7:] - synced, _ := tc.syncStoreConfig(tc.GetStores()) + synced := syncConfig(tc.ctx, tc.storeConfigManager, stores) re.False(synced) re.Less(time.Since(now), clientTimeout*2) } -func TestStoreConfigSync(t *testing.T) { ->>>>>>> 38d087fec (config: sync store config in time (#6919)) - re := require.New(t) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - _, opt, err := newTestScheduleConfig() - re.NoError(err) - tc := newTestCluster(ctx, opt) - stores := newTestStores(5, "2.0.0") - for _, s := range stores { - re.NoError(tc.putStoreLocked(s)) - } - re.Len(tc.getUpStores(), 5) - - testdata := []struct { - whiteList []string - maxRegionSize uint64 - updated bool - }{{ - whiteList: []string{}, - maxRegionSize: uint64(144), - updated: false, - }, { - whiteList: []string{"127.0.0.1:5"}, - maxRegionSize: uint64(10), - updated: true, - }} - - for _, v := range testdata { - tc.storeConfigManager = config.NewTestStoreConfigManager(v.whiteList) - re.Equal(uint64(144), tc.GetStoreConfig().GetRegionMaxSize()) - re.Equal(v.updated, syncConfig(tc.storeConfigManager, tc.GetStores())) - re.Equal(v.maxRegionSize, tc.GetStoreConfig().GetRegionMaxSize()) - } -} - func TestUpdateStorePendingPeerCount(t *testing.T) { re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) diff --git a/server/config/store_config.go b/server/config/store_config.go index 960ea6688e7..e322f3e122e 100644 --- a/server/config/store_config.go +++ b/server/config/store_config.go @@ -15,12 +15,14 @@ package config import ( + "context" "encoding/json" "fmt" "io" "net/http" "reflect" "sync/atomic" + "time" "github.com/pingcap/log" "github.com/tikv/pd/pkg/errs" @@ -41,6 +43,7 @@ var ( defaultRegionMaxKey = uint64(1440000) // default region split key is 960000 
defaultRegionSplitKey = uint64(960000) + clientTimeout = 3 * time.Second ) // StoreConfig is the config of store like TiKV. @@ -191,8 +194,8 @@ func NewTestStoreConfigManager(whiteList []string) *StoreConfigManager { } // ObserveConfig is used to observe the config change. -func (m *StoreConfigManager) ObserveConfig(address string) error { - cfg, err := m.source.GetConfig(address) +func (m *StoreConfigManager) ObserveConfig(ctx context.Context, address string) error { + cfg, err := m.source.GetConfig(ctx, address) if err != nil { return err } @@ -222,7 +225,7 @@ func (m *StoreConfigManager) GetStoreConfig() *StoreConfig { // Source is used to get the store config. type Source interface { - GetConfig(statusAddress string) (*StoreConfig, error) + GetConfig(ctx context.Context, statusAddress string) (*StoreConfig, error) } // TiKVConfigSource is used to get the store config from TiKV. @@ -239,9 +242,15 @@ func newTiKVConfigSource(schema string, client *http.Client) *TiKVConfigSource { } // GetConfig returns the store config from TiKV. -func (s TiKVConfigSource) GetConfig(statusAddress string) (*StoreConfig, error) { +func (s TiKVConfigSource) GetConfig(ctx context.Context, statusAddress string) (*StoreConfig, error) { url := fmt.Sprintf("%s://%s/config", s.schema, statusAddress) - resp, err := s.client.Get(url) + ctx, cancel := context.WithTimeout(ctx, clientTimeout) + defer cancel() + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + if err != nil { + return nil, fmt.Errorf("failed to create store config http request: %w", err) + } + resp, err := s.client.Do(req) if err != nil { return nil, err } @@ -269,7 +278,7 @@ func newFakeSource(whiteList []string) *FakeSource { } // GetConfig returns the config. -func (f *FakeSource) GetConfig(url string) (*StoreConfig, error) { +func (f *FakeSource) GetConfig(_ context.Context, url string) (*StoreConfig, error) { if !slice.Contains(f.whiteList, url) { return nil, fmt.Errorf("[url:%s] is not in white list", url) } diff --git a/server/config/store_config_test.go b/server/config/store_config_test.go index 6916fedc929..3ab3e4900e3 100644 --- a/server/config/store_config_test.go +++ b/server/config/store_config_test.go @@ -15,6 +15,7 @@ package config import ( + "context" "crypto/tls" "encoding/json" "net/http" @@ -63,13 +64,13 @@ func TestTiKVConfig(t *testing.T) { func TestUpdateConfig(t *testing.T) { re := require.New(t) manager := NewTestStoreConfigManager([]string{"tidb.com"}) - manager.ObserveConfig("tikv.com") + manager.ObserveConfig(context.Background(), "tikv.com") re.Equal(uint64(144), manager.GetStoreConfig().GetRegionMaxSize()) - manager.ObserveConfig("tidb.com") + manager.ObserveConfig(context.Background(), "tidb.com") re.Equal(uint64(10), manager.GetStoreConfig().GetRegionMaxSize()) // case2: the config should not update if config is same expect some ignore field. 
- c, err := manager.source.GetConfig("tidb.com") + c, err := manager.source.GetConfig(context.Background(), "tidb.com") re.NoError(err) re.True(manager.GetStoreConfig().Equal(c)) From 31644accf0e59c0205c79534258c2a9a9584e869 Mon Sep 17 00:00:00 2001 From: bufferflies <1045931706@qq.com> Date: Mon, 14 Aug 2023 16:59:50 +0800 Subject: [PATCH 3/3] modify log Signed-off-by: bufferflies <1045931706@qq.com> --- server/cluster/cluster.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/cluster/cluster.go b/server/cluster/cluster.go index 540ebdb653b..f80138c92ff 100644 --- a/server/cluster/cluster.go +++ b/server/cluster/cluster.go @@ -325,7 +325,7 @@ func syncConfig(ctx context.Context, manager *config.StoreConfigManager, stores for index := 0; index < len(stores); index++ { select { case <-ctx.Done(): - log.Info("stop sync store config job due to server shutdown") + log.Info("stop sync store config job due to raft cluster exit") return false default: }
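
A minimal standalone sketch of the pattern these three patches converge on: a per-request timeout built with http.NewRequestWithContext (mirroring the 3-second clientTimeout the second patch adds to store_config.go) plus a ctx.Done() check before each store in the sync loop. The names storeConfig, fetchStoreConfig, and syncConfigs below are illustrative only, not PD's actual types or API.

```go
package storeconfig

import (
	"context"
	"encoding/json"
	"fmt"
	"net/http"
	"time"
)

// storeConfig mirrors only the field this sketch needs; the real
// config.StoreConfig in PD carries many more fields.
type storeConfig struct {
	Coprocessor struct {
		RegionMaxSize string `json:"region-max-size"`
	} `json:"coprocessor"`
}

// fetchStoreConfig issues a context-bound GET to a store's /config
// endpoint so a hung store cannot block the caller beyond timeout.
func fetchStoreConfig(ctx context.Context, client *http.Client, statusAddr string, timeout time.Duration) (*storeConfig, error) {
	ctx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, fmt.Sprintf("http://%s/config", statusAddr), nil)
	if err != nil {
		return nil, fmt.Errorf("failed to create store config http request: %w", err)
	}
	resp, err := client.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	cfg := &storeConfig{}
	if err := json.NewDecoder(resp.Body).Decode(cfg); err != nil {
		return nil, err
	}
	return cfg, nil
}

// syncConfigs walks the store list and bails out as soon as the server
// context is cancelled, mirroring the select-on-ctx.Done() guard the
// patches add to syncConfig. It reports whether any store was synced.
func syncConfigs(ctx context.Context, client *http.Client, addrs []string) bool {
	for _, addr := range addrs {
		select {
		case <-ctx.Done():
			// server is shutting down; stop the job immediately
			return false
		default:
		}
		if _, err := fetchStoreConfig(ctx, client, addr, 3*time.Second); err != nil {
			// try the next store if the current one fails
			continue
		}
		// only one store needs to be synced successfully
		return true
	}
	return false
}
```

The per-request timeout is what TestSyncConfigContext exercises: against a handler that sleeps 100 seconds, the sync call is expected to return unsynced in under clientTimeout*2 instead of hanging.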