Merge branch 'release-5.4' into cherry-pick-7108-to-release-5.4
HuSharp committed Sep 13, 2024
2 parents 78df3c2 + 41c6bc2 commit 3113929
Showing 19 changed files with 180 additions and 63 deletions.
17 changes: 3 additions & 14 deletions .github/workflows/check.yaml
@@ -4,22 +4,11 @@ jobs:
statics:
runs-on: ubuntu-latest
steps:
- uses: actions/setup-go@v2
with:
go-version: 1.16
- name: Checkout code
uses: actions/checkout@v2
- name: Restore cache
uses: actions/cache@v2
uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
path: |
~/go/pkg/mod
~/.cache/go-build
**/.tools
**/.dashboard_download_cache
key: ${{ runner.os }}-golang-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-golang
go-version: 1.16
- name: Make Check
run: |
make build
2 changes: 1 addition & 1 deletion .github/workflows/label.yaml
@@ -7,7 +7,7 @@ jobs:
add_labels:
runs-on: ubuntu-latest
steps:
- uses: actions/github-script@v4
- uses: actions/github-script@v7
name: Add labels
with:
script: |
29 changes: 9 additions & 20 deletions .github/workflows/pd-tests.yaml
@@ -20,23 +20,11 @@ jobs:
outputs:
job-total: ${{ strategy.job-total }}
steps:
- uses: actions/setup-go@v2
with:
go-version: 1.16
- name: Checkout code
uses: actions/checkout@v2
- name: Restore cache
uses: actions/cache@v2
uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
path: |
~/go/pkg/mod
~/.cache/go-build
**/.tools
**/.dashboard_download_cache
key: ${{ runner.os }}-go-${{ matrix.worker_id }}-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-${{ matrix.worker_id }}
${{ runner.os }}-go-
go-version: 1.16
- name: Dispatch Packages
id: packages-units
env:
@@ -62,20 +50,21 @@ jobs:
mv covprofile covprofile_$WORKER_ID
sed -i "/failpoint_binding/d" covprofile_$WORKER_ID
- name: Upload coverage result ${{ matrix.worker_id }}
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v4
with:
name: cover-reports
name: cover-reports-${{ matrix.worker_id }}
path: covprofile_${{ matrix.worker_id }}
report-coverage:
needs: chunks
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
uses: actions/checkout@v4
- name: Download chunk report
uses: actions/download-artifact@v2
uses: actions/download-artifact@v4
with:
name: cover-reports
pattern: cover-reports-*
merge-multiple: true
- name: Merge
env:
TOTAL_JOBS: ${{needs.chunks.outputs.job-total}}
6 changes: 3 additions & 3 deletions .github/workflows/tso-consistency-test.yaml
@@ -8,10 +8,10 @@ jobs:
tso-consistency-test:
runs-on: ubuntu-latest
steps:
- uses: actions/setup-go@v2
- name: Checkout code
uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version: 1.16
- name: Checkout code
uses: actions/checkout@v2
- name: Make TSO Consistency Test
run: make test-tso-consistency
6 changes: 3 additions & 3 deletions .github/workflows/tso-function-test.yaml
@@ -12,10 +12,10 @@ jobs:
tso-function-test:
runs-on: ubuntu-latest
steps:
- uses: actions/setup-go@v2
- name: Checkout code
uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version: 1.16
- name: Checkout code
uses: actions/checkout@v2
- name: Make TSO Function Test
run: make test-tso-function
27 changes: 27 additions & 0 deletions OWNERS
@@ -0,0 +1,27 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
- AndreMouche
- binshi-bing
- bufferflies
- CabinfeverB
- Connor1996
- disksing
- huachaohuang
- HunDunDM
- HuSharp
- JmPotato
- lhy1024
- nolouch
- overvenus
- qiuyesuifeng
- rleungx
- siddontang
- Yisaer
- zhouqiang-cl
reviewers:
- BusyJay
- howardlau1999
- Luffbee
- okJiang
- shafreeck
- xhebox
2 changes: 1 addition & 1 deletion pkg/mock/mockcluster/mockcluster.go
@@ -166,7 +166,7 @@ func (mc *Cluster) AllocPeer(storeID uint64) (*metapb.Peer, error) {
func (mc *Cluster) initRuleManager() {
if mc.RuleManager == nil {
mc.RuleManager = placement.NewRuleManager(core.NewStorage(kv.NewMemoryKV()), mc, mc.GetOpts())
mc.RuleManager.Initialize(int(mc.GetReplicationConfig().MaxReplicas), mc.GetReplicationConfig().LocationLabels)
mc.RuleManager.Initialize(int(mc.GetReplicationConfig().MaxReplicas), mc.GetReplicationConfig().LocationLabels, mc.GetReplicationConfig().IsolationLevel)
}
}

33 changes: 31 additions & 2 deletions server/api/label_test.go
@@ -24,6 +24,7 @@ import (
"github.com/pingcap/kvproto/pkg/pdpb"
"github.com/tikv/pd/server"
"github.com/tikv/pd/server/config"
"github.com/tikv/pd/server/core"
)

var _ = Suite(&testLabelsStoreSuite{})
@@ -260,19 +261,47 @@ func (s *testStrictlyLabelsStoreSuite) TestStoreMatch(c *C) {
valid: false,
expectError: "key matching the label was not found",
},
{
store: &metapb.Store{
Id: 3,
Address: "tiflash1",
State: metapb.StoreState_Up,
Labels: []*metapb.StoreLabel{
{
Key: "zone",
Value: "us-west-1",
},
{
Key: "disk",
Value: "ssd",
},
{
Key: core.EngineKey,
Value: core.EngineTiFlash,
},
},
Version: "3.0.0",
},
valid: true,
expectError: "placement rules is disabled",
},
}

for _, t := range cases {
_, err := s.grpcSvr.PutStore(context.Background(), &pdpb.PutStoreRequest{
Header: &pdpb.RequestHeader{ClusterId: s.svr.ClusterID()},
Store: &metapb.Store{
Id: t.store.Id,
Address: fmt.Sprintf("tikv%d", t.store.Id),
Address: t.store.Address,
State: t.store.State,
Labels: t.store.Labels,
Version: t.store.Version,
},
})
if t.store.Address == "tiflash1" {
c.Assert(strings.Contains(err.Error(), t.expectError), IsTrue)
continue
}
if t.valid {
c.Assert(err, IsNil)
} else {
@@ -287,7 +316,7 @@ func (s *testStrictlyLabelsStoreSuite) TestStoreMatch(c *C) {
Header: &pdpb.RequestHeader{ClusterId: s.svr.ClusterID()},
Store: &metapb.Store{
Id: t.store.Id,
Address: fmt.Sprintf("tikv%d", t.store.Id),
Address: t.store.Address,
State: t.store.State,
Labels: t.store.Labels,
Version: t.store.Version,
4 changes: 3 additions & 1 deletion server/api/operator_test.go
@@ -350,7 +350,9 @@ func (s *testTransferRegionOperatorSuite) TestTransferRegionWithPlacementRule(c
if tc.placementRuleEnable {
err := s.svr.GetRaftCluster().GetRuleManager().Initialize(
s.svr.GetRaftCluster().GetOpts().GetMaxReplicas(),
s.svr.GetRaftCluster().GetOpts().GetLocationLabels())
s.svr.GetRaftCluster().GetOpts().GetLocationLabels(),
s.svr.GetRaftCluster().GetOpts().GetIsolationLevel(),
)
c.Assert(err, IsNil)
}
if len(tc.rules) > 0 {
5 changes: 4 additions & 1 deletion server/cluster/cluster.go
@@ -240,7 +240,7 @@ func (c *RaftCluster) Start(s Server) error {

c.ruleManager = placement.NewRuleManager(c.storage, c, c.GetOpts())
if c.opt.IsPlacementRulesEnabled() {
err = c.ruleManager.Initialize(c.opt.GetMaxReplicas(), c.opt.GetLocationLabels())
err = c.ruleManager.Initialize(c.opt.GetMaxReplicas(), c.opt.GetLocationLabels(), c.opt.GetIsolationLevel())
if err != nil {
return err
}
@@ -1027,6 +1027,9 @@ func (c *RaftCluster) checkStoreLabels(s *core.StoreInfo) error {
}
for _, label := range s.GetLabels() {
key := label.GetKey()
if key == core.EngineKey {
continue
}
if _, ok := keysSet[key]; !ok {
log.Warn("not found the key match with the store label",
zap.Stringer("store", s.GetMeta()),
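The second hunk above makes the strict label check exempt the reserved engine label, so stores that advertise an engine (for example TiFlash's engine=tiflash label, exercised by the new label_test.go case) are no longer rejected when strictly-match-label is enabled. A minimal sketch of the resulting loop, mirroring the diff with the rest of the function elided:

    // Sketch of the label loop in checkStoreLabels after this change; keysSet
    // holds the configured location-label keys, as in the surrounding function.
    for _, label := range s.GetLabels() {
        key := label.GetKey()
        // The reserved engine label (core.EngineKey) is set by the store itself,
        // not by the location-labels config, so it is skipped here.
        if key == core.EngineKey {
            continue
        }
        if _, ok := keysSet[key]; !ok {
            log.Warn("not found the key match with the store label",
                zap.Stringer("store", s.GetMeta()),
            ) // remaining fields and handling elided; unchanged by this commit
        }
    }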
4 changes: 2 additions & 2 deletions server/cluster/cluster_test.go
@@ -850,7 +850,7 @@ func (s *testClusterInfoSuite) TestOfflineAndMerge(c *C) {
storage := core.NewStorage(kv.NewMemoryKV())
cluster.ruleManager = placement.NewRuleManager(storage, cluster, cluster.GetOpts())
if opt.IsPlacementRulesEnabled() {
err := cluster.ruleManager.Initialize(opt.GetMaxReplicas(), opt.GetLocationLabels())
err := cluster.ruleManager.Initialize(opt.GetMaxReplicas(), opt.GetLocationLabels(), opt.GetIsolationLevel())
if err != nil {
panic(err)
}
@@ -1146,7 +1146,7 @@ func newTestCluster(ctx context.Context, opt *config.PersistOptions) *testCluste
rc := newTestRaftCluster(ctx, mockid.NewIDAllocator(), opt, storage, core.NewBasicCluster())
rc.ruleManager = placement.NewRuleManager(storage, rc, rc.GetOpts())
if opt.IsPlacementRulesEnabled() {
err := rc.ruleManager.Initialize(opt.GetMaxReplicas(), opt.GetLocationLabels())
err := rc.ruleManager.Initialize(opt.GetMaxReplicas(), opt.GetLocationLabels(), opt.GetIsolationLevel())
if err != nil {
panic(err)
}
7 changes: 7 additions & 0 deletions server/config/persist_options.go
@@ -252,6 +252,13 @@ func (o *PersistOptions) SetSplitMergeInterval(splitMergeInterval time.Duration)
o.SetScheduleConfig(v)
}

// SetMaxStoreDownTime sets the max store down time. It's only used for tests.
func (o *PersistOptions) SetMaxStoreDownTime(time time.Duration) {
v := o.GetScheduleConfig().Clone()
v.MaxStoreDownTime = typeutil.NewDuration(time)
o.SetScheduleConfig(v)
}

// SetStoreLimit sets a store limit for a given type and rate.
func (o *PersistOptions) SetStoreLimit(storeID uint64, typ storelimit.Type, ratePerMin float64) {
v := o.GetScheduleConfig().Clone()
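The new setter follows the same pattern as SetSplitMergeInterval just above it: clone the schedule config, override one field, and store the clone back, so a test can shrink the down-store threshold without touching a config file. A short usage sketch, assuming only the accessors already shown in this diff (illustrative, not taken from the commit):

    // opt is a *config.PersistOptions; 30s is an arbitrary test value.
    opt.SetMaxStoreDownTime(30 * time.Second)
    // The setter goes through GetScheduleConfig().Clone() and SetScheduleConfig(),
    // so the updated value is visible via the schedule config afterwards.
    fmt.Println(opt.GetScheduleConfig().MaxStoreDownTime.Duration) // 30s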
5 changes: 4 additions & 1 deletion server/election/lease.go
@@ -143,8 +143,11 @@ func (l *lease) keepAliveWorker(ctx context.Context, interval time.Duration) <-c
expire := start.Add(time.Duration(res.TTL) * time.Second)
select {
case ch <- expire:
case <-ctx1.Done():
// Here we don't use `ctx1.Done()` because we want to make sure that if the keep-alive succeeds, we can still update the expire time.
case <-ctx.Done():
}
} else {
log.Error("keep alive response ttl is zero", zap.String("purpose", l.Purpose))
}
}()

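Before this change the inner goroutine raced the successful keep-alive result against ctx1.Done(), the short-lived per-request context, so a response arriving just as that context expired could be dropped and the expire time never refreshed; waiting on the worker's outer ctx keeps the send valid until the whole keep-alive worker is cancelled. A simplified, self-contained sketch of the pattern (names and types are illustrative, not the full PD lease implementation):

    package main

    import (
        "context"
        "log"
        "time"
    )

    // deliverExpire mirrors the inner goroutine after this change: the refreshed
    // expire time is abandoned only when the long-lived worker context is done,
    // never because a per-request timeout (the old ctx1) has already fired.
    func deliverExpire(ctx context.Context, ch chan<- time.Time, start time.Time, ttl int64) {
        if ttl <= 0 {
            log.Print("keep alive response ttl is zero") // matches the new error log in the diff
            return
        }
        expire := start.Add(time.Duration(ttl) * time.Second)
        select {
        case ch <- expire: // the lease owner records the new expire time
        case <-ctx.Done(): // the keep-alive worker itself is shutting down
        }
    }

    func main() {
        ch := make(chan time.Time, 1)
        deliverExpire(context.Background(), ch, time.Now(), 3)
        log.Print((<-ch).Format(time.RFC3339))
    }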
33 changes: 33 additions & 0 deletions server/election/lease_test.go
@@ -16,9 +16,11 @@ package election

import (
"context"
"testing"
"time"

. "github.com/pingcap/check"
"github.com/stretchr/testify/require"
"github.com/tikv/pd/pkg/etcdutil"
"go.etcd.io/etcd/clientv3"
"go.etcd.io/etcd/embed"
@@ -104,3 +106,34 @@ func (s *testLeaseSuite) TestLease(c *C) {
time.Sleep((defaultLeaseTimeout + 1) * time.Second)
c.Check(lease1.IsExpired(), IsTrue)
}

func TestLeaseKeepAlive(t *testing.T) {
re := require.New(t)
cfg := etcdutil.NewTestSingleConfig()
etcd, err := embed.StartEtcd(cfg)
defer func() {
etcd.Close()
}()
re.NoError(err)

ep := cfg.LCUrls[0].String()
client, err := clientv3.New(clientv3.Config{
Endpoints: []string{ep},
})
re.NoError(err)

<-etcd.Server.ReadyNotify()

// Create the lease.
lease := &lease{
Purpose: "test_lease",
client: client,
lease: clientv3.NewLease(client),
}

re.NoError(lease.Grant(defaultLeaseTimeout))
ch := lease.keepAliveWorker(context.Background(), 2*time.Second)
time.Sleep(2 * time.Second)
<-ch
re.NoError(lease.Close())
}
3 changes: 2 additions & 1 deletion server/schedule/placement/rule_manager.go
@@ -62,7 +62,7 @@ func NewRuleManager(storage *core.Storage, storeSetInformer core.StoreSetInforme

// Initialize loads rules from storage. If Placement Rules feature is never enabled, it creates default rule that is
// compatible with previous configuration.
func (m *RuleManager) Initialize(maxReplica int, locationLabels []string) error {
func (m *RuleManager) Initialize(maxReplica int, locationLabels []string, isolationLevel string) error {
m.Lock()
defer m.Unlock()
if m.initialized {
@@ -83,6 +83,7 @@ func (m *RuleManager) Initialize(maxReplica int, locationLabels []string) error
Role: Voter,
Count: maxReplica,
LocationLabels: locationLabels,
IsolationLevel: isolationLevel,
}
if err := m.storage.SaveRule(defaultRule.StoreKey(), defaultRule); err != nil {
return err
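With the extra parameter, the default rule that Initialize creates for clusters that never enabled Placement Rules now also records the configured isolation level, and every call site in this commit threads that value through from the replication options. A sketch of the call-site pattern (it mirrors server/cluster/cluster.go above; variable names are illustrative):

    ruleManager := placement.NewRuleManager(storage, cluster, cluster.GetOpts())
    if opt.IsPlacementRulesEnabled() {
        if err := ruleManager.Initialize(
            opt.GetMaxReplicas(),    // Count of the default Voter rule
            opt.GetLocationLabels(), // LocationLabels of the default rule
            opt.GetIsolationLevel(), // new third argument: IsolationLevel of the default rule
        ); err != nil {
            return err
        }
    }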
7 changes: 4 additions & 3 deletions server/schedule/placement/rule_manager_test.go
@@ -16,6 +16,7 @@ package placement

import (
"encoding/hex"

. "github.com/pingcap/check"
"github.com/pingcap/kvproto/pkg/metapb"
"github.com/tikv/pd/pkg/codec"
Expand All @@ -34,7 +35,7 @@ func (s *testManagerSuite) SetUpTest(c *C) {
s.store = core.NewStorage(kv.NewMemoryKV())
var err error
s.manager = NewRuleManager(s.store, nil, nil)
err = s.manager.Initialize(3, []string{"zone", "rack", "host"})
err = s.manager.Initialize(3, []string{"zone", "rack", "host"}, "")
c.Assert(err, IsNil)
}

@@ -111,7 +112,7 @@ func (s *testManagerSuite) TestSaveLoad(c *C) {
}

m2 := NewRuleManager(s.store, nil, nil)
err := m2.Initialize(3, []string{"no", "labels"})
err := m2.Initialize(3, []string{"no", "labels"}, "")
c.Assert(err, IsNil)
c.Assert(m2.GetAllRules(), HasLen, 3)
c.Assert(m2.GetRule("pd", "default").String(), Equals, rules[0].String())
@@ -126,7 +127,7 @@ func (s *testManagerSuite) TestSetAfterGet(c *C) {
s.manager.SetRule(rule)

m2 := NewRuleManager(s.store, nil, nil)
err := m2.Initialize(100, []string{})
err := m2.Initialize(100, []string{}, "")
c.Assert(err, IsNil)
rule = m2.GetRule("pd", "default")
c.Assert(rule.Count, Equals, 1)