tests: refactor and make pd-ctl helper support mcs (#7120)
ref #5839

Signed-off-by: lhy1024 <[email protected]>

Co-authored-by: ti-chi-bot[bot] <108142056+ti-chi-bot[bot]@users.noreply.github.com>
lhy1024 and ti-chi-bot[bot] authored Sep 21, 2023
1 parent e2f1269 commit 96ace89
Showing 42 changed files with 289 additions and 270 deletions.
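The heart of the refactor is a small convenience helper on `TestCluster` (added in `tests/cluster.go` below) that collapses the `GetServer(GetLeader())` two-step repeated across the suites. A minimal sketch of a test using it; the scaffolding mirrors the tests in this diff but is abridged, and the test name is illustrative:

```go
package tests_test

import (
	"context"
	"testing"

	"github.com/stretchr/testify/require"
	"github.com/tikv/pd/tests"
)

func TestLeaderHelper(t *testing.T) {
	re := require.New(t)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Spin up a three-server test cluster and wait for a leader,
	// as the tests in this diff do.
	cluster, err := tests.NewTestCluster(ctx, 3)
	re.NoError(err)
	defer cluster.Destroy()
	re.NoError(cluster.RunInitialServers())
	cluster.WaitLeader()

	// Old pattern: leaderServer := cluster.GetServer(cluster.GetLeader())
	// New helper introduced by this commit:
	leaderServer := cluster.GetLeaderServer()
	re.NoError(leaderServer.BootstrapCluster())
}
```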
2 changes: 1 addition & 1 deletion tests/autoscaling/autoscaling_test.go
@@ -42,7 +42,7 @@ func TestAPI(t *testing.T) {
re.NoError(err)
cluster.WaitLeader()

-leaderServer := cluster.GetServer(cluster.GetLeader())
+leaderServer := cluster.GetLeaderServer()
re.NoError(leaderServer.BootstrapCluster())

var jsonStr = []byte(`
20 changes: 20 additions & 0 deletions tests/cluster.go
@@ -33,6 +33,7 @@ import (
"github.com/tikv/pd/pkg/errs"
"github.com/tikv/pd/pkg/id"
"github.com/tikv/pd/pkg/keyspace"
scheduling "github.com/tikv/pd/pkg/mcs/scheduling/server"
"github.com/tikv/pd/pkg/mcs/utils"
"github.com/tikv/pd/pkg/schedule/schedulers"
"github.com/tikv/pd/pkg/swaggerserver"
@@ -447,6 +448,7 @@ type TestCluster struct {
sync.Mutex
pool map[uint64]struct{}
}
+	schedulingCluster *TestSchedulingCluster
}

// ConfigOption is used to define customize settings in test.
@@ -629,6 +631,11 @@ func (c *TestCluster) GetFollower() string {
return ""
}

+// GetLeaderServer returns the leader server of all servers
+func (c *TestCluster) GetLeaderServer() *TestServer {
+	return c.GetServer(c.GetLeader())
+}

// WaitLeader is used to get leader.
// If it exceeds the maximum number of loops, it will return an empty string.
func (c *TestCluster) WaitLeader(ops ...WaitOption) string {
@@ -853,6 +860,19 @@ func (c *TestCluster) CheckTSOUnique(ts uint64) bool {
return true
}

+// GetSchedulingPrimaryServer returns the scheduling primary server.
+func (c *TestCluster) GetSchedulingPrimaryServer() *scheduling.Server {
+	if c.schedulingCluster == nil {
+		return nil
+	}
+	return c.schedulingCluster.GetPrimaryServer()
+}
+
+// SetSchedulingCluster sets the scheduling cluster.
+func (c *TestCluster) SetSchedulingCluster(cluster *TestSchedulingCluster) {
+	c.schedulingCluster = cluster
+}

// WaitOp represent the wait configuration
type WaitOp struct {
retryTimes int
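The two new hooks above let a PD test attach a scheduling microservice cluster and later reach its primary server. A rough usage sketch continuing the setup from the earlier example; the `NewTestSchedulingCluster` constructor, its signature, and the `Destroy` cleanup call are assumptions (they are not part of this diff):

```go
// Attach a scheduling microservice cluster to the TestCluster, then
// fetch its primary server. Before SetSchedulingCluster is called,
// GetSchedulingPrimaryServer returns nil.
tc, err := tests.NewTestSchedulingCluster(ctx, 1, cluster.GetConfig().GetClientURL())
re.NoError(err)
defer tc.Destroy()
cluster.SetSchedulingCluster(tc)

primary := cluster.GetSchedulingPrimaryServer()
re.NotNil(primary)
```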
6 changes: 3 additions & 3 deletions tests/compatibility/version_upgrade_test.go
@@ -38,7 +38,7 @@ func TestStoreRegister(t *testing.T) {
err = cluster.RunInitialServers()
re.NoError(err)
cluster.WaitLeader()
-leaderServer := cluster.GetServer(cluster.GetLeader())
+leaderServer := cluster.GetLeaderServer()
re.NoError(leaderServer.BootstrapCluster())

putStoreRequest := &pdpb.PutStoreRequest{
@@ -63,7 +63,7 @@
re.NoError(err)
cluster.WaitLeader()

-leaderServer = cluster.GetServer(cluster.GetLeader())
+leaderServer = cluster.GetLeaderServer()
re.NotNil(leaderServer)
newVersion := leaderServer.GetClusterVersion()
re.Equal(version, newVersion)
@@ -92,7 +92,7 @@ func TestRollingUpgrade(t *testing.T) {
err = cluster.RunInitialServers()
re.NoError(err)
cluster.WaitLeader()
-leaderServer := cluster.GetServer(cluster.GetLeader())
+leaderServer := cluster.GetLeaderServer()
re.NoError(leaderServer.BootstrapCluster())

stores := []*pdpb.PutStoreRequest{
2 changes: 1 addition & 1 deletion tests/dashboard/service_test.go
@@ -134,7 +134,7 @@ func (suite *dashboardTestSuite) testDashboard(internalProxy bool) {

cluster.WaitLeader()
servers := cluster.GetServers()
-leader := cluster.GetServer(cluster.GetLeader())
+leader := cluster.GetLeaderServer()
leaderAddr := leader.GetAddr()

// auto select node
12 changes: 6 additions & 6 deletions tests/integrations/client/client_test.go
@@ -347,7 +347,7 @@ func TestUnavailableTimeAfterLeaderIsReady(t *testing.T) {
go getTsoFunc()
go func() {
defer wg.Done()
-leader := cluster.GetServer(cluster.GetLeader())
+leader := cluster.GetLeaderServer()
leader.Stop()
re.NotEmpty(cluster.WaitLeader())
leaderReadyTime = time.Now()
@@ -362,7 +362,7 @@
go getTsoFunc()
go func() {
defer wg.Done()
-leader := cluster.GetServer(cluster.GetLeader())
+leader := cluster.GetLeaderServer()
re.NoError(failpoint.Enable("github.com/tikv/pd/client/unreachableNetwork", "return(true)"))
leader.Stop()
re.NotEmpty(cluster.WaitLeader())
@@ -596,7 +596,7 @@ func TestGetTsoFromFollowerClient2(t *testing.T) {
})

lastTS = checkTS(re, cli, lastTS)
-re.NoError(cluster.GetServer(cluster.GetLeader()).ResignLeader())
+re.NoError(cluster.GetLeaderServer().ResignLeader())
re.NotEmpty(cluster.WaitLeader())
lastTS = checkTS(re, cli, lastTS)

@@ -622,7 +622,7 @@ func runServer(re *require.Assertions, cluster *tests.TestCluster) []string {
err := cluster.RunInitialServers()
re.NoError(err)
re.NotEmpty(cluster.WaitLeader())
-leaderServer := cluster.GetServer(cluster.GetLeader())
+leaderServer := cluster.GetLeaderServer()
re.NoError(leaderServer.BootstrapCluster())

testServers := cluster.GetServers()
@@ -1439,7 +1439,7 @@ func TestPutGet(t *testing.T) {
getResp, err = client.Get(context.Background(), key)
re.NoError(err)
re.Equal([]byte("2"), getResp.GetKvs()[0].Value)
-s := cluster.GetServer(cluster.GetLeader())
+s := cluster.GetLeaderServer()
// use etcd client delete the key
_, err = s.GetEtcdClient().Delete(context.Background(), string(key))
re.NoError(err)
@@ -1459,7 +1459,7 @@ func TestClientWatchWithRevision(t *testing.T) {
endpoints := runServer(re, cluster)
client := setupCli(re, ctx, endpoints)
defer client.Close()
-s := cluster.GetServer(cluster.GetLeader())
+s := cluster.GetLeaderServer()
watchPrefix := "watch_test"
defer func() {
_, err := s.GetEtcdClient().Delete(context.Background(), watchPrefix+"test")
2 changes: 1 addition & 1 deletion tests/integrations/mcs/keyspace/tso_keyspace_group_test.go
@@ -62,7 +62,7 @@ func (suite *keyspaceGroupTestSuite) SetupTest() {
suite.NoError(err)
suite.NoError(cluster.RunInitialServers())
suite.NotEmpty(cluster.WaitLeader())
-suite.server = cluster.GetServer(cluster.GetLeader())
+suite.server = cluster.GetLeaderServer()
suite.NoError(suite.server.BootstrapCluster())
suite.backendEndpoints = suite.server.GetAddr()
suite.dialClient = &http.Client{
tests/integrations/mcs/resourcemanager/resource_manager_test.go
@@ -903,7 +903,7 @@ func (suite *resourceManagerClientTestSuite) TestBasicResourceGroupCURD() {
// Test Resource Group CURD via HTTP
finalNum = 1
getAddr := func(i int) string {
-server := suite.cluster.GetServer(suite.cluster.GetLeader())
+server := suite.cluster.GetLeaderServer()
if i%2 == 1 {
server = suite.cluster.GetServer(suite.cluster.GetFollower())
}
@@ -1298,7 +1298,7 @@ func (suite *resourceManagerClientTestSuite) TestResourceGroupControllerConfigCh
}

getAddr := func() string {
-server := suite.cluster.GetServer(suite.cluster.GetLeader())
+server := suite.cluster.GetLeaderServer()
if rand.Intn(100)%2 == 1 {
server = suite.cluster.GetServer(suite.cluster.GetFollower())
}
2 changes: 1 addition & 1 deletion tests/integrations/mcs/scheduling/api_test.go
@@ -45,7 +45,7 @@ func (suite *apiTestSuite) SetupSuite() {
suite.NoError(err)
suite.NoError(cluster.RunInitialServers())
suite.NotEmpty(cluster.WaitLeader())
-suite.server = cluster.GetServer(cluster.GetLeader())
+suite.server = cluster.GetLeaderServer()
suite.NoError(suite.server.BootstrapCluster())
suite.backendEndpoints = suite.server.GetAddr()
suite.dialClient = &http.Client{
4 changes: 2 additions & 2 deletions tests/integrations/mcs/tso/keyspace_group_manager_test.go
@@ -517,7 +517,7 @@ func TestTwiceSplitKeyspaceGroup(t *testing.T) {
re.NoError(err)
defer tc.Destroy()
tc.WaitLeader()
-leaderServer := tc.GetServer(tc.GetLeader())
+leaderServer := tc.GetLeaderServer()
re.NoError(leaderServer.BootstrapCluster())

tsoCluster, err := tests.NewTestTSOCluster(ctx, 2, pdAddr)
@@ -711,7 +711,7 @@ func TestGetTSOImmediately(t *testing.T) {
re.NoError(err)
defer tc.Destroy()
tc.WaitLeader()
-leaderServer := tc.GetServer(tc.GetLeader())
+leaderServer := tc.GetLeaderServer()
re.NoError(leaderServer.BootstrapCluster())

tsoCluster, err := tests.NewTestTSOCluster(ctx, 2, pdAddr)
2 changes: 1 addition & 1 deletion tests/integrations/tso/client_test.go
@@ -389,7 +389,7 @@ func (suite *tsoClientTestSuite) TestRandomShutdown() {
if !suite.legacy {
suite.tsoCluster.WaitForDefaultPrimaryServing(re).Close()
} else {
-suite.cluster.GetServer(suite.cluster.GetLeader()).GetServer().Close()
+suite.cluster.GetLeaderServer().GetServer().Close()
}
time.Sleep(time.Duration(n) * time.Second)
}
2 changes: 1 addition & 1 deletion tests/pdctl/cluster/cluster_test.go
@@ -39,7 +39,7 @@ func TestClusterAndPing(t *testing.T) {
err = cluster.RunInitialServers()
re.NoError(err)
cluster.WaitLeader()
-err = cluster.GetServer(cluster.GetLeader()).BootstrapCluster()
+err = cluster.GetLeaderServer().BootstrapCluster()
re.NoError(err)
pdAddr := cluster.GetConfig().GetClientURL()
i := strings.Index(pdAddr, "//")
36 changes: 15 additions & 21 deletions tests/pdctl/config/config_test.go
@@ -64,10 +64,10 @@ func TestConfig(t *testing.T) {
Id: 1,
State: metapb.StoreState_Up,
}
-leaderServer := cluster.GetServer(cluster.GetLeader())
+leaderServer := cluster.GetLeaderServer()
re.NoError(leaderServer.BootstrapCluster())
svr := leaderServer.GetServer()
-pdctl.MustPutStore(re, svr, store)
+tests.MustPutStore(re, cluster, store)
defer cluster.Destroy()

// config show
@@ -300,10 +300,9 @@ func TestPlacementRules(t *testing.T) {
State: metapb.StoreState_Up,
LastHeartbeat: time.Now().UnixNano(),
}
-leaderServer := cluster.GetServer(cluster.GetLeader())
+leaderServer := cluster.GetLeaderServer()
re.NoError(leaderServer.BootstrapCluster())
-svr := leaderServer.GetServer()
-pdctl.MustPutStore(re, svr, store)
+tests.MustPutStore(re, cluster, store)
defer cluster.Destroy()

output, err := pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "enable")
@@ -358,7 +357,7 @@ func TestPlacementRules(t *testing.T) {
re.Equal([2]string{"pd", "test1"}, rules2[1].Key())

// test rule region detail
-pdctl.MustPutRegion(re, cluster, 1, 1, []byte("a"), []byte("b"))
+tests.MustPutRegion(re, cluster, 1, 1, []byte("a"), []byte("b"))
fit := &placement.RegionFit{}
// need clear up args, so create new a cobra.Command. Otherwise gourp still exists.
cmd2 := pdctlCmd.GetRootCmd()
@@ -398,10 +397,9 @@ func TestPlacementRuleGroups(t *testing.T) {
State: metapb.StoreState_Up,
LastHeartbeat: time.Now().UnixNano(),
}
-leaderServer := cluster.GetServer(cluster.GetLeader())
+leaderServer := cluster.GetLeaderServer()
re.NoError(leaderServer.BootstrapCluster())
-svr := leaderServer.GetServer()
-pdctl.MustPutStore(re, svr, store)
+tests.MustPutStore(re, cluster, store)
defer cluster.Destroy()

output, err := pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "enable")
@@ -473,10 +471,9 @@ func TestPlacementRuleBundle(t *testing.T) {
State: metapb.StoreState_Up,
LastHeartbeat: time.Now().UnixNano(),
}
-leaderServer := cluster.GetServer(cluster.GetLeader())
+leaderServer := cluster.GetLeaderServer()
re.NoError(leaderServer.BootstrapCluster())
-svr := leaderServer.GetServer()
-pdctl.MustPutStore(re, svr, store)
+tests.MustPutStore(re, cluster, store)
defer cluster.Destroy()

output, err := pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "enable")
@@ -609,10 +606,9 @@ func TestReplicationMode(t *testing.T) {
State: metapb.StoreState_Up,
LastHeartbeat: time.Now().UnixNano(),
}
-leaderServer := cluster.GetServer(cluster.GetLeader())
+leaderServer := cluster.GetLeaderServer()
re.NoError(leaderServer.BootstrapCluster())
-svr := leaderServer.GetServer()
-pdctl.MustPutStore(re, svr, store)
+tests.MustPutStore(re, cluster, store)
defer cluster.Destroy()

conf := config.ReplicationModeConfig{
@@ -668,10 +664,9 @@ func TestUpdateDefaultReplicaConfig(t *testing.T) {
Id: 1,
State: metapb.StoreState_Up,
}
-leaderServer := cluster.GetServer(cluster.GetLeader())
+leaderServer := cluster.GetLeaderServer()
re.NoError(leaderServer.BootstrapCluster())
-svr := leaderServer.GetServer()
-pdctl.MustPutStore(re, svr, store)
+tests.MustPutStore(re, cluster, store)
defer cluster.Destroy()

checkMaxReplicas := func(expect uint64) {
@@ -813,10 +808,9 @@ func TestPDServerConfig(t *testing.T) {
State: metapb.StoreState_Up,
LastHeartbeat: time.Now().UnixNano(),
}
-leaderServer := cluster.GetServer(cluster.GetLeader())
+leaderServer := cluster.GetLeaderServer()
re.NoError(leaderServer.BootstrapCluster())
-svr := leaderServer.GetServer()
-pdctl.MustPutStore(re, svr, store)
+tests.MustPutStore(re, cluster, store)
defer cluster.Destroy()

output, err := pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "show", "server")
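Throughout config_test.go the per-test store and region setup also moves from the `pdctl` test helpers to cluster-aware ones in the `tests` package: the new helpers take the whole `*tests.TestCluster` instead of the leader's `*server.Server`, which is what lets them keep working when the scheduling microservice (mcs) is enabled, per the commit title. A sketch of the resulting call-site pattern inside a test body, with `metapb` from `github.com/pingcap/kvproto/pkg/metapb` as in the tests above:

```go
store := &metapb.Store{
	Id:            1,
	State:         metapb.StoreState_Up,
	LastHeartbeat: time.Now().UnixNano(),
}
leaderServer := cluster.GetLeaderServer()
re.NoError(leaderServer.BootstrapCluster())

// The helpers now receive the cluster rather than a single server handle.
tests.MustPutStore(re, cluster, store)
tests.MustPutRegion(re, cluster, 1, 1, []byte("a"), []byte("b"))
```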
2 changes: 1 addition & 1 deletion tests/pdctl/health/health_test.go
@@ -36,7 +36,7 @@ func TestHealth(t *testing.T) {
err = tc.RunInitialServers()
re.NoError(err)
tc.WaitLeader()
-leaderServer := tc.GetServer(tc.GetLeader())
+leaderServer := tc.GetLeaderServer()
re.NoError(leaderServer.BootstrapCluster())
pdAddr := tc.GetConfig().GetClientURL()
cmd := pdctlCmd.GetRootCmd()
(Diff truncated: 13 of the 42 changed files are shown above.)
