From f666fdc62bc35fac541f9c0a074b9c2739e83a9e Mon Sep 17 00:00:00 2001 From: husharp Date: Wed, 3 Apr 2024 16:58:14 +0800 Subject: [PATCH] add ci Signed-off-by: husharp --- .github/workflows/pd-tests.yaml | 20 ++- Makefile | 9 +- client/Makefile | 22 +++- client/testutil/tempurl.go | 2 +- pkg/utils/tempurl/tempurl.go | 2 +- scripts/ci-subtask.sh | 53 ++------ tests/server/api/api_test.go | 162 ++++++++++++++---------- tests/server/api/rule_test.go | 43 ++----- tests/server/cluster/cluster_test.go | 7 +- tools/Makefile | 2 +- tools/go.mod | 2 +- tools/pd-ut/README.md | 5 +- tools/pd-ut/coverProfile.go | 176 +++++++++++++++++++++++++++ tools/pd-ut/ut.go | 52 ++++++-- 14 files changed, 388 insertions(+), 169 deletions(-) create mode 100644 tools/pd-ut/coverProfile.go diff --git a/.github/workflows/pd-tests.yaml b/.github/workflows/pd-tests.yaml index 3674e41cf8a2..022b4e627c90 100644 --- a/.github/workflows/pd-tests.yaml +++ b/.github/workflows/pd-tests.yaml @@ -25,9 +25,19 @@ jobs: strategy: fail-fast: true matrix: - worker_id: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13] + include: + - worker_id: 1 + name: 'Unit Test' + - worker_id: 2 + name: 'Tools Test' + - worker_id: 3 + name: 'Client Integration Test' + - worker_id: 4 + name: 'TSO Integration Test' + - worker_id: 5 + name: 'MicroService Integration Test' outputs: - job-total: 13 + job-total: 5 steps: - uses: actions/setup-go@v3 with: @@ -43,11 +53,11 @@ jobs: **/.tools **/.dashboard_download_cache key: ${{ runner.os }}-go-${{ matrix.worker_id }}-${{ hashFiles('**/go.sum') }} - - name: Make Test + - name: ${{ matrix.name }} env: WORKER_ID: ${{ matrix.worker_id }} - WORKER_COUNT: 13 - JOB_COUNT: 9 # 10 is tools test, 11, 12, 13 are for other integrations jobs + WORKER_COUNT: 5 + JOB_COUNT: 5 run: | make ci-test-job JOB_COUNT=$(($JOB_COUNT)) JOB_INDEX=$WORKER_ID mv covprofile covprofile_$WORKER_ID diff --git a/Makefile b/Makefile index d78ddcdd65ed..8dcf91459a2e 100644 --- a/Makefile +++ b/Makefile @@ -127,7 +127,7 @@ regions-dump: stores-dump: cd tools && CGO_ENABLED=0 go build -gcflags '$(GCFLAGS)' -ldflags '$(LDFLAGS)' -o $(BUILD_BIN_PATH)/stores-dump stores-dump/main.go pd-ut: pd-xprog - cd tools && GOEXPERIMENT=$(BUILD_GOEXPERIMENT) CGO_ENABLED=$(BUILD_TOOL_CGO_ENABLED) go build -gcflags '$(GCFLAGS)' -ldflags '$(LDFLAGS)' -o $(BUILD_BIN_PATH)/pd-ut pd-ut/ut.go + cd tools && GOEXPERIMENT=$(BUILD_GOEXPERIMENT) CGO_ENABLED=$(BUILD_TOOL_CGO_ENABLED) go build -gcflags '$(GCFLAGS)' -ldflags '$(LDFLAGS)' -o $(BUILD_BIN_PATH)/pd-ut pd-ut/ut.go pd-ut/coverProfile.go pd-xprog: cd tools && GOEXPERIMENT=$(BUILD_GOEXPERIMENT) CGO_ENABLED=$(BUILD_TOOL_CGO_ENABLED) go build -tags xprog -gcflags '$(GCFLAGS)' -ldflags '$(LDFLAGS)' -o $(BUILD_BIN_PATH)/xprog pd-ut/xprog.go @@ -245,18 +245,19 @@ SUBMODULES := $(filter $(shell find . -iname "go.mod" -exec dirname {} \;),\ test: install-tools # testing all pkgs... @$(FAILPOINT_ENABLE) - CGO_ENABLED=1 go test -tags tso_function_test,deadlock -timeout 20m -race -cover $(TEST_PKGS) || { $(FAILPOINT_DISABLE); exit 1; } + CGO_ENABLED=1 go test -tags tso_function_test,deadlock -timeout 20m -race -cover $(TEST_PKGS) -coverprofile=all || { $(FAILPOINT_DISABLE); exit 1; } @$(FAILPOINT_DISABLE) basic-test: install-tools # testing basic pkgs... 
@$(FAILPOINT_ENABLE) - go test $(BASIC_TEST_PKGS) || { $(FAILPOINT_DISABLE); exit 1; } + CGO_ENABLED=1 go test -timeout 20m -race -cover $(BASIC_TEST_PKGS) -coverprofile=basic || { $(FAILPOINT_DISABLE); exit 1; } @$(FAILPOINT_DISABLE) -ci-test-job: install-tools dashboard-ui +ci-test-job: install-tools dashboard-ui pd-ut @$(FAILPOINT_ENABLE) ./scripts/ci-subtask.sh $(JOB_COUNT) $(JOB_INDEX) || { $(FAILPOINT_DISABLE); exit 1; } + @$(CLEAN_UT_BINARY) @$(FAILPOINT_DISABLE) TSO_INTEGRATION_TEST_PKGS := $(PD_PKG)/tests/server/tso diff --git a/client/Makefile b/client/Makefile index dae53222d923..818ff9c00559 100644 --- a/client/Makefile +++ b/client/Makefile @@ -12,20 +12,30 @@ # See the License for the specific language governing permissions and # limitations under the License. +ROOT_PATH := .. GO_TOOLS_BIN_PATH := $(shell pwd)/../.tools/bin PATH := $(GO_TOOLS_BIN_PATH):$(PATH) SHELL := env PATH='$(PATH)' GOBIN='$(GO_TOOLS_BIN_PATH)' $(shell which bash) default: static tidy test -test: - CGO_ENABLE=1 go test ./... -race -cover +test: failpoint-enable + CGO_ENABLE=1 go test ./... -race -cover || { $(MAKE) failpoint-disable && exit 1; } + $(MAKE) failpoint-disable -basic-test: - CGO_ENABLE=1 go test ./... +basic-test: failpoint-enable + CGO_ENABLE=1 go test ./... || { $(MAKE) failpoint-disable && exit 1; } + $(MAKE) failpoint-disable -ci-test-job: - CGO_ENABLED=1 go test ./... -race -covermode=atomic -coverprofile=covprofile -coverpkg=../... github.com/tikv/pd/client +ci-test-job: failpoint-enable + CGO_ENABLED=1 go test ./... -v -tags deadlock -race -cover -covermode=atomic -coverprofile=covprofile -coverpkg=../... || { $(MAKE) failpoint-disable && exit 1; } + $(MAKE) failpoint-disable + +failpoint-enable: + cd $(ROOT_PATH) && $(MAKE) failpoint-enable + +failpoint-disable: + cd $(ROOT_PATH) && $(MAKE) failpoint-disable install-tools: cd .. && $(MAKE) install-tools diff --git a/client/testutil/tempurl.go b/client/testutil/tempurl.go index ac8f29fa3452..fbb072b95474 100644 --- a/client/testutil/tempurl.go +++ b/client/testutil/tempurl.go @@ -42,7 +42,7 @@ func Alloc() string { } func tryAllocTestURL() string { - l, err := net.Listen("tcp", "127.0.0.1:0") + l, err := net.Listen("tcp", "127.0.0.1:") if err != nil { log.Fatal("listen failed", zap.Error(err)) } diff --git a/pkg/utils/tempurl/tempurl.go b/pkg/utils/tempurl/tempurl.go index 421513ff0016..e39a17cb1b49 100644 --- a/pkg/utils/tempurl/tempurl.go +++ b/pkg/utils/tempurl/tempurl.go @@ -42,7 +42,7 @@ func Alloc() string { } func tryAllocTestURL() string { - l, err := net.Listen("tcp", "127.0.0.1:0") + l, err := net.Listen("tcp", "127.0.0.1:") if err != nil { log.Fatal("listen failed", errs.ZapError(err)) } diff --git a/scripts/ci-subtask.sh b/scripts/ci-subtask.sh index b9006dda5039..fcc279a9e1c5 100755 --- a/scripts/ci-subtask.sh +++ b/scripts/ci-subtask.sh @@ -4,10 +4,10 @@ ROOT_PATH=../../ -if [[ $2 -gt 9 ]]; then - # run tools tests - if [[ $2 -eq 10 ]]; then - cd ./tools && make ci-test-job && cd .. && cat ./covprofile >> covprofile || exit 1 +if [[ $2 -gt 1 ]]; then + # run tools tests in task 2 + if [[ $2 -eq 2 ]]; then + cd ./tools && make ci-test-job && cat covprofile >> ../covprofile && cd .. || exit 1 exit fi @@ -15,51 +15,16 @@ if [[ $2 -gt 9 ]]; then integrations_dir=./tests/integrations integrations_tasks=($(find "$integrations_dir" -mindepth 1 -maxdepth 1 -type d)) for t in "${integrations_tasks[@]}"; do - if [[ "$t" = "$integrations_dir/client" && $2 -eq 11 ]]; then - cd ./client && make ci-test-job && cd .. 
&& cat ./covprofile >> covprofile || exit 1 + if [[ "$t" = "$integrations_dir/client" && $2 -eq 3 ]]; then + cd ./client && make ci-test-job && cat covprofile >> ../covprofile && cd .. || exit 1 cd $integrations_dir && make ci-test-job test_name=client && cat ./client/covprofile >> "$ROOT_PATH/covprofile" || exit 1 - elif [[ "$t" = "$integrations_dir/tso" && $2 -eq 12 ]]; then + elif [[ "$t" = "$integrations_dir/tso" && $2 -eq 4 ]]; then cd $integrations_dir && make ci-test-job test_name=tso && cat ./tso/covprofile >> "$ROOT_PATH/covprofile" || exit 1 - elif [[ "$t" = "$integrations_dir/mcs" && $2 -eq 13 ]]; then + elif [[ "$t" = "$integrations_dir/mcs" && $2 -eq 5 ]]; then cd $integrations_dir && make ci-test-job test_name=mcs && cat ./mcs/covprofile >> "$ROOT_PATH/covprofile" || exit 1 fi done else - # Get package test list. - packages=($(go list ./...)) - dirs=($(find . -iname "*_test.go" -exec dirname {} \; | sort -u | sed -e "s/^\./github.com\/tikv\/pd/")) - tasks=($(comm -12 <(printf "%s\n" "${packages[@]}") <(printf "%s\n" "${dirs[@]}"))) - weight() { - [[ $1 == "github.com/tikv/pd/server/api" ]] && return 30 - [[ $1 == "github.com/tikv/pd/pkg/schedule" ]] && return 30 - [[ $1 == "github.com/tikv/pd/pkg/core" ]] && return 30 - [[ $1 == "github.com/tikv/pd/tests/server/api" ]] && return 30 - [[ $1 =~ "pd/tests" ]] && return 5 - return 1 - } - - # Create an associative array to store the weight of each task. - declare -A task_weights - for t in ${tasks[@]}; do - weight $t - task_weights[$t]=$? - done - - # Sort tasks by weight in descending order. - tasks=($(printf "%s\n" "${tasks[@]}" | sort -rn)) - - scores=($(seq "$1" | xargs -I{} echo 0)) - - res=() - for t in ${tasks[@]}; do - min_i=0 - for i in ${!scores[@]}; do - [[ ${scores[i]} -lt ${scores[$min_i]} ]] && min_i=$i - done - scores[$min_i]=$((${scores[$min_i]} + ${task_weights[$t]})) - [[ $(($min_i + 1)) -eq $2 ]] && res+=($t) - done - - CGO_ENABLED=1 go test -timeout=15m -tags deadlock -race -covermode=atomic -coverprofile=covprofile -coverpkg=./... 
${res[@]} + ./bin/pd-ut run --race --coverprofile covprofile fi diff --git a/tests/server/api/api_test.go b/tests/server/api/api_test.go index b70c688993d6..e09fb83cc18a 100644 --- a/tests/server/api/api_test.go +++ b/tests/server/api/api_test.go @@ -755,9 +755,9 @@ func TestRemovingProgress(t *testing.T) { // no store removing output := sendRequest(re, leader.GetAddr()+"/pd/api/v1/stores/progress?action=removing", http.MethodGet, http.StatusNotFound) - re.Contains((string(output)), "no progress found for the action") + re.Contains(string(output), "no progress found for the action") output = sendRequest(re, leader.GetAddr()+"/pd/api/v1/stores/progress?id=2", http.MethodGet, http.StatusNotFound) - re.Contains((string(output)), "no progress found for the given store ID") + re.Contains(string(output), "no progress found for the given store ID") // remove store 1 and store 2 _ = sendRequest(re, leader.GetAddr()+"/pd/api/v1/store/1", http.MethodDelete, http.StatusOK) @@ -776,32 +776,52 @@ func TestRemovingProgress(t *testing.T) { tests.MustPutRegion(re, cluster, 1000, 1, []byte("a"), []byte("b"), core.SetApproximateSize(20)) tests.MustPutRegion(re, cluster, 1001, 2, []byte("c"), []byte("d"), core.SetApproximateSize(10)) - // is not prepared - time.Sleep(2 * time.Second) - output = sendRequest(re, leader.GetAddr()+"/pd/api/v1/stores/progress?action=removing", http.MethodGet, http.StatusOK) - re.NoError(json.Unmarshal(output, &p)) - re.Equal("removing", p.Action) - re.Equal(0.0, p.Progress) - re.Equal(0.0, p.CurrentSpeed) - re.Equal(math.MaxFloat64, p.LeftSeconds) + testutil.Eventually(re, func() bool { + if leader.GetRaftCluster().IsPrepared() { + // wait for cluster started + url := leader.GetAddr() + "/pd/api/v1/stores/progress?action=removing" + req, _ := http.NewRequest(http.MethodGet, url, http.NoBody) + resp, err := dialClient.Do(req) + re.NoError(err) + defer resp.Body.Close() + return resp.StatusCode == http.StatusOK + } + // is not prepared + output = sendRequest(re, leader.GetAddr()+"/pd/api/v1/stores/progress?action=removing", http.MethodGet, http.StatusOK) + re.NoError(json.Unmarshal(output, &p)) + re.Equal("removing", p.Action) + re.Equal(0.0, p.Progress) + re.Equal(0.0, p.CurrentSpeed) + re.Equal(math.MaxFloat64, p.LeftSeconds) + return false + }) - leader.GetRaftCluster().SetPrepared() - time.Sleep(2 * time.Second) - output = sendRequest(re, leader.GetAddr()+"/pd/api/v1/stores/progress?action=removing", http.MethodGet, http.StatusOK) - re.NoError(json.Unmarshal(output, &p)) - re.Equal("removing", p.Action) - // store 1: (60-20)/(60+50) ~= 0.36 - // store 2: (30-10)/(30+40) ~= 0.28 - // average progress ~= (0.36+0.28)/2 = 0.32 - re.Equal("0.32", fmt.Sprintf("%.2f", p.Progress)) - // store 1: 40/10s = 4 - // store 2: 20/10s = 2 - // average speed = (2+4)/2 = 33 - re.Equal(3.0, p.CurrentSpeed) - // store 1: (20+50)/4 = 17.5s - // store 2: (10+40)/2 = 25s - // average time = (17.5+25)/2 = 21.25s - re.Equal(21.25, p.LeftSeconds) + testutil.Eventually(re, func() bool { + output = sendRequest(re, leader.GetAddr()+"/pd/api/v1/stores/progress?action=removing", http.MethodGet, http.StatusOK) + re.NoError(json.Unmarshal(output, &p)) + if p.Action != "removing" { + return false + } + // store 1: (60-20)/(60+50) ~= 0.36 + // store 2: (30-10)/(30+40) ~= 0.28 + // average progress ~= (0.36+0.28)/2 = 0.32 + if fmt.Sprintf("%.2f", p.Progress) != "0.32" { + return false + } + // store 1: 40/10s = 4 + // store 2: 20/10s = 2 + // average speed = (2+4)/2 = 33 + if p.CurrentSpeed != 3.0 { + return 
false + } + // store 1: (20+50)/4 = 17.5s + // store 2: (10+40)/2 = 25s + // average time = (17.5+25)/2 = 21.25s + if p.LeftSeconds != 21.25 { + return false + } + return true + }) output = sendRequest(re, leader.GetAddr()+"/pd/api/v1/stores/progress?id=2", http.MethodGet, http.StatusOK) re.NoError(json.Unmarshal(output, &p)) @@ -929,47 +949,67 @@ func TestPreparingProgress(t *testing.T) { } // no store preparing output := sendRequest(re, leader.GetAddr()+"/pd/api/v1/stores/progress?action=preparing", http.MethodGet, http.StatusNotFound) - re.Contains((string(output)), "no progress found for the action") + re.Contains(string(output), "no progress found for the action") output = sendRequest(re, leader.GetAddr()+"/pd/api/v1/stores/progress?id=4", http.MethodGet, http.StatusNotFound) - re.Contains((string(output)), "no progress found for the given store ID") - - // is not prepared - time.Sleep(2 * time.Second) - output = sendRequest(re, leader.GetAddr()+"/pd/api/v1/stores/progress?action=preparing", http.MethodGet, http.StatusNotFound) - re.Contains((string(output)), "no progress found for the action") - output = sendRequest(re, leader.GetAddr()+"/pd/api/v1/stores/progress?id=4", http.MethodGet, http.StatusNotFound) - re.Contains((string(output)), "no progress found for the given store ID") + re.Contains(string(output), "no progress found for the given store ID") + + testutil.Eventually(re, func() bool { + if leader.GetRaftCluster().IsPrepared() { + // wait for cluster started + url := leader.GetAddr() + "/pd/api/v1/stores/progress?action=preparing" + req, _ := http.NewRequest(http.MethodGet, url, http.NoBody) + resp, err := dialClient.Do(req) + re.NoError(err) + defer resp.Body.Close() + return resp.StatusCode == http.StatusOK + } + // is not prepared + output = sendRequest(re, leader.GetAddr()+"/pd/api/v1/stores/progress?action=preparing", http.MethodGet, http.StatusNotFound) + re.Contains(string(output), "no progress found for the action") + output = sendRequest(re, leader.GetAddr()+"/pd/api/v1/stores/progress?id=4", http.MethodGet, http.StatusNotFound) + re.Contains(string(output), "no progress found for the given store ID") + return false + }) - // size is not changed. 
- leader.GetRaftCluster().SetPrepared() - time.Sleep(2 * time.Second) - output = sendRequest(re, leader.GetAddr()+"/pd/api/v1/stores/progress?action=preparing", http.MethodGet, http.StatusOK) var p api.Progress - re.NoError(json.Unmarshal(output, &p)) - re.Equal("preparing", p.Action) - re.Equal(0.0, p.Progress) - re.Equal(0.0, p.CurrentSpeed) - re.Equal(math.MaxFloat64, p.LeftSeconds) + testutil.Eventually(re, func() bool { + output = sendRequest(re, leader.GetAddr()+"/pd/api/v1/stores/progress?action=preparing", http.MethodGet, http.StatusOK) + re.NoError(json.Unmarshal(output, &p)) + if p.Action != "preparing" || p.Progress != 0.0 || p.CurrentSpeed != 0.0 || p.LeftSeconds != math.MaxFloat64 { + return false + } + return true + }) // update size tests.MustPutRegion(re, cluster, 1000, 4, []byte(fmt.Sprintf("%20d", 1000)), []byte(fmt.Sprintf("%20d", 1001)), core.SetApproximateSize(10)) tests.MustPutRegion(re, cluster, 1001, 5, []byte(fmt.Sprintf("%20d", 1001)), []byte(fmt.Sprintf("%20d", 1002)), core.SetApproximateSize(40)) - time.Sleep(2 * time.Second) - output = sendRequest(re, leader.GetAddr()+"/pd/api/v1/stores/progress?action=preparing", http.MethodGet, http.StatusOK) - re.NoError(json.Unmarshal(output, &p)) - re.Equal("preparing", p.Action) - // store 4: 10/(210*0.9) ~= 0.05 - // store 5: 40/(210*0.9) ~= 0.21 - // average progress ~= (0.05+0.21)/2 = 0.13 - re.Equal("0.13", fmt.Sprintf("%.2f", p.Progress)) - // store 4: 10/10s = 1 - // store 5: 40/10s = 4 - // average speed = (1+4)/2 = 2.5 - re.Equal(2.5, p.CurrentSpeed) - // store 4: 179/1 ~= 179 - // store 5: 149/4 ~= 37.25 - // average time ~= (179+37.25)/2 = 108.125 - re.Equal(108.125, p.LeftSeconds) + testutil.Eventually(re, func() bool { + output := sendRequest(re, leader.GetAddr()+"/pd/api/v1/stores/progress?action=preparing", http.MethodGet, http.StatusOK) + re.NoError(json.Unmarshal(output, &p)) + if p.Action != "preparing" { + return false + } + // store 4: 10/(210*0.9) ~= 0.05 + // store 5: 40/(210*0.9) ~= 0.21 + // average progress ~= (0.05+0.21)/2 = 0.13 + if fmt.Sprintf("%.2f", p.Progress) != "0.13" { + return false + } + // store 4: 10/10s = 1 + // store 5: 40/10s = 4 + // average speed = (1+4)/2 = 2.5 + if p.CurrentSpeed != 2.5 { + return false + } + // store 4: 179/1 ~= 179 + // store 5: 149/4 ~= 37.25 + // average time ~= (179+37.25)/2 = 108.125 + if p.LeftSeconds != 108.125 { + return false + } + return true + }) output = sendRequest(re, leader.GetAddr()+"/pd/api/v1/stores/progress?id=4", http.MethodGet, http.StatusOK) re.NoError(json.Unmarshal(output, &p)) diff --git a/tests/server/api/rule_test.go b/tests/server/api/rule_test.go index 912ff83e8d51..a99236992436 100644 --- a/tests/server/api/rule_test.go +++ b/tests/server/api/rule_test.go @@ -32,7 +32,7 @@ import ( "github.com/tikv/pd/pkg/errs" "github.com/tikv/pd/pkg/schedule/labeler" "github.com/tikv/pd/pkg/schedule/placement" - "github.com/tikv/pd/pkg/utils/etcdutil" + "github.com/tikv/pd/pkg/utils/apiutil" "github.com/tikv/pd/pkg/utils/syncutil" tu "github.com/tikv/pd/pkg/utils/testutil" "github.com/tikv/pd/server/config" @@ -1200,34 +1200,6 @@ func (suite *ruleTestSuite) checkConcurrencyWith(cluster *tests.TestCluster, }) } -func (suite *ruleTestSuite) TestLargeRules() { - suite.env.RunTestInTwoModes(suite.checkLargeRules) -} - -func (suite *ruleTestSuite) checkLargeRules(cluster *tests.TestCluster) { - leaderServer := cluster.GetLeaderServer() - pdAddr := leaderServer.GetAddr() - urlPrefix := fmt.Sprintf("%s%s/api/v1", pdAddr, apiPrefix) - 
genBundlesWithRulesNum := func(num int) []placement.GroupBundle { - bundle := []placement.GroupBundle{ - { - ID: "1", - Index: 1, - Rules: make([]*placement.Rule, 0), - }, - } - for i := 0; i < num; i++ { - bundle[0].Rules = append(bundle[0].Rules, &placement.Rule{ - ID: strconv.Itoa(i), Index: i, Role: placement.Voter, Count: 1, GroupID: "1", - StartKey: []byte(strconv.Itoa(i)), EndKey: []byte(strconv.Itoa(i + 1)), - }) - } - return bundle - } - suite.postAndCheckRuleBundle(urlPrefix, genBundlesWithRulesNum(etcdutil.MaxEtcdTxnOps/2)) - suite.postAndCheckRuleBundle(urlPrefix, genBundlesWithRulesNum(etcdutil.MaxEtcdTxnOps*2)) -} - func (suite *ruleTestSuite) assertBundleEqual(re *require.Assertions, b1, b2 placement.GroupBundle) { tu.Eventually(re, func() bool { return suite.compareBundle(b1, b2) @@ -1265,8 +1237,19 @@ func (suite *ruleTestSuite) postAndCheckRuleBundle(urlPrefix string, bundle []pl re.NoError(err) tu.Eventually(re, func() bool { + // wait for cluster started + url := urlPrefix + "/config/placement-rule" + resp, err := apiutil.GetJSON(testDialClient, url, nil) + if err != nil { + return false + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return false + } + respBundle := make([]placement.GroupBundle, 0) - err = tu.CheckGetJSON(testDialClient, urlPrefix+"/config/placement-rule", nil, + err = tu.CheckGetJSON(testDialClient, url, nil, tu.StatusOK(re), tu.ExtractJSON(re, &respBundle)) re.NoError(err) if len(respBundle) != len(bundle) { diff --git a/tests/server/cluster/cluster_test.go b/tests/server/cluster/cluster_test.go index 3415c22a77b2..1f5e83f34d4b 100644 --- a/tests/server/cluster/cluster_test.go +++ b/tests/server/cluster/cluster_test.go @@ -753,20 +753,19 @@ func TestConcurrentHandleRegion(t *testing.T) { re.NoError(err) peerID, err := id.Alloc() re.NoError(err) - regionID, err := id.Alloc() - re.NoError(err) peer := &metapb.Peer{Id: peerID, StoreId: store.GetId()} regionReq := &pdpb.RegionHeartbeatRequest{ Header: testutil.NewRequestHeader(clusterID), Region: &metapb.Region{ - Id: regionID, + // mock error msg to trigger stream.Recv() + Id: 0, Peers: []*metapb.Peer{peer}, }, Leader: peer, } err = stream.Send(regionReq) re.NoError(err) - // make sure the first store can receive one response + // make sure the first store can receive one response(error msg) if i == 0 { wg.Add(1) } diff --git a/tools/Makefile b/tools/Makefile index 052f8573b629..ca09383a48f9 100644 --- a/tools/Makefile +++ b/tools/Makefile @@ -34,7 +34,7 @@ tidy: git diff --quiet go.mod go.sum ci-test-job: failpoint-enable - CGO_ENABLED=1 go test ./... -v -tags deadlock -race -cover || { $(MAKE) failpoint-disable && exit 1; } + CGO_ENABLED=1 go test ./... -v -tags deadlock -race -cover -covermode=atomic -coverprofile=covprofile -coverpkg=../... 
|| { $(MAKE) failpoint-disable && exit 1; } $(MAKE) failpoint-disable failpoint-enable: diff --git a/tools/go.mod b/tools/go.mod index 6ff30b2baf4d..f9a63b96ba39 100644 --- a/tools/go.mod +++ b/tools/go.mod @@ -35,6 +35,7 @@ require ( go.uber.org/goleak v1.2.0 go.uber.org/zap v1.26.0 golang.org/x/text v0.14.0 + golang.org/x/tools v0.14.0 google.golang.org/grpc v1.59.0 ) @@ -173,7 +174,6 @@ require ( golang.org/x/sync v0.4.0 // indirect golang.org/x/sys v0.16.0 // indirect golang.org/x/time v0.3.0 // indirect - golang.org/x/tools v0.14.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20231030173426-d783a09b4405 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b // indirect diff --git a/tools/pd-ut/README.md b/tools/pd-ut/README.md index 77b59bea4f77..e1e6378d8ade 100644 --- a/tools/pd-ut/README.md +++ b/tools/pd-ut/README.md @@ -16,7 +16,6 @@ This section describes how to use the pd-ut tool. make ut ``` - ### run by pd-ut - You should `make failpoint-enable` before running the tests. @@ -63,4 +62,8 @@ pd-ut run --junitfile xxx // test with race flag pd-ut run --race + +// test with coverprofile +pd-ut run --coverprofile xxx +go tool cover --func=xxx ``` diff --git a/tools/pd-ut/coverProfile.go b/tools/pd-ut/coverProfile.go new file mode 100644 index 000000000000..0ed1c3f3c618 --- /dev/null +++ b/tools/pd-ut/coverProfile.go @@ -0,0 +1,176 @@ +// Copyright 2024 TiKV Project Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "bufio" + "fmt" + "os" + "path" + "sort" + + "golang.org/x/tools/cover" +) + +func collectCoverProfileFile() { + // Combine all the cover file of single test function into a whole. 
+ files, err := os.ReadDir(coverFileTempDir) + if err != nil { + fmt.Println("collect cover file error:", err) + os.Exit(-1) + } + + w, err := os.Create(coverProfile) + if err != nil { + fmt.Println("create cover file error:", err) + os.Exit(-1) + } + //nolint: errcheck + defer w.Close() + w.WriteString("mode: atomic\n") + + result := make(map[string]*cover.Profile) + for _, file := range files { + if file.IsDir() { + continue + } + collectOneCoverProfileFile(result, file) + } + + w1 := bufio.NewWriter(w) + for _, prof := range result { + for _, block := range prof.Blocks { + fmt.Fprintf(w1, "%s:%d.%d,%d.%d %d %d\n", + prof.FileName, + block.StartLine, + block.StartCol, + block.EndLine, + block.EndCol, + block.NumStmt, + block.Count, + ) + } + if err := w1.Flush(); err != nil { + fmt.Println("flush data to cover profile file error:", err) + os.Exit(-1) + } + } +} + +func collectOneCoverProfileFile(result map[string]*cover.Profile, file os.DirEntry) { + f, err := os.Open(path.Join(coverFileTempDir, file.Name())) + if err != nil { + fmt.Println("open temp cover file error:", err) + os.Exit(-1) + } + //nolint: errcheck + defer f.Close() + + profs, err := cover.ParseProfilesFromReader(f) + if err != nil { + fmt.Println("parse cover profile file error:", err) + os.Exit(-1) + } + mergeProfile(result, profs) +} + +func mergeProfile(m map[string]*cover.Profile, profs []*cover.Profile) { + for _, prof := range profs { + sort.Sort(blocksByStart(prof.Blocks)) + old, ok := m[prof.FileName] + if !ok { + m[prof.FileName] = prof + continue + } + + // Merge samples from the same location. + // The data has already been sorted. + tmp := old.Blocks[:0] + var i, j int + for i < len(old.Blocks) && j < len(prof.Blocks) { + v1 := old.Blocks[i] + v2 := prof.Blocks[j] + + switch compareProfileBlock(v1, v2) { + case -1: + tmp = appendWithReduce(tmp, v1) + i++ + case 1: + tmp = appendWithReduce(tmp, v2) + j++ + default: + tmp = appendWithReduce(tmp, v1) + tmp = appendWithReduce(tmp, v2) + i++ + j++ + } + } + for ; i < len(old.Blocks); i++ { + tmp = appendWithReduce(tmp, old.Blocks[i]) + } + for ; j < len(prof.Blocks); j++ { + tmp = appendWithReduce(tmp, prof.Blocks[j]) + } + + m[prof.FileName] = old + } +} + +// appendWithReduce works like append(), but it merge the duplicated values. +func appendWithReduce(input []cover.ProfileBlock, b cover.ProfileBlock) []cover.ProfileBlock { + if len(input) >= 1 { + last := &input[len(input)-1] + if b.StartLine == last.StartLine && + b.StartCol == last.StartCol && + b.EndLine == last.EndLine && + b.EndCol == last.EndCol { + if b.NumStmt != last.NumStmt { + panic(fmt.Errorf("inconsistent NumStmt: changed from %d to %d", last.NumStmt, b.NumStmt)) + } + // Merge the data with the last one of the slice. 
+ last.Count |= b.Count + return input + } + } + return append(input, b) +} + +type blocksByStart []cover.ProfileBlock + +func compareProfileBlock(x, y cover.ProfileBlock) int { + if x.StartLine < y.StartLine { + return -1 + } + if x.StartLine > y.StartLine { + return 1 + } + + // Now x.StartLine == y.StartLine + if x.StartCol < y.StartCol { + return -1 + } + if x.StartCol > y.StartCol { + return 1 + } + + return 0 +} + +func (b blocksByStart) Len() int { return len(b) } +func (b blocksByStart) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b blocksByStart) Less(i, j int) bool { + bi, bj := b[i], b[j] + return bi.StartLine < bj.StartLine || bi.StartLine == bj.StartLine && bi.StartCol < bj.StartCol +} diff --git a/tools/pd-ut/ut.go b/tools/pd-ut/ut.go index ab8eca128096..4b7d19c18e64 100644 --- a/tools/pd-ut/ut.go +++ b/tools/pd-ut/ut.go @@ -74,7 +74,11 @@ pd-ut build xxx pd-ut run --junitfile xxx // test with race flag -pd-ut run --race` +pd-ut run --race + +// test with coverprofile +pd-ut run --coverprofile xxx +go tool cover --func=xxx` fmt.Println(msg) return true @@ -84,17 +88,30 @@ const modulePath = "github.com/tikv/pd" var ( // runtime - p int - buildParallel int - workDir string + p int + buildParallel int + workDir string + coverFileTempDir string // arguments - race bool - junitFile string + race bool + junitFile string + coverProfile string ) func main() { race = handleFlag("--race") junitFile = stripFlag("--junitfile") + coverProfile = stripFlag("--coverprofile") + + if coverProfile != "" { + var err error + coverFileTempDir, err = os.MkdirTemp(os.TempDir(), "cov") + if err != nil { + fmt.Println("create temp dir fail", coverFileTempDir) + os.Exit(1) + } + defer os.RemoveAll(coverFileTempDir) + } // Get the correct count of CPU if it's in docker. p = runtime.GOMAXPROCS(0) @@ -326,6 +343,10 @@ func cmdRun(args ...string) bool { } } + if coverProfile != "" { + collectCoverProfileFile() + } + for _, work := range works { if work.Fail { return false @@ -336,7 +357,7 @@ func cmdRun(args ...string) bool { // stripFlag strip the '--flag xxx' from the command line os.Args // Example of the os.Args changes -// Before: ut run pkg TestXXX --junitfile yyy +// Before: ut run pkg TestXXX --coverprofile xxx --junitfile yyy // After: ut run pkg TestXXX // The value of the flag is returned. func stripFlag(flag string) string { @@ -565,12 +586,17 @@ func failureCases(input []JUnitTestCase) int { func (n *numa) testCommand(pkg string, fn string) *exec.Cmd { args := make([]string, 0, 10) exe := "./" + testFileName(pkg) + if coverProfile != "" { + fileName := strings.ReplaceAll(pkg, "/", "_") + "." + fn + tmpFile := path.Join(coverFileTempDir, fileName) + args = append(args, "-test.coverprofile", tmpFile) + } args = append(args, "-test.cpu", "1") if !race { - args = append(args, []string{"-test.timeout", "5m"}...) + args = append(args, []string{"-test.timeout", "2m"}...) } else { // it takes a longer when race is enabled. so it is set more timeout value. - args = append(args, []string{"-test.timeout", "30m"}...) + args = append(args, []string{"-test.timeout", "5m"}...) 
} // core.test -test.run TestClusteredPrefixColum @@ -580,7 +606,7 @@ func (n *numa) testCommand(pkg string, fn string) *exec.Cmd { } func skipDIR(pkg string) bool { - skipDir := []string{"tests", "bin", "cmd", "tools"} + skipDir := []string{"tests/integrations", "bin", "cmd", "tools"} for _, ignore := range skipDir { if strings.HasPrefix(pkg, ignore) { return true @@ -600,6 +626,9 @@ func buildTestBinaryMulti(pkgs []string) error { p := strconv.Itoa(buildParallel) cmd := exec.Command("go", "test", "-p", p, "--exec", xprogPath, "-vet", "off", "--tags=tso_function_test,deadlock") + if coverProfile != "" { + cmd.Args = append(cmd.Args, "-cover") + } cmd.Args = append(cmd.Args, packages...) cmd.Dir = workDir cmd.Stdout = os.Stdout @@ -613,6 +642,9 @@ func buildTestBinaryMulti(pkgs []string) error { func buildTestBinary(pkg string) error { //nolint:gosec cmd := exec.Command("go", "test", "-c", "-vet", "off", "--tags=tso_function_test,deadlock", "-o", testFileName(pkg), "-v") + if coverProfile != "" { + cmd.Args = append(cmd.Args, "-cover") + } if race { cmd.Args = append(cmd.Args, "-race") }
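
Note (illustrative, not part of the patch): `pd-ut run --coverprofile covprofile` builds the test binaries with `-cover`, writes one `-test.coverprofile` file per test function into a temporary directory, and merges them into a single `covprofile` (with a `mode: atomic` header) via collectCoverProfileFile; the README then suggests summarizing it with `go tool cover --func=covprofile`. The sketch below is a hypothetical helper, not included in this change, showing how that merged profile can also be inspected with the same golang.org/x/tools/cover package this patch promotes to a direct dependency; the file name "covprofile" in the working directory and the per-file summary format are assumptions for illustration.

// inspectcover.go - hypothetical helper, not part of this patch.
package main

import (
	"fmt"
	"os"

	"golang.org/x/tools/cover"
)

func main() {
	// Parse the profile merged by pd-ut (one "mode: atomic" file combining
	// the per-test-function profiles collected in the temp directory).
	profs, err := cover.ParseProfiles("covprofile")
	if err != nil {
		fmt.Println("parse merged profile error:", err)
		os.Exit(1)
	}
	// Print a per-file statement-coverage summary, similar in spirit to
	// `go tool cover --func=covprofile` but aggregated by file.
	for _, p := range profs {
		covered, total := 0, 0
		for _, b := range p.Blocks {
			total += b.NumStmt
			if b.Count > 0 {
				covered += b.NumStmt
			}
		}
		fmt.Printf("%s: %d/%d statements covered\n", p.FileName, covered, total)
	}
}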