test dashboard #7103

Closed
wants to merge 6 commits
60 changes: 30 additions & 30 deletions .github/workflows/pd-tests.yaml
@@ -23,9 +23,9 @@ jobs:
strategy:
fail-fast: true
matrix:
worker_id: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
worker_id: [1]
outputs:
job-total: 13
job-total: 1
steps:
- uses: actions/setup-go@v3
with:
@@ -48,31 +48,31 @@ jobs:
JOB_COUNT: 10 # 11, 12, 13 are for other integration jobs
run: |
make ci-test-job JOB_COUNT=$(($JOB_COUNT)) JOB_INDEX=$WORKER_ID
mv covprofile covprofile_$WORKER_ID
sed -i "/failpoint_binding/d" covprofile_$WORKER_ID
- name: Upload coverage result ${{ matrix.worker_id }}
uses: actions/upload-artifact@v2
with:
name: cover-reports
path: covprofile_${{ matrix.worker_id }}
report-coverage:
needs: chunks
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v3
- name: Download chunk report
uses: actions/download-artifact@v2
with:
name: cover-reports
- name: Merge
env:
TOTAL_JOBS: ${{needs.chunks.outputs.job-total}}
run: for i in $(seq 1 $TOTAL_JOBS); do cat covprofile_$i >> covprofile; done
- name: Send coverage
uses: codecov/codecov-action@v1
with:
token: ${{ secrets.CODECOV }}
file: ./covprofile
flags: unittests
name: codecov-umbrella
# mv covprofile covprofile_$WORKER_ID
# sed -i "/failpoint_binding/d" covprofile_$WORKER_ID
# - name: Upload coverage result ${{ matrix.worker_id }}
# uses: actions/upload-artifact@v2
# with:
# name: cover-reports
# path: covprofile_${{ matrix.worker_id }}
# report-coverage:
# needs: chunks
# runs-on: ubuntu-latest
# steps:
# - name: Checkout code
# uses: actions/checkout@v3
# - name: Download chunk report
# uses: actions/download-artifact@v2
# with:
# name: cover-reports
# - name: Merge
# env:
# TOTAL_JOBS: ${{needs.chunks.outputs.job-total}}
# run: for i in $(seq 1 $TOTAL_JOBS); do cat covprofile_$i >> covprofile; done
# - name: Send coverage
# uses: codecov/codecov-action@v1
# with:
# token: ${{ secrets.CODECOV }}
# file: ./covprofile
# flags: unittests
# name: codecov-umbrella
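Note: together with the matrix change above, this hunk temporarily collapses the 13-way test shard to a single worker and comments out the coverage upload, merge, and Codecov steps, so no coverage report is produced while the dashboard tests run in isolation.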
10 changes: 3 additions & 7 deletions Makefile
@@ -225,12 +225,8 @@ basic-test: install-tools
@$(FAILPOINT_DISABLE)

ci-test-job: install-tools dashboard-ui
@$(FAILPOINT_ENABLE)
if [[ $(JOB_INDEX) -le 10 ]]; then \
CGO_ENABLED=1 go test -timeout=15m -tags deadlock -race -covermode=atomic -coverprofile=covprofile -coverpkg=./... $(shell ./scripts/ci-subtask.sh $(JOB_COUNT) $(JOB_INDEX)); \
else \
for mod in $(shell ./scripts/ci-subtask.sh $(JOB_COUNT) $(JOB_INDEX)); do cd $$mod && $(MAKE) ci-test-job && cd $(ROOT_PATH) > /dev/null && cat $$mod/covprofile >> covprofile; done; \
fi
CGO_ENABLED=1 go test -timeout=15m -tags deadlock -race github.com/tikv/pd/tests/dashboard -test.count 10
@$(FAILPOINT_DISABLE)

TSO_INTEGRATION_TEST_PKGS := $(PD_PKG)/tests/server/tso

@@ -286,4 +282,4 @@ clean-build:
rm -rf $(BUILD_BIN_PATH)
rm -rf $(GO_TOOLS_BIN_PATH)

.PHONY: clean clean-test clean-build
.PHONY: clean clean-test clean-build
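For context on the new ci-test-job body: an explicit -test.count both repeats the dashboard package's tests (ten times here) and bypasses Go's test result caching, which is the usual way to flush out a flaky test; the -race and -tags deadlock flags are carried over from the original invocation.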
27 changes: 19 additions & 8 deletions client/resource_group/controller/controller.go
@@ -219,7 +219,8 @@ func (c *ResourceGroupsController) Start(ctx context.Context) {
}
var watchChannel chan []*meta_storagepb.Event
if !c.ruConfig.isSingleGroupByKeyspace {
watchChannel, err = c.provider.Watch(ctx, pd.GroupSettingsPathPrefixBytes, pd.WithRev(revision), pd.WithPrefix())
// Use WithPrevKV() so that Delete events carry the previous key-value pair.
watchChannel, err = c.provider.Watch(ctx, pd.GroupSettingsPathPrefixBytes, pd.WithRev(revision), pd.WithPrefix(), pd.WithPrevKV())
}
watchRetryTimer := time.NewTimer(watchRetryInterval)
if err == nil || c.ruConfig.isSingleGroupByKeyspace {
@@ -277,24 +278,34 @@ func (c *ResourceGroupsController) Start(ctx context.Context) {
}
for _, item := range resp {
revision = item.Kv.ModRevision
group := &rmpb.ResourceGroup{}
if err := proto.Unmarshal(item.Kv.Value, group); err != nil {
continue
}
switch item.Type {
case meta_storagepb.Event_PUT:
group := &rmpb.ResourceGroup{}
if err := proto.Unmarshal(item.Kv.Value, group); err != nil {
continue
}
if item, ok := c.groupsController.Load(group.Name); ok {
gc := item.(*groupCostController)
gc.modifyMeta(group)
}
case meta_storagepb.Event_DELETE:
if _, ok := c.groupsController.LoadAndDelete(group.Name); ok {
resourceGroupStatusGauge.DeleteLabelValues(group.Name)
if item.PrevKv != nil {
group := &rmpb.ResourceGroup{}
if err := proto.Unmarshal(item.PrevKv.Value, group); err != nil {
continue
}
if _, ok := c.groupsController.LoadAndDelete(group.Name); ok {
resourceGroupStatusGauge.DeleteLabelValues(group.Name)
}
} else {
// If the previous key-value pair has been compacted, there must have been a delete event
// before this one, so this event is a duplicate and can safely be ignored.
log.Info("previous key-value pair has been compacted", zap.String("required-key", string(item.Kv.Key)), zap.String("value", string(item.Kv.Value)))
}
}
}
case <-watchRetryTimer.C:
watchChannel, err = c.provider.Watch(ctx, pd.GroupSettingsPathPrefixBytes, pd.WithRev(revision), pd.WithPrefix())
watchChannel, err = c.provider.Watch(ctx, pd.GroupSettingsPathPrefixBytes, pd.WithRev(revision), pd.WithPrefix(), pd.WithPrevKV())
if err != nil {
watchRetryTimer.Reset(watchRetryInterval)
failpoint.Inject("watchStreamError", func() {
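Distilled from the controller change above, a minimal sketch of the PrevKV pattern it adopts. The import paths follow the client's existing usage of kvproto and gogo/protobuf, and applyUpdate/removeGroup are hypothetical stand-ins for the controller's groupsController bookkeeping:

import (
    "github.com/gogo/protobuf/proto"
    "github.com/pingcap/kvproto/pkg/meta_storagepb"
    rmpb "github.com/pingcap/kvproto/pkg/resource_manager"
)

// handleEvent consumes one etcd-style watch event. PUT events carry the new
// value in Kv.Value; DELETE events have an empty Kv.Value, so the group's last
// state is only recoverable from PrevKv (populated when watching WithPrevKV).
func handleEvent(item *meta_storagepb.Event) {
    switch item.Type {
    case meta_storagepb.Event_PUT:
        group := &rmpb.ResourceGroup{}
        if err := proto.Unmarshal(item.Kv.Value, group); err != nil {
            return // skip undecodable payloads
        }
        applyUpdate(group) // hypothetical helper: refresh local state
    case meta_storagepb.Event_DELETE:
        if item.PrevKv == nil {
            // PrevKv was compacted away: an earlier delete event must have
            // removed the group already, so this duplicate can be ignored.
            return
        }
        group := &rmpb.ResourceGroup{}
        if err := proto.Unmarshal(item.PrevKv.Value, group); err != nil {
            return
        }
        removeGroup(group.Name) // hypothetical helper: drop local state
    }
}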
47 changes: 0 additions & 47 deletions client/resource_manager_client.go
@@ -51,7 +51,6 @@ type ResourceManagerClient interface {
ModifyResourceGroup(ctx context.Context, metaGroup *rmpb.ResourceGroup) (string, error)
DeleteResourceGroup(ctx context.Context, resourceGroupName string) (string, error)
LoadResourceGroups(ctx context.Context) ([]*rmpb.ResourceGroup, int64, error)
WatchResourceGroup(ctx context.Context, revision int64) (chan []*rmpb.ResourceGroup, error)
AcquireTokenBuckets(ctx context.Context, request *rmpb.TokenBucketsRequest) ([]*rmpb.TokenBucketResponse, error)
Watch(ctx context.Context, key []byte, opts ...OpOption) (chan []*meta_storagepb.Event, error)
}
@@ -184,52 +183,6 @@ func (c *client) LoadResourceGroups(ctx context.Context) ([]*rmpb.ResourceGroup,
return groups, resp.Header.Revision, nil
}

// WatchResourceGroup [just for TEST] watches resource group changes.
// It returns a stream of slices of resource groups.
// The first message in the stream contains all current resource groups;
// all subsequent messages contain new [PUT/DELETE] events for the resource groups.
func (c *client) WatchResourceGroup(ctx context.Context, revision int64) (chan []*rmpb.ResourceGroup, error) {
configChan, err := c.Watch(ctx, GroupSettingsPathPrefixBytes, WithRev(revision), WithPrefix())
if err != nil {
return nil, err
}
resourceGroupWatcherChan := make(chan []*rmpb.ResourceGroup)
go func() {
defer func() {
close(resourceGroupWatcherChan)
if r := recover(); r != nil {
log.Error("[pd] panic in ResourceManagerClient `WatchResourceGroups`", zap.Any("error", r))
return
}
}()
for {
select {
case <-ctx.Done():
return
case res, ok := <-configChan:
if !ok {
return
}
groups := make([]*rmpb.ResourceGroup, 0, len(res))
for _, item := range res {
switch item.Type {
case meta_storagepb.Event_PUT:
group := &rmpb.ResourceGroup{}
if err := proto.Unmarshal(item.Kv.Value, group); err != nil {
return
}
groups = append(groups, group)
case meta_storagepb.Event_DELETE:
continue
}
}
resourceGroupWatcherChan <- groups
}
}
}()
return resourceGroupWatcherChan, err
}

func (c *client) AcquireTokenBuckets(ctx context.Context, request *rmpb.TokenBucketsRequest) ([]*rmpb.TokenBucketResponse, error) {
req := &tokenRequest{
done: make(chan error, 1),
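For callers that relied on the removed helper, equivalent behavior can be rebuilt on top of the generic Watch API that remains in the interface. A caller-side sketch mirroring the deleted code, assuming GroupSettingsPathPrefixBytes, WithRev, and WithPrefix are exported from github.com/tikv/pd/client as the controller's usage suggests (watchResourceGroups itself is hypothetical):

import (
    "context"

    "github.com/gogo/protobuf/proto"
    "github.com/pingcap/kvproto/pkg/meta_storagepb"
    rmpb "github.com/pingcap/kvproto/pkg/resource_manager"
    pd "github.com/tikv/pd/client"
)

func watchResourceGroups(ctx context.Context, c pd.ResourceManagerClient, revision int64) (<-chan []*rmpb.ResourceGroup, error) {
    events, err := c.Watch(ctx, pd.GroupSettingsPathPrefixBytes, pd.WithRev(revision), pd.WithPrefix())
    if err != nil {
        return nil, err
    }
    out := make(chan []*rmpb.ResourceGroup)
    go func() {
        defer close(out)
        for {
            select {
            case <-ctx.Done():
                return
            case batch, ok := <-events:
                if !ok {
                    return
                }
                groups := make([]*rmpb.ResourceGroup, 0, len(batch))
                for _, item := range batch {
                    if item.Type != meta_storagepb.Event_PUT {
                        continue // DELETE events carry no decodable payload here
                    }
                    group := &rmpb.ResourceGroup{}
                    if err := proto.Unmarshal(item.Kv.Value, group); err != nil {
                        continue
                    }
                    groups = append(groups, group)
                }
                out <- groups
            }
        }
    }()
    return out, nil
}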
2 changes: 1 addition & 1 deletion server/grpc_service.go
@@ -2545,7 +2545,7 @@ func (s *GrpcServer) WatchGlobalConfig(req *pdpb.WatchGlobalConfigRequest, serve
} else {
// If the previous key-value pair has been compacted, there must have been a delete event
// before this one, so this event is a duplicate and can safely be ignored.
log.Info("previous key-value pair has been compacted", zap.String("previous key", string(e.Kv.Key)))
log.Info("previous key-value pair has been compacted", zap.String("required-key", string(e.Kv.Key)))
}
}
}
2 changes: 2 additions & 0 deletions tests/dashboard/service_test.go
@@ -69,10 +69,12 @@ func (suite *dashboardTestSuite) TearDownSuite() {
}

func (suite *dashboardTestSuite) TestDashboardRedirect() {
println("TestDashboardRedirect")
suite.testDashboard(false)
}

func (suite *dashboardTestSuite) TestDashboardProxy() {
println("TestDashboardProxy")
suite.testDashboard(true)
}

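If markers like these were meant to outlive the experiment, testify's suite API offers a test-scoped logger whose output is attached to the running test and respects go test -v; a hedged alternative to the raw println calls:

func (suite *dashboardTestSuite) TestDashboardRedirect() {
    suite.T().Log("TestDashboardRedirect")
    suite.testDashboard(false)
}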