Skip to content

Commit

Permalink
Merge #131399
Browse files Browse the repository at this point in the history
131399: drt-run: Fix warnings to avoid build failure r=itsbilal a=nameisbhaskar

The build is failing on my Mac because nogo treats these warnings as errors during build-time analysis. This PR fixes the warnings.
```
Use --sandbox_debug to see verbose messages from the sandbox and retain the sandbox build root for debugging
compilepkg: nogo: errors found by nogo during build-time code analysis:
pkg/cmd/drt-run/event.go:125:39: if statement between Lock and Unlock may be unsafe, move Unlock to a defer statement after Lock (deferunlockcheck)
pkg/cmd/drt-run/event.go:128:8: Unlock is >5 lines away from matching Lock, move it to a defer statement after Lock (deferunlockcheck)
pkg/cmd/drt-run/event.go:131:16: unchecked error (errcheck)
pkg/cmd/drt-run/event.go:143:52: if statement between Lock and Unlock may be unsafe, move Unlock to a defer statement after Lock (deferunlockcheck)
pkg/cmd/drt-run/event.go:146:7: Unlock is >5 lines away from matching Lock, move it to a defer statement after Lock (deferunlockcheck)
pkg/cmd/drt-run/event.go:149:16: unchecked error (errcheck)
pkg/cmd/drt-run/http.go:103:16: unchecked error (errcheck)
pkg/cmd/drt-run/main.go:94:20: unchecked error (errcheck)
pkg/cmd/drt-run/operations.go:112:29: if statement between Lock and Unlock may be unsafe, move Unlock to a defer statement after Lock (deferunlockcheck)
pkg/cmd/drt-run/operations.go:119:25: function call between Lock and Unlock may be unsafe, move Unlock to a defer statement after Lock (deferunlockcheck)
pkg/cmd/drt-run/operations.go:121:53: if statement between Lock and Unlock may be unsafe, move Unlock to a defer statement after Lock (deferunlockcheck)
pkg/cmd/drt-run/operations.go:128:31: function call between Lock and Unlock may be unsafe, move Unlock to a defer statement after Lock (deferunlockcheck)
pkg/cmd/drt-run/operations.go:140:4: for loop between Lock and Unlock may be unsafe, move Unlock to a defer statement after Lock (deferunlockcheck)
pkg/cmd/drt-run/operations.go:158:8: Unlock is >5 lines away from matching Lock, move it to a defer statement after Lock (deferunlockcheck)
pkg/cmd/drt-run/operations.go:218:11: unchecked error (errcheck)
pkg/cmd/drt-run/operations.go:259:74: if statement between Lock and Unlock may be unsafe, move Unlock to a defer statement after Lock (deferunlockcheck)
pkg/cmd/drt-run/operations.go:262:2: function call between Lock and Unlock may be unsafe, move Unlock to a defer statement after Lock (deferunlockcheck)
pkg/cmd/drt-run/operations.go:263:7: Unlock is >5 lines away from matching Lock, move it to a defer statement after Lock (deferunlockcheck)
pkg/cmd/drt-run/workloads.go:106:11: unchecked error (errcheck)
Target //pkg/cmd/drt-run:drt-run failed to build
Use --verbose_failures to see the command lines of failed build steps.
INFO: Elapsed time: 85.126s, Critical Path: 61.50s
INFO: 1080 processes: 5 internal, 1075 darwin-sandbox.
ERROR: Build did NOT complete successfully
INFO: Build Event Protocol files produced successfully.
ERROR: exit status 1
```

Epic: none

Release note: None

Co-authored-by: Bhaskarjyoti Bora <[email protected]>
  • Loading branch information
craig[bot] and nameisbhaskar committed Sep 27, 2024
2 parents a5d6748 + c9ef438 commit cafd11f
Show file tree
Hide file tree
Showing 7 changed files with 96 additions and 89 deletions.
44 changes: 22 additions & 22 deletions pkg/cmd/drt-run/config/drt-chaos.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -4,36 +4,36 @@ workloads:
steps:
- command: run
args:
- --warehouses 12000
- --active-warehouses 1700
- --db cct_tpcc
- --warehouses=12000
- --active-warehouses=1700
- --db=cct_tpcc
- --secure
- --ramp 10m
- --display-every 5s
- --duration 12h
- --prometheus-port 2112
- --user cct_tpcc_user
- --ramp=10m
- --display-every=5s
- --duration=12h
- --prometheus-port=2112
- --user=cct_tpcc_user
- --tolerate-errors
- --password tpcc
- --password=tpcc
- name: kv-main
kind: kv
steps:
- command: run
args:
- --concurrency 8
- --histograms kv/stats.json
- --db kv
- --splits 500
- --read-percent 50
- --cycle-length 100000
- --min-block-bytes 100
- --max-block-bytes 1000
- --max-rate 120
- --concurrency=8
- --histograms=kv/stats.json
- --db=kv
- --splits=500
- --read-percent=50
- --cycle-length=100000
- --min-block-bytes=100
- --max-block-bytes=1000
- --max-rate=120
- --secure
- --prometheus-port 2114
- --ramp 10m
- --display-every 5s
- --duration 12h
- --prometheus-port=2114
- --ramp=10m
- --display-every=5s
- --duration=12h
- --tolerate-errors
- --enum
operations:
Expand Down
40 changes: 22 additions & 18 deletions pkg/cmd/drt-run/event.go
Original file line number Diff line number Diff line change
Expand Up @@ -119,16 +119,18 @@ func (l *eventLogger) logWorkloadEvent(ev Event, workloadIdx int) {
ev.Source = SourceWorkload

we := &l.workloadEvents[workloadIdx]
we.mu.Lock()
idx := we.mu.startIdx
we.mu.startIdx = (we.mu.startIdx + 1) % perWorkloadEventRetention
if we.mu.numEvents != len(we.events) {
we.mu.numEvents++
}
we.mu.Unlock()

var idx int
func() {
we.mu.Lock()
defer we.mu.Unlock()
idx = we.mu.startIdx
we.mu.startIdx = (we.mu.startIdx + 1) % perWorkloadEventRetention
if we.mu.numEvents != len(we.events) {
we.mu.numEvents++
}
}()
we.events[idx] = ev
io.WriteString(l.outputFile, ev.String()+"\n")
_, _ = io.WriteString(l.outputFile, ev.String()+"\n")
}

// logOperationEvent logs an event in the operation log. Up to operationEventRetention
Expand All @@ -137,16 +139,18 @@ func (l *eventLogger) logOperationEvent(ev Event) {
ev.Timestamp = timeutil.Now()
ev.Source = SourceOperation

l.mu.Lock()
idx := l.mu.operationStartIdx
l.mu.operationStartIdx = (l.mu.operationStartIdx + 1) % operationEventRetention
if l.mu.operationEvents != len(l.operationEvents) {
l.mu.operationEvents++
}
l.mu.Unlock()

var idx int
func() {
l.mu.Lock()
defer l.mu.Unlock()
idx = l.mu.operationStartIdx
l.mu.operationStartIdx = (l.mu.operationStartIdx + 1) % operationEventRetention
if l.mu.operationEvents != len(l.operationEvents) {
l.mu.operationEvents++
}
}()
l.operationEvents[idx] = ev
io.WriteString(l.outputFile, ev.String()+"\n")
_, _ = io.WriteString(l.outputFile, ev.String()+"\n")
}

// getWorkloadEvents returns workload events for a given workload worker. Up to
Expand Down
2 changes: 1 addition & 1 deletion pkg/cmd/drt-run/http.go
Original file line number Diff line number Diff line change
Expand Up @@ -100,7 +100,7 @@ func (h *httpHandler) serve(rw http.ResponseWriter, req *http.Request) {
fmt.Fprintf(rw, "<h3>Configuration</h3><hr>\n")
fmt.Fprintf(rw, "<pre>")
encoder := yaml.NewEncoder(rw)
encoder.Encode(h.w.config)
_ = encoder.Encode(h.w.config)
fmt.Fprintf(rw, "</pre>")

fmt.Fprintf(rw, "</body>\n</html>")
Expand Down
2 changes: 1 addition & 1 deletion pkg/cmd/drt-run/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -91,7 +91,7 @@ func runDRT(configFile string) (retErr error) {
o: or,
eventL: eventL,
}
hh.startHTTPServer(8080, "localhost")
_ = hh.startHTTPServer(8080, "localhost")
or.Run(ctx)

return nil
Expand Down
89 changes: 46 additions & 43 deletions pkg/cmd/drt-run/operations.go
Original file line number Diff line number Diff line change
Expand Up @@ -108,54 +108,57 @@ func (r *opsRunner) pickOperation(
setIdx := rng.Intn(len(r.specs))
opSpecsSet := r.specs[setIdx]
opSpec := &opSpecsSet[rng.Intn(len(opSpecsSet))]
r.mu.Lock()
if r.mu.lockOutOperations {
r.mu.completed.Wait()
r.mu.Unlock()
continue
}
shouldContinue := func() bool {
r.mu.Lock()
defer r.mu.Unlock()
if r.mu.lockOutOperations {
r.mu.completed.Wait()
return true
}

lastRun := r.mu.lastRun[opSpec.Name]
eligibleForNextRun := lastRun.Add(r.config.Operations.Sets[setIdx].Cadence)
lastRun := r.mu.lastRun[opSpec.Name]
eligibleForNextRun := lastRun.Add(r.config.Operations.Sets[setIdx].Cadence)

if timeutil.Now().Compare(eligibleForNextRun) < 0 {
// Find another operation to run.
r.mu.completed.Wait()
r.mu.Unlock()
continue
}
// Ratchet lastRun forward.
r.mu.lastRun[opSpec.Name] = timeutil.Now()
r.mu.runningOperations[workerIdx] = opSpec.Name

// See what level of isolation this operation requires
// from other operations.
switch opSpec.CanRunConcurrently {
case registry.OperationCanRunConcurrently:
// Nothing to do.
case registry.OperationCannotRunConcurrently:
r.mu.lockOutOperations = true
fallthrough
case registry.OperationCannotRunConcurrentlyWithItself:
for otherOpsRunning := true; otherOpsRunning; {
otherOpsRunning = false
for i := range r.mu.runningOperations {
if i == workerIdx {
continue
if timeutil.Now().Compare(eligibleForNextRun) < 0 {
// Find another operation to run.
r.mu.completed.Wait()
return true
}
// Ratchet lastRun forward.
r.mu.lastRun[opSpec.Name] = timeutil.Now()
r.mu.runningOperations[workerIdx] = opSpec.Name

// See what level of isolation this operation requires
// from other operations.
switch opSpec.CanRunConcurrently {
case registry.OperationCanRunConcurrently:
// Nothing to do.
case registry.OperationCannotRunConcurrently:
r.mu.lockOutOperations = true
fallthrough
case registry.OperationCannotRunConcurrentlyWithItself:
for otherOpsRunning := true; otherOpsRunning; {
otherOpsRunning = false
for i := range r.mu.runningOperations {
if i == workerIdx {
continue
}
if r.mu.runningOperations[i] != "" &&
(opSpec.CanRunConcurrently != registry.OperationCannotRunConcurrentlyWithItself || r.mu.runningOperations[i] == opSpec.Name) {
otherOpsRunning = true
break
}
}
if r.mu.runningOperations[i] != "" &&
(opSpec.CanRunConcurrently != registry.OperationCannotRunConcurrentlyWithItself || r.mu.runningOperations[i] == opSpec.Name) {
otherOpsRunning = true
break
if otherOpsRunning {
r.mu.completed.Wait()
}
}
if otherOpsRunning {
r.mu.completed.Wait()
}
}
return false
}()
if shouldContinue {
continue
}

r.mu.Unlock()
return opSpec
}
}
Expand Down Expand Up @@ -215,7 +218,7 @@ func (r *opsRunner) runOperation(
})
return
}
cmd.Start()
_ = cmd.Start()
wg.Add(2)
// Spin up goroutines to read stdout and stderr, and pipe them
// into the event logger.
Expand Down Expand Up @@ -255,12 +258,12 @@ func (r *opsRunner) runOperation(
})

r.mu.Lock()
defer r.mu.Unlock()
r.mu.runningOperations[workerIdx] = ""
if opSpec.CanRunConcurrently == registry.OperationCannotRunConcurrently {
r.mu.lockOutOperations = false
}
r.mu.completed.Broadcast()
r.mu.Unlock()
}

// runWorker manages the infinite loop for one operation runner worker.
Expand Down
2 changes: 1 addition & 1 deletion pkg/cmd/drt-run/workloads.go
Original file line number Diff line number Diff line change
Expand Up @@ -103,7 +103,7 @@ func (w *workloadRunner) runWorkloadStep(
}
}()

cmd.Start()
_ = cmd.Start()
if err := cmd.Wait(); err != nil {
w.errChan <- err
return
Expand Down
6 changes: 3 additions & 3 deletions pkg/cmd/roachtest/operations/node_kill.go
Original file line number Diff line number Diff line change
Expand Up @@ -103,7 +103,7 @@ func registerNodeKill(r registry.Registry) {
Owner: registry.OwnerServer,
Timeout: 15 * time.Minute,
CompatibleClouds: registry.AllClouds,
CanRunConcurrently: registry.OperationCannotRunConcurrentlyWithItself,
CanRunConcurrently: registry.OperationCannotRunConcurrently,
Dependencies: []registry.OperationDependency{registry.OperationRequiresZeroUnderreplicatedRanges},
Run: nodeKillRunner(9 /* signal */, true /* drain */),
})
Expand All @@ -112,7 +112,7 @@ func registerNodeKill(r registry.Registry) {
Owner: registry.OwnerServer,
Timeout: 10 * time.Minute,
CompatibleClouds: registry.AllClouds,
CanRunConcurrently: registry.OperationCannotRunConcurrentlyWithItself,
CanRunConcurrently: registry.OperationCannotRunConcurrently,
Dependencies: []registry.OperationDependency{registry.OperationRequiresZeroUnderreplicatedRanges},
Run: nodeKillRunner(9 /* signal */, false /* drain */),
})
Expand All @@ -121,7 +121,7 @@ func registerNodeKill(r registry.Registry) {
Owner: registry.OwnerServer,
Timeout: 15 * time.Minute,
CompatibleClouds: registry.AllClouds,
CanRunConcurrently: registry.OperationCannotRunConcurrentlyWithItself,
CanRunConcurrently: registry.OperationCannotRunConcurrently,
Dependencies: []registry.OperationDependency{registry.OperationRequiresZeroUnderreplicatedRanges},
Run: nodeKillRunner(15 /* signal */, true /* drain */),
})
Expand Down

0 comments on commit cafd11f

Please sign in to comment.