
Merge pull request #76 from GoogleCloudPlatform/release
Project import generated by Copybara.
bendory authored Aug 3, 2018
2 parents 3bec117 + 3ece3f6 commit 03fbb2d
Showing 3 changed files with 104 additions and 65 deletions.
59 changes: 39 additions & 20 deletions build/build.go
@@ -75,6 +75,10 @@ var (
RunRm = false
// timeNow is a function that returns the current time; stubbable for testing.
timeNow = time.Now
// baseDelay is passed to common.Backoff(); it is only modified in build_test.
baseDelay = 500 * time.Millisecond
// maxDelay is passed to common.Backoff(); it is only modified in build_test.
maxDelay = 10 * time.Second
)

type imageDigest struct {
@@ -522,7 +526,7 @@ func (b *Build) dockerPullWithRetries(ctx context.Context, tag string, outWriter
digest, err := b.dockerPull(ctx, tag, outWriter, errWriter)
if err != nil {
if attempt < maxPushRetries {
time.Sleep(common.Backoff(500*time.Millisecond, 10*time.Second, attempt))
time.Sleep(common.Backoff(baseDelay, maxDelay, attempt))
return b.dockerPullWithRetries(ctx, tag, outWriter, errWriter, attempt+1)
}
b.Log.WriteMainEntry("ERROR: failed to pull because we ran out of retries.")
@@ -625,7 +629,7 @@ func (b *Build) dockerPushWithRetries(ctx context.Context, tag string, attempt i
b.Log.WriteMainEntry("ERROR: " + msg)
}
if attempt < maxPushRetries {
time.Sleep(common.Backoff(500*time.Millisecond, 10*time.Second, attempt))
time.Sleep(common.Backoff(baseDelay, maxDelay, attempt))
return b.dockerPushWithRetries(ctx, tag, attempt+1)
}
b.Log.WriteMainEntry("ERROR: failed to push because we ran out of retries.")
@@ -864,20 +868,42 @@ func (b *Build) timeAndRunStep(ctx context.Context, idx int, waitChans []chan st

b.mu.Lock()
b.stepStatus[idx] = pb.Build_WORKING
when := timeNow()
b.Timing.BuildSteps[idx] = &TimeSpan{Start: when}
var timeout time.Duration
if stepTimeout := b.Request.Steps[idx].GetTimeout(); stepTimeout != nil {
var err error
timeout, err = ptypes.Duration(stepTimeout)
// We have previously validated this stepTimeout duration, so this err should never happen.
if err != nil {
err = fmt.Errorf("step %d has invalid timeout %v: %v", idx, stepTimeout, err)
log.Printf("Error: %v", err)
errors <- err
return
}
}
start := timeNow()
b.Timing.BuildSteps[idx] = &TimeSpan{Start: start}
b.mu.Unlock()

err := b.runStep(ctx, idx)
err := b.runStep(ctx, timeout, idx)
end := timeNow()

when = timeNow()
b.mu.Lock()
b.Timing.BuildSteps[idx].End = when
b.Timing.BuildSteps[idx].End = end
switch err {
case nil:
b.stepStatus[idx] = pb.Build_SUCCESS
case context.DeadlineExceeded:
b.stepStatus[idx] = pb.Build_TIMEOUT
// If the build step has no timeout, we got a DeadlineExceeded because the
// overall build timed out. The step's final status is its current WORKING
// status.
if timeout != 0 {
// If the build step has a timeout, then either the step timed out or the
// build timed out (or both). If it was a build timeout, don't update the
// per-step status.
if stepTime := end.Sub(start); stepTime >= timeout {
b.stepStatus[idx] = pb.Build_TIMEOUT
}
}
case context.Canceled:
b.stepStatus[idx] = pb.Build_CANCELLED
default:
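The subtlety this hunk addresses: a step's context is derived from the build's context, so a bare context.DeadlineExceeded cannot tell you which deadline fired. The new code attributes the timeout to the step only when the step ran at least as long as its own timeout. A self-contained sketch of the same check, with hypothetical durations:

package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	// Build-level deadline fires after 50ms...
	buildCtx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()

	// ...while the step's own timeout is far longer.
	stepTimeout := 10 * time.Second
	stepCtx, stepCancel := context.WithTimeout(buildCtx, stepTimeout)
	defer stepCancel()

	start := time.Now()
	<-stepCtx.Done() // the build deadline fires first
	stepTime := time.Since(start)

	// Same logic as the new switch arm: only report a step timeout when the
	// step actually consumed its whole budget.
	if stepCtx.Err() == context.DeadlineExceeded && stepTime >= stepTimeout {
		fmt.Println("step status: TIMEOUT")
	} else {
		fmt.Println("step status: left as WORKING (build-level timeout)")
	}
}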
@@ -927,7 +953,7 @@ func getTempDir(subpath string) string {
return fullpath
}

func (b *Build) runStep(ctx context.Context, idx int) error {
func (b *Build) runStep(ctx context.Context, timeout time.Duration, idx int) error {
step := b.Request.Steps[idx]

var stepIdentifier string
@@ -1000,17 +1026,10 @@ func (b *Build) runStep(ctx context.Context, idx int) error {
args = append(args, runTarget)
args = append(args, step.Args...)

if stepTimeout := step.GetTimeout(); stepTimeout != nil {
timeout, err := ptypes.Duration(stepTimeout)
// We have previously validated this stepTimeout duration, so this err should never happen.
if err != nil {
errWriter.Write([]byte(fmt.Sprintf("ERROR decoding timeout %v: %v", stepTimeout, err)))
log.Printf("ERROR: step %d has invalid timeout %v: %v", idx, stepTimeout, err)
} else if timeout != 0 {
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(ctx, timeout)
defer cancel()
}
if timeout != 0 {
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(ctx, timeout)
defer cancel()
}

buildErr := b.Runner.Run(ctx, args, nil, outWriter, errWriter, "")
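With the timeout decoded once in timeAndRunStep and passed down, runStep drops its duplicated ptypes error handling and keeps only the context.WithTimeout wrapping. For reference, the proto round trip the validation relies on (github.com/golang/protobuf/ptypes; values hypothetical):

package main

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/ptypes"
)

func main() {
	d := 90 * time.Second
	p := ptypes.DurationProto(d)    // time.Duration -> *duration.Duration proto
	back, err := ptypes.Duration(p) // and back; err is non-nil only for nil or out-of-range values
	fmt.Println(back, err)          // 1m30s <nil>
}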
106 changes: 64 additions & 42 deletions build/build_test.go
@@ -33,6 +33,7 @@ import (
"time"

durpb "github.com/golang/protobuf/ptypes/duration"
"github.com/golang/protobuf/ptypes"
"github.com/GoogleCloudPlatform/cloud-build-local/gsutil"
"github.com/GoogleCloudPlatform/cloud-build-local/runner"
"github.com/spf13/afero"
@@ -42,9 +43,13 @@ import (
pb "google.golang.org/genproto/googleapis/devtools/cloudbuild/v1"
)

const (
uuidRegex = "([a-fA-F0-9]{8}-[a-fA-F0-9]{4}-4[a-fA-F0-9]{3}-[8|9|aA|bB][a-fA-F0-9]{3}-[a-fA-F0-9]{12})"
)
const uuidRegex = "([a-fA-F0-9]{8}-[a-fA-F0-9]{4}-4[a-fA-F0-9]{3}-[8|9|aA|bB][a-fA-F0-9]{3}-[a-fA-F0-9]{12})"

func init() {
// Reduce backoff delays so that unit tests finish quickly.
baseDelay = time.Nanosecond
maxDelay = time.Millisecond
}

type mockRunner struct {
mu sync.Mutex
@@ -672,6 +677,14 @@ func TestRunBuildSteps(t *testing.T) {
ctx := context.Background()
exit1Err := errors.New("exit status 1")

stepTimeoutBuildRequest := commonBuildRequest
stepTimeoutBuildRequest.Steps = []*pb.BuildStep{{
Name: commonBuildRequest.Steps[0].Name,
Timeout: ptypes.DurationProto(-1 * time.Nanosecond),
}, {
Name: commonBuildRequest.Steps[0].Name,
}}

testCases := []struct {
name string
buildRequest pb.Build
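The -1ns timeout in stepTimeoutBuildRequest is a test device, not a realistic configuration: the mock runner injects context.DeadlineExceeded, and any non-negative elapsed time satisfies end.Sub(start) >= timeout when the timeout is negative, so step 0 always takes the new TIMEOUT branch. The "Build Timeout" case below exercises the opposite path: no step timeout is set, so the step's status stays WORKING when the build-level deadline fires.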
@@ -763,10 +776,16 @@
wantErr: errors.New(`build step 0 "gcr.io/my-project/my-compiler" failed: exit status 1`),
wantStepStatus: []pb.Build_Status{pb.Build_FAILURE, pb.Build_QUEUED},
}, {
name: "Step Timeout",
name: "Build Timeout",
buildRequest: commonBuildRequest,
opError: context.DeadlineExceeded,
wantErr: fmt.Errorf(`build step 0 "gcr.io/my-project/my-compiler" failed: %v`, context.DeadlineExceeded),
wantStepStatus: []pb.Build_Status{pb.Build_WORKING, pb.Build_QUEUED},
}, {
name: "Step Timeout",
buildRequest: stepTimeoutBuildRequest,
opError: context.DeadlineExceeded,
wantErr: fmt.Errorf(`build step 0 "gcr.io/my-project/my-compiler" failed: %v`, context.DeadlineExceeded),
wantStepStatus: []pb.Build_Status{pb.Build_TIMEOUT, pb.Build_QUEUED},
}, {
name: "Step Canceled",
@@ -776,52 +795,55 @@
wantStepStatus: []pb.Build_Status{pb.Build_CANCELLED, pb.Build_QUEUED},
}}
for _, tc := range testCases {
r := newMockRunner(t, tc.name)
r.dockerRunHandler = func([]string, io.Writer, io.Writer) error {
if !tc.opFailsToWrite {
r.localImages["gcr.io/build-output-tag-1"] = true
r.localImages["gcr.io/build-output-tag-no-digest"] = true
t.Run(tc.name, func(t *testing.T) {
r := newMockRunner(t, tc.name)
r.dockerRunHandler = func([]string, io.Writer, io.Writer) error {
if !tc.opFailsToWrite {
r.localImages["gcr.io/build-output-tag-1"] = true
r.localImages["gcr.io/build-output-tag-no-digest"] = true
}
r.localImages["gcr.io/build-output-tag-2"] = true
return tc.opError
}
r.localImages["gcr.io/build-output-tag-2"] = true
return tc.opError
}
b := New(r, tc.buildRequest, mockTokenSource(), nopBuildLogger{}, "", afero.NewMemMapFs(), true, false, false)
gotErr := b.runBuildSteps(ctx)
if !reflect.DeepEqual(gotErr, tc.wantErr) {
t.Errorf("%s: Wanted error %q, but got %q", tc.name, tc.wantErr, gotErr)
}
if tc.wantCommands != nil {
got := strings.Join(r.commands, "\n")
want := strings.Join(tc.wantCommands, "\n")
if match, _ := regexp.MatchString(want, got); !match {
t.Errorf("%s: Commands didn't match!\n===Want:\n%s\n===Got:\n%s", tc.name, want, got)
b := New(r, tc.buildRequest, mockTokenSource(), nopBuildLogger{}, "", afero.NewMemMapFs(), true, false, false)
gotErr := b.runBuildSteps(ctx)
if !reflect.DeepEqual(gotErr, tc.wantErr) {
t.Errorf("Wanted error %q, but got %q", tc.wantErr, gotErr)
}
if tc.wantCommands != nil {
got := strings.Join(r.commands, "\n")
want := strings.Join(tc.wantCommands, "\n")
if match, _ := regexp.MatchString(want, got); !match {
t.Errorf("Commands didn't match!\n===Want:\n%s\n===Got:\n%s", want, got)
}
}
}

b.mu.Lock()
if len(b.stepStatus) != len(tc.wantStepStatus) {
t.Errorf("%s: want len(b.stepStatus)==%d, got %d", tc.name, len(tc.wantStepStatus), len(b.stepStatus))
} else {
for i, stepStatus := range tc.wantStepStatus {
if b.stepStatus[i] != stepStatus {
t.Errorf("%s step %d: want %s, got %s", tc.name, i, stepStatus, b.stepStatus[i])
b.mu.Lock()
if len(b.stepStatus) != len(tc.wantStepStatus) {
t.Errorf("Want len(b.stepStatus)==%d, got %d", len(tc.wantStepStatus), len(b.stepStatus))
} else {
for i, stepStatus := range tc.wantStepStatus {
if b.stepStatus[i] != stepStatus {
t.Errorf("Step %d: want %s, got %s", i, stepStatus, b.stepStatus[i])
t.Logf("Step %d timeout is %#v", i, tc.buildRequest.GetSteps()[i].GetTimeout())
}
}
}
}
b.mu.Unlock()
b.mu.Unlock()

// Confirm proper population of per-step status in BuildSummary.
summary := b.Summary()
got := summary.StepStatus
if len(got) != len(tc.wantStepStatus) {
t.Errorf("%s: build summary wrong size; want %d, got %d", tc.name, len(tc.wantStepStatus), len(got))
} else {
for i, stepStatus := range tc.wantStepStatus {
if got[i] != stepStatus {
t.Errorf("%s summary step %d: want %s, got %s", tc.name, i, stepStatus, got[i])
// Confirm proper population of per-step status in BuildSummary.
summary := b.Summary()
got := summary.StepStatus
if len(got) != len(tc.wantStepStatus) {
t.Errorf("Build summary wrong size; want %d, got %d", len(tc.wantStepStatus), len(got))
} else {
for i, stepStatus := range tc.wantStepStatus {
if got[i] != stepStatus {
t.Errorf("Summary step %d: want %s, got %s", i, stepStatus, got[i])
}
}
}
}
})
}
}
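Beyond the reindentation, this hunk wraps each case in a named t.Run subtest, drops the tc.name prefix from failure messages (the subtest name already appears in test output), and adds a t.Logf of the step's timeout on mismatch. Subtests also let one case run in isolation (hypothetical invocation; t.Run replaces spaces with underscores in -run patterns):

go test ./build -run 'TestRunBuildSteps/Step_Timeout'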

4 changes: 1 addition & 3 deletions cloudbuild_tag.yaml
@@ -23,14 +23,12 @@ steps:
for GOOS in darwin linux; do
for GOARCH in 386 amd64; do
# Build binary with the new tag and with 'latest'
GOOS=$$GOOS GOARCH=$$GOARCH /builder/bin/go.bash build -o container-builder-local_$${GOOS}_$${GOARCH}-$TAG_NAME github.com/GoogleCloudPlatform/cloud-build-local
GOOS=$$GOOS GOARCH=$$GOARCH /builder/bin/go.bash build -o cloud-build-local_$${GOOS}_$${GOARCH}-$TAG_NAME github.com/GoogleCloudPlatform/cloud-build-local
done
done
tar -czvf container-builder-local_latest.tar.gz container-builder-local_*
tar -czvf cloud-build-local_latest.tar.gz cloud-build-local_*
artifacts:
objects:
location: 'gs://local-builder/'
paths: ['container-builder-local_*', 'cloud-build-local_*']
paths: ['cloud-build-local_*']
