From 2417146cc98f89b51cbbcd6e4a507abb5fbeb569 Mon Sep 17 00:00:00 2001 From: Zackary Santana <64446617+ZackarySantana@users.noreply.github.com> Date: Fri, 17 Jan 2025 15:34:02 -0500 Subject: [PATCH 01/11] feat: add context to task.Find --- graphql/query_resolver.go | 2 +- graphql/task_resolver.go | 6 +-- graphql/util.go | 2 +- graphql/version_resolver.go | 2 +- model/generate_test.go | 2 +- model/lifecycle_test.go | 16 +++---- model/patch_lifecycle_test.go | 4 +- model/task/db.go | 8 ++-- model/task/db_test.go | 38 +++++++++++----- model/task/task.go | 22 +++++----- model/task/task_test.go | 24 +++++++--- model/task_history.go | 2 +- model/task_lifecycle.go | 24 +++++----- model/task_lifecycle_test.go | 4 +- operations/cli_integration_test.go | 4 +- repotracker/repotracker_test.go | 44 ++++++++++++++----- rest/data/scheduler.go | 2 +- rest/route/host.go | 2 +- rest/route/patch_test.go | 14 +++--- rest/route/task_test.go | 4 +- scheduler/utilization_based_host_allocator.go | 10 ++--- .../utilization_based_host_allocator_test.go | 5 ++- trigger/patch.go | 22 +++++----- trigger/process_test.go | 6 +-- trigger/task.go | 1 + trigger/task_jira.go | 3 +- units/check_blocked_tasks.go | 6 +-- units/generate_tasks_test.go | 2 +- units/host_termination_test.go | 4 +- units/periodic_builds_test.go | 2 +- 30 files changed, 173 insertions(+), 114 deletions(-) diff --git a/graphql/query_resolver.go b/graphql/query_resolver.go index 94cab4186fb..aaa1933274e 100644 --- a/graphql/query_resolver.go +++ b/graphql/query_resolver.go @@ -670,7 +670,7 @@ func (r *queryResolver) TaskTestSample(ctx context.Context, versionID string, ta if dbTask.Version != versionID && dbTask.ParentPatchID != versionID { return nil, InputValidationError.Send(ctx, fmt.Sprintf("task '%s' does not belong to version '%s'", dbTask.Id, versionID)) } - taskOpts, err := dbTask.CreateTestResultsTaskOptions() + taskOpts, err := dbTask.CreateTestResultsTaskOptions(ctx) if err != nil { return nil, 
InternalServerError.Send(ctx, fmt.Sprintf("creating test results task options for task '%s': %s", dbTask.Id, err.Error())) } diff --git a/graphql/task_resolver.go b/graphql/task_resolver.go index 0fb09cb0f3d..f05c09086fd 100644 --- a/graphql/task_resolver.go +++ b/graphql/task_resolver.go @@ -227,7 +227,7 @@ func (r *taskResolver) CanSetPriority(ctx context.Context, obj *restModel.APITas return true, nil } if len(obj.ExecutionTasks) != 0 && !evergreen.IsFinishedTaskStatus(utility.FromStringPtr(obj.Status)) { - tasks, err := task.FindByExecutionTasksAndMaxExecution(utility.FromStringPtrSlice(obj.ExecutionTasks), obj.Execution) + tasks, err := task.FindByExecutionTasksAndMaxExecution(ctx, utility.FromStringPtrSlice(obj.ExecutionTasks), obj.Execution) if err != nil { return false, InternalServerError.Send(ctx, fmt.Sprintf("finding execution tasks for task '%s': %s", *obj.Id, err.Error())) } @@ -346,7 +346,7 @@ func (r *taskResolver) ExecutionTasksFull(ctx context.Context, obj *restModel.AP if len(obj.ExecutionTasks) == 0 { return nil, nil } - tasks, err := task.FindByExecutionTasksAndMaxExecution(utility.FromStringPtrSlice(obj.ExecutionTasks), obj.Execution) + tasks, err := task.FindByExecutionTasksAndMaxExecution(ctx, utility.FromStringPtrSlice(obj.ExecutionTasks), obj.Execution) if err != nil { return nil, InternalServerError.Send(ctx, fmt.Sprintf("finding execution tasks for task '%s': %s", utility.FromStringPtr(obj.Id), err.Error())) } @@ -387,7 +387,7 @@ func (r *taskResolver) Files(ctx context.Context, obj *restModel.APITask) (*Task fileCount := 0 if obj.DisplayOnly { - execTasks, err := task.Find(task.ByIds(utility.FromStringPtrSlice(obj.ExecutionTasks))) + execTasks, err := task.Find(ctx, task.ByIds(utility.FromStringPtrSlice(obj.ExecutionTasks))) if err != nil { return &emptyTaskFiles, ResourceNotFound.Send(ctx, err.Error()) } diff --git a/graphql/util.go b/graphql/util.go index b93397b9010..062e369179d 100644 --- a/graphql/util.go +++ b/graphql/util.go @@ 
-1008,7 +1008,7 @@ func getBaseTaskTestResultsOptions(ctx context.Context, dbTask *task.Task) ([]te } if baseTask != nil && baseTask.ResultsService == dbTask.ResultsService { - taskOpts, err = baseTask.CreateTestResultsTaskOptions() + taskOpts, err = baseTask.CreateTestResultsTaskOptions(ctx) if err != nil { return nil, InternalServerError.Send(ctx, fmt.Sprintf("Error creating test results task options for base task '%s': %s", baseTask.Id, err)) } diff --git a/graphql/version_resolver.go b/graphql/version_resolver.go index 8a81567729c..335fd8777ec 100644 --- a/graphql/version_resolver.go +++ b/graphql/version_resolver.go @@ -174,7 +174,7 @@ func (r *versionResolver) GeneratedTaskCounts(ctx context.Context, obj *restMode } var res []*GeneratedTaskCountResults - versionGeneratorTasks, err := task.Find(bson.M{ + versionGeneratorTasks, err := task.Find(ctx, bson.M{ task.VersionKey: versionID, task.GenerateTaskKey: true, }) diff --git a/model/generate_test.go b/model/generate_test.go index 559103080b5..9ddac80c764 100644 --- a/model/generate_test.go +++ b/model/generate_test.go @@ -1055,7 +1055,7 @@ func (s *GenerateSuite) TestSaveNewBuildsAndTasksWithBatchtime() { s.NoError(err) s.Require().NotZero(dbExistingBV) - tasksInExistingBV, err := task.Find(task.ByBuildId(sampleBuild.Id)) // without display + tasksInExistingBV, err := task.Find(ctx, task.ByBuildId(sampleBuild.Id)) // without display s.NoError(err) s.Len(tasksInExistingBV, 3) for _, tsk := range tasksInExistingBV { diff --git a/model/lifecycle_test.go b/model/lifecycle_test.go index 3b511baf936..e8b0b4db14e 100644 --- a/model/lifecycle_test.go +++ b/model/lifecycle_test.go @@ -205,7 +205,7 @@ func TestBuildSetPriority(t *testing.T) { So(SetBuildPriority(ctx, b.Id, 42, ""), ShouldBeNil) - tasks, err := task.Find(task.ByBuildId(b.Id)) + tasks, err := task.Find(ctx, task.ByBuildId(b.Id)) So(err, ShouldBeNil) So(len(tasks), ShouldEqual, 3) So(tasks[0].Priority, ShouldEqual, 42) @@ -489,7 +489,7 @@ func 
TestBuildMarkAborted(t *testing.T) { So(AbortBuild(ctx, b.Id, ""), ShouldBeNil) - abortedTasks, err := task.Find(task.ByAborted(true)) + abortedTasks, err := task.Find(ctx, task.ByAborted(true)) So(err, ShouldBeNil) So(len(abortedTasks), ShouldEqual, 2) So(taskIdInSlice(abortedTasks, abortableOne.Id), ShouldBeTrue) @@ -642,7 +642,7 @@ func TestBuildSetActivated(t *testing.T) { So(b.ActivatedBy, ShouldEqual, evergreen.GenerateTasksActivator) // only the matching task should have been updated that has not been set by a user - deactivatedTasks, err := task.Find(task.ByActivation(false)) + deactivatedTasks, err := task.Find(ctx, task.ByActivation(false)) So(err, ShouldBeNil) So(len(deactivatedTasks), ShouldEqual, 3) So(deactivatedTasks[0].Id, ShouldEqual, matching.Id) @@ -654,7 +654,7 @@ func TestBuildSetActivated(t *testing.T) { So(differentUserTask.ActivatedBy, ShouldEqual, user) So(ActivateBuildsAndTasks(ctx, []string{b.Id}, true, ""), ShouldBeNil) - activatedTasks, err := task.Find(task.ByActivation(true)) + activatedTasks, err := task.Find(ctx, task.ByActivation(true)) So(err, ShouldBeNil) So(len(activatedTasks), ShouldEqual, 5) }) @@ -2165,7 +2165,7 @@ func TestVersionRestart(t *testing.T) { taskIds := []string{"task1", "task3", "task4"} buildIds := []string{"build1", "build2"} assert.NoError(RestartVersion(ctx, "version", taskIds, false, "test")) - tasks, err := task.Find(task.ByIds(taskIds)) + tasks, err := task.Find(ctx, task.ByIds(taskIds)) assert.NoError(err) assert.NotEmpty(tasks) builds, err := build.Find(build.ByIds(buildIds)) @@ -3095,14 +3095,14 @@ func TestRecomputeNumDependents(t *testing.T) { assert.NoError(t, t5.Insert()) assert.NoError(t, RecomputeNumDependents(ctx, t3)) - tasks, err := task.Find(task.ByVersion(t1.Version)) + tasks, err := task.Find(ctx, task.ByVersion(t1.Version)) assert.NoError(t, err) for i, dbTask := range tasks { assert.Equal(t, i, dbTask.NumDependents) } assert.NoError(t, RecomputeNumDependents(ctx, t5)) - tasks, err = 
task.Find(task.ByVersion(t1.Version)) + tasks, err = task.Find(ctx, task.ByVersion(t1.Version)) assert.NoError(t, err) for i, dbTask := range tasks { assert.Equal(t, i, dbTask.NumDependents) @@ -3139,7 +3139,7 @@ func TestRecomputeNumDependents(t *testing.T) { assert.NoError(t, t9.Insert()) assert.NoError(t, RecomputeNumDependents(ctx, t8)) - tasks, err = task.Find(task.ByVersion(t6.Version)) + tasks, err = task.Find(ctx, task.ByVersion(t6.Version)) assert.NoError(t, err) expected := map[string]int{ "6": 0, diff --git a/model/patch_lifecycle_test.go b/model/patch_lifecycle_test.go index 6105a1f7ba8..7d90c6b6f33 100644 --- a/model/patch_lifecycle_test.go +++ b/model/patch_lifecycle_test.go @@ -319,7 +319,7 @@ func TestFinalizePatch(t *testing.T) { require.NoError(t, err) assert.Len(t, builds, 1) assert.Len(t, builds[0].Tasks, 2) - tasks, err := task.Find(bson.M{}) + tasks, err := task.Find(ctx, bson.M{}) require.NoError(t, err) assert.Len(t, tasks, 2) }, @@ -420,7 +420,7 @@ func TestFinalizePatch(t *testing.T) { assert.Len(t, builds, 1) assert.Len(t, builds[0].Tasks, 2) - tasks, err := task.Find(bson.M{}) + tasks, err := task.Find(ctx, bson.M{}) require.NoError(t, err) assert.Len(t, tasks, 2) for _, tsk := range tasks { diff --git a/model/task/db.go b/model/task/db.go index 406d1467249..818dcf899d0 100644 --- a/model/task/db.go +++ b/model/task/db.go @@ -890,7 +890,7 @@ func GetRecentTaskStats(ctx context.Context, period time.Duration, nameKey strin // FindByExecutionTasksAndMaxExecution returns the tasks corresponding to the // passed in taskIds and execution, or the most recent executions of those // tasks if they do not have a matching execution. 
-func FindByExecutionTasksAndMaxExecution(taskIds []string, execution int, filters ...bson.E) ([]Task, error) { +func FindByExecutionTasksAndMaxExecution(ctx context.Context, taskIds []string, execution int, filters ...bson.E) ([]Task, error) { query := bson.M{ IdKey: bson.M{ "$in": taskIds, @@ -902,7 +902,7 @@ func FindByExecutionTasksAndMaxExecution(taskIds []string, execution int, filter for _, filter := range filters { query[filter.Key] = filter.Value } - tasks, err := Find(query) + tasks, err := Find(ctx, query) if err != nil { return nil, errors.Wrap(err, "finding tasks") } @@ -1612,14 +1612,14 @@ func FindAllFirstExecution(query db.Q) ([]Task, error) { } // Find returns all tasks that satisfy the query it also filters out display tasks from the results. -func Find(filter bson.M) ([]Task, error) { +func Find(ctx context.Context, filter bson.M) ([]Task, error) { tasks := []Task{} _, exists := filter[DisplayOnlyKey] if !exists { filter[DisplayOnlyKey] = bson.M{"$ne": true} } query := db.Query(filter) - err := db.FindAllQ(Collection, query, &tasks) + err := db.FindAllQContext(ctx, Collection, query, &tasks) return tasks, err } diff --git a/model/task/db_test.go b/model/task/db_test.go index 0259e4301d2..b2967a02c94 100644 --- a/model/task/db_test.go +++ b/model/task/db_test.go @@ -35,6 +35,9 @@ func checkStatuses(t *testing.T, expected string, toCheck Task) { } func TestFindTasksByIds(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + Convey("When calling FindTasksByIds...", t, func() { So(db.Clear(Collection), ShouldBeNil) Convey("only tasks with the specified ids should be returned", func() { @@ -55,7 +58,7 @@ func TestFindTasksByIds(t *testing.T) { So(task.Insert(), ShouldBeNil) } - dbTasks, err := Find(ByIds([]string{"one", "two"})) + dbTasks, err := Find(ctx, ByIds([]string{"one", "two"})) So(err, ShouldBeNil) So(len(dbTasks), ShouldEqual, 2) So(dbTasks[0].Id, ShouldNotEqual, "three") @@ -131,6 +134,9 @@ func 
TestDisplayTasksByVersion(t *testing.T) { } func TestNonExecutionTasksByVersion(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + assert.NoError(t, db.Clear(Collection)) displayTask := Task{ Id: "dt", @@ -160,7 +166,7 @@ func TestNonExecutionTasksByVersion(t *testing.T) { } assert.NoError(t, db.InsertMany(Collection, displayTask, regularTask, wrongVersionTask, execTask, legacyTask)) - tasks, err := Find(NonExecutionTasksByVersions([]string{"v1", "v2"})) + tasks, err := Find(ctx, NonExecutionTasksByVersions([]string{"v1", "v2"})) assert.NoError(t, err) assert.Len(t, tasks, 3) // doesn't include wrong version or execution task with DisplayTaskId cached for _, task := range tasks { @@ -170,6 +176,9 @@ func TestNonExecutionTasksByVersion(t *testing.T) { } func TestFailedTasksByVersion(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + Convey("When calling FailedTasksByVersion...", t, func() { So(db.Clear(Collection), ShouldBeNil) Convey("only tasks with the failed statuses should be returned", func() { @@ -196,7 +205,7 @@ func TestFailedTasksByVersion(t *testing.T) { So(task.Insert(), ShouldBeNil) } - dbTasks, err := Find(FailedTasksByVersion("v1")) + dbTasks, err := Find(ctx, FailedTasksByVersion("v1")) So(err, ShouldBeNil) So(len(dbTasks), ShouldEqual, 2) So(dbTasks[0].Id, ShouldNotEqual, "three") @@ -206,6 +215,9 @@ func TestFailedTasksByVersion(t *testing.T) { } func TestPotentiallyBlockedTasksByIds(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + assert.NoError(t, db.Clear(Collection)) tasks := []Task{ { // Can't be blocked (override dependencies) @@ -274,7 +286,7 @@ func TestPotentiallyBlockedTasksByIds(t *testing.T) { ids = append(ids, task.Id) } - dbTasks, err := Find(PotentiallyBlockedTasksByIds(ids)) + dbTasks, err := Find(ctx, PotentiallyBlockedTasksByIds(ids)) require.NoError(t, err) require.Len(t, dbTasks, 3) assert.Contains(t, 
[]string{"t3", "t6", "t8"}, dbTasks[0].Id) @@ -283,6 +295,9 @@ func TestPotentiallyBlockedTasksByIds(t *testing.T) { } func TestFindTasksByVersionWithChildTasks(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + assert.NoError(t, db.ClearCollections(Collection)) mainVersion := "main_version" mainVersionTaskIds := []string{"t1", "t3"} @@ -310,7 +325,7 @@ func TestFindTasksByVersionWithChildTasks(t *testing.T) { assert.NoError(t, task.Insert()) } - dbTasks, err := Find(ByVersionWithChildTasks(mainVersion)) + dbTasks, err := Find(ctx, ByVersionWithChildTasks(mainVersion)) assert.NoError(t, err) assert.Len(t, dbTasks, 2) for _, dbTask := range dbTasks { @@ -868,6 +883,9 @@ func TestFindNeedsContainerAllocation(t *testing.T) { } func TestFindByStaleRunningTask(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + defer func() { assert.NoError(t, db.ClearCollections(Collection)) }() @@ -880,7 +898,7 @@ func TestFindByStaleRunningTask(t *testing.T) { } require.NoError(t, tsk.Insert()) - found, err := Find(ByStaleRunningTask(30 * time.Minute)) + found, err := Find(ctx, ByStaleRunningTask(30*time.Minute)) require.NoError(t, err) require.Len(t, found, 1) assert.Equal(t, tsk.Id, found[0].Id) @@ -893,7 +911,7 @@ func TestFindByStaleRunningTask(t *testing.T) { } require.NoError(t, tsk.Insert()) - found, err := Find(ByStaleRunningTask(30 * time.Minute)) + found, err := Find(ctx, ByStaleRunningTask(30*time.Minute)) require.NoError(t, err) require.Len(t, found, 1) assert.Equal(t, tsk.Id, found[0].Id) @@ -920,7 +938,7 @@ func TestFindByStaleRunningTask(t *testing.T) { require.NoError(t, tsk.Insert()) } - found, err := Find(ByStaleRunningTask(30 * time.Minute)) + found, err := Find(ctx, ByStaleRunningTask(30*time.Minute)) require.NoError(t, err) require.Len(t, found, 2) for _, tsk := range found { @@ -935,7 +953,7 @@ func TestFindByStaleRunningTask(t *testing.T) { } require.NoError(t, tsk.Insert()) - 
found, err := Find(ByStaleRunningTask(30 * time.Minute)) + found, err := Find(ctx, ByStaleRunningTask(30*time.Minute)) require.NoError(t, err) assert.Empty(t, found) }, @@ -947,7 +965,7 @@ func TestFindByStaleRunningTask(t *testing.T) { } require.NoError(t, tsk.Insert()) - found, err := Find(ByStaleRunningTask(0)) + found, err := Find(ctx, ByStaleRunningTask(0)) require.NoError(t, err) assert.Empty(t, found) }, diff --git a/model/task/task.go b/model/task/task.go index b8d5b443c4b..e9e5e354fca 100644 --- a/model/task/task.go +++ b/model/task/task.go @@ -1924,11 +1924,11 @@ func (t *Task) SetResultsInfo(service string, failedResults bool) error { } // HasResults returns whether the task has test results or not. -func (t *Task) HasResults() bool { +func (t *Task) HasResults(ctx context.Context) bool { if t.DisplayOnly && len(t.ExecutionTasks) > 0 { hasResults := []bson.M{{ResultsServiceKey: bson.M{"$exists": true}}, {HasCedarResultsKey: true}} if t.Archived { - execTasks, err := FindByExecutionTasksAndMaxExecution(t.ExecutionTasks, t.Execution, bson.E{Key: "$or", Value: hasResults}) + execTasks, err := FindByExecutionTasksAndMaxExecution(ctx, t.ExecutionTasks, t.Execution, bson.E{Key: "$or", Value: hasResults}) if err != nil { grip.Error(message.WrapError(err, message.Fields{ "message": "getting execution tasks for archived display task", @@ -3089,7 +3089,7 @@ func ArchiveMany(ctx context.Context, tasks []Task) error { var err error if t.IsRestartFailedOnly() { - execTasks, err = Find(FailedTasksByIds(t.ExecutionTasks)) + execTasks, err = Find(ctx, FailedTasksByIds(t.ExecutionTasks)) } else { execTasks, err = FindAll(db.Query(ByIdsAndStatus(t.ExecutionTasks, evergreen.TaskCompletedStatuses))) } @@ -3241,7 +3241,7 @@ func (t *Task) PopulateTestResults(ctx context.Context) error { // GetTestResults returns the task's test results filtered, sorted, and // paginated as specified by the optional filter options. 
func (t *Task) GetTestResults(ctx context.Context, env evergreen.Environment, filterOpts *testresult.FilterOptions) (testresult.TaskTestResults, error) { - taskOpts, err := t.CreateTestResultsTaskOptions() + taskOpts, err := t.CreateTestResultsTaskOptions(ctx) if err != nil { return testresult.TaskTestResults{}, errors.Wrap(err, "creating test results task options") } @@ -3254,7 +3254,7 @@ func (t *Task) GetTestResults(ctx context.Context, env evergreen.Environment, fi // GetTestResultsStats returns basic statistics of the task's test results. func (t *Task) GetTestResultsStats(ctx context.Context, env evergreen.Environment) (testresult.TaskTestResultsStats, error) { - taskOpts, err := t.CreateTestResultsTaskOptions() + taskOpts, err := t.CreateTestResultsTaskOptions(ctx) if err != nil { return testresult.TaskTestResultsStats{}, errors.Wrap(err, "creating test results task options") } @@ -3269,7 +3269,7 @@ func (t *Task) GetTestResultsStats(ctx context.Context, env evergreen.Environmen // the task. If the task does not have any results or does not have any failing // tests, a nil slice is returned. func (t *Task) GetFailedTestSample(ctx context.Context, env evergreen.Environment) ([]string, error) { - taskOpts, err := t.CreateTestResultsTaskOptions() + taskOpts, err := t.CreateTestResultsTaskOptions(ctx) if err != nil { return nil, errors.Wrap(err, "creating test results task options") } @@ -3287,7 +3287,7 @@ func (t *Task) GetFailedTestSample(ctx context.Context, env evergreen.Environmen // additional tasks are required for fetching test results, such as when // sorting results by some base status, using this function to populate those // task options is useful. 
-func (t *Task) CreateTestResultsTaskOptions() ([]testresult.TaskOptions, error) { +func (t *Task) CreateTestResultsTaskOptions(ctx context.Context) ([]testresult.TaskOptions, error) { var taskOpts []testresult.TaskOptions if t.DisplayOnly && len(t.ExecutionTasks) > 0 { var ( @@ -3296,7 +3296,7 @@ func (t *Task) CreateTestResultsTaskOptions() ([]testresult.TaskOptions, error) ) hasResults := []bson.M{{ResultsServiceKey: bson.M{"$exists": true}}, {HasCedarResultsKey: true}} if t.Archived { - execTasksWithResults, err = FindByExecutionTasksAndMaxExecution(t.ExecutionTasks, t.Execution, bson.E{Key: "$or", Value: hasResults}) + execTasksWithResults, err = FindByExecutionTasksAndMaxExecution(ctx, t.ExecutionTasks, t.Execution, bson.E{Key: "$or", Value: hasResults}) } else { query := ByIds(t.ExecutionTasks) query["$or"] = hasResults @@ -3317,7 +3317,7 @@ func (t *Task) CreateTestResultsTaskOptions() ([]testresult.TaskOptions, error) ResultsService: execTask.ResultsService, }) } - } else if t.HasResults() { + } else if t.HasResults(ctx) { taskID := t.Id if t.Archived { taskID = t.OldTaskId @@ -3424,7 +3424,7 @@ func updateSchedulingLimitForResetWhenFinished(ctx context.Context, t *Task, cal var tasks []Task var err error if t.DisplayOnly { - tasks, err = Find(ByIds(t.ExecutionTasks)) + tasks, err = Find(ctx, ByIds(t.ExecutionTasks)) if err != nil { return errors.Wrapf(err, "finding execution tasks for '%s'", t.Id) } @@ -3491,7 +3491,7 @@ func FindHostSchedulable(ctx context.Context, distroID string) ([]Task, error) { return nil, errors.WithStack(err) } - return Find(query) + return Find(ctx, query) } func addApplicableDistroFilter(ctx context.Context, id string, fieldName string, query bson.M) error { diff --git a/model/task/task_test.go b/model/task/task_test.go index 9279c82e784..8c452a054c8 100644 --- a/model/task/task_test.go +++ b/model/task/task_test.go @@ -1443,6 +1443,9 @@ func TestSiblingDependency(t *testing.T) { } func TestBulkInsert(t *testing.T) { + ctx, 
cancel := context.WithCancel(context.Background()) + defer cancel() + assert := assert.New(t) require.NoError(t, db.ClearCollections(Collection)) t1_a := Task{ @@ -1463,7 +1466,7 @@ func TestBulkInsert(t *testing.T) { } tasks := Tasks{&t1_a, &t1_b, &t2, &t3} assert.Error(tasks.InsertUnordered(context.Background())) - dbTasks, err := Find(ByVersion("version")) + dbTasks, err := Find(ctx, ByVersion("version")) assert.NoError(err) assert.Len(dbTasks, 3) for _, dbTask := range dbTasks { @@ -4286,6 +4289,9 @@ func TestArchiveFailedOnly(t *testing.T) { } func TestByExecutionTasksAndMaxExecution(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + tasksToFetch := []string{"t1", "t2"} t.Run("Fetching latest execution with same executions", func(t *testing.T) { require.NoError(t, db.ClearCollections(Collection, OldCollection)) @@ -4314,7 +4320,7 @@ func TestByExecutionTasksAndMaxExecution(t *testing.T) { ot2 = *ot2.makeArchivedTask() assert.NoError(t, db.Insert(OldCollection, ot2)) - tasks, err := FindByExecutionTasksAndMaxExecution(tasksToFetch, 1) + tasks, err := FindByExecutionTasksAndMaxExecution(ctx, tasksToFetch, 1) tasks = convertOldTasksIntoTasks(tasks) assert.NoError(t, err) assert.Len(t, tasks, 2) @@ -4352,7 +4358,7 @@ func TestByExecutionTasksAndMaxExecution(t *testing.T) { ot2 = *ot2.makeArchivedTask() assert.NoError(t, db.Insert(OldCollection, ot2)) - tasks, err := FindByExecutionTasksAndMaxExecution(tasksToFetch, 2) + tasks, err := FindByExecutionTasksAndMaxExecution(ctx, tasksToFetch, 2) tasks = convertOldTasksIntoTasks(tasks) assert.NoError(t, err) assert.Len(t, tasks, 2) @@ -4399,7 +4405,7 @@ func TestByExecutionTasksAndMaxExecution(t *testing.T) { ot2 = *ot2.makeArchivedTask() assert.NoError(t, db.Insert(OldCollection, ot2)) - tasks, err := FindByExecutionTasksAndMaxExecution(tasksToFetch, 1) + tasks, err := FindByExecutionTasksAndMaxExecution(ctx, tasksToFetch, 1) tasks = convertOldTasksIntoTasks(tasks) 
assert.NoError(t, err) assert.Len(t, tasks, 2) @@ -4894,6 +4900,9 @@ func TestHasResults(t *testing.T) { }, } { t.Run(test.name, func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + for _, execTask := range test.executionTasks { _, err := db.Upsert(Collection, ById(execTask.Id), &execTask) require.NoError(t, err) @@ -4903,7 +4912,7 @@ func TestHasResults(t *testing.T) { require.NoError(t, err) } - assert.Equal(t, test.hasResults, test.tsk.HasResults()) + assert.Equal(t, test.hasResults, test.tsk.HasResults(ctx)) }) } } @@ -5076,6 +5085,9 @@ func TestCreateTestResultsTaskOptions(t *testing.T) { }, } { t.Run(test.name, func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + for _, execTask := range test.executionTasks { _, err := db.Upsert(Collection, ById(execTask.Id), &execTask) require.NoError(t, err) @@ -5086,7 +5098,7 @@ func TestCreateTestResultsTaskOptions(t *testing.T) { require.NoError(t, err) } - opts, err := test.tsk.CreateTestResultsTaskOptions() + opts, err := test.tsk.CreateTestResultsTaskOptions(ctx) require.NoError(t, err) assert.ElementsMatch(t, test.expectedOpts, opts) }) diff --git a/model/task_history.go b/model/task_history.go index dc643c6098e..a6eb04d0b42 100644 --- a/model/task_history.go +++ b/model/task_history.go @@ -234,7 +234,7 @@ func (thi *taskHistoryIterator) GetFailedTests(tasks []task.Task) (map[string][] var allTaskOpts []testresult.TaskOptions taskIDsToDisplay := map[string]string{} for _, tsk := range tasks { - taskOpts, err := tsk.CreateTestResultsTaskOptions() + taskOpts, err := tsk.CreateTestResultsTaskOptions(ctx) if err != nil { return nil, errors.Wrap(err, "creating test results task options") } diff --git a/model/task_lifecycle.go b/model/task_lifecycle.go index edce1518568..e85641bcc64 100644 --- a/model/task_lifecycle.go +++ b/model/task_lifecycle.go @@ -42,7 +42,7 @@ func SetActiveState(ctx context.Context, caller string, active bool, 
tasks ...ta for _, t := range tasks { originalTasks := []task.Task{t} if t.DisplayOnly { - execTasks, err := task.Find(task.ByIds(t.ExecutionTasks)) + execTasks, err := task.Find(ctx, task.ByIds(t.ExecutionTasks)) catcher.Wrap(err, "getting execution tasks") originalTasks = append(originalTasks, execTasks...) } @@ -520,7 +520,7 @@ func getStepback(ctx context.Context, taskId string) (stepbackInstructions, erro // tasks one by one). If there is a previous task and if not it returns nothing. func doLinearStepback(ctx context.Context, t *task.Task) error { if t.DisplayOnly { - execTasks, err := task.Find(task.ByIds(t.ExecutionTasks)) + execTasks, err := task.Find(ctx, task.ByIds(t.ExecutionTasks)) if err != nil { return errors.Wrapf(err, "finding tasks for stepback of '%s'", t.Id) } @@ -553,7 +553,7 @@ func doLinearStepback(ctx context.Context, t *task.Task) error { func doBisectStepback(ctx context.Context, t *task.Task) error { // Do stepback for all execution tasks. if t.DisplayOnly { - execTasks, err := task.Find(task.ByIds(t.ExecutionTasks)) + execTasks, err := task.Find(ctx, task.ByIds(t.ExecutionTasks)) if err != nil { return errors.Wrapf(err, "finding tasks for stepback of '%s'", t.Id) } @@ -789,7 +789,7 @@ func MarkEnd(ctx context.Context, settings *evergreen.Settings, t *task.Task, ca }) return nil } - if detailsCopy.Status == evergreen.TaskSucceeded && t.MustHaveResults && !t.HasResults() { + if detailsCopy.Status == evergreen.TaskSucceeded && t.MustHaveResults && !t.HasResults(ctx) { detailsCopy.Type = evergreen.CommandTypeTest detailsCopy.Status = evergreen.TaskFailed detailsCopy.Description = evergreen.TaskDescriptionNoResults @@ -1359,8 +1359,8 @@ func checkUpdateBuildPRStatusPending(ctx context.Context, b *build.Build) error // updateBuildStatus updates the status of the build based on its tasks' statuses // Returns true if the build's status has changed or if all the build's tasks become blocked / unscheduled. 
-func updateBuildStatus(b *build.Build) (bool, error) { - buildTasks, err := task.Find(task.ByBuildId(b.Id)) +func updateBuildStatus(ctx context.Context, b *build.Build) (bool, error) { + buildTasks, err := task.Find(ctx, task.ByBuildId(b.Id)) if err != nil { return false, errors.Wrapf(err, "getting tasks in build '%s'", b.Id) } @@ -1613,7 +1613,7 @@ func UpdateBuildAndVersionStatusForTask(ctx context.Context, t *task.Task) error if taskBuild == nil { return errors.Errorf("no build '%s' found for task '%s'", t.BuildId, t.Id) } - buildStatusChanged, err := updateBuildStatus(taskBuild) + buildStatusChanged, err := updateBuildStatus(ctx, taskBuild) if err != nil { return errors.Wrapf(err, "updating build '%s' status", taskBuild.Id) } @@ -1710,7 +1710,7 @@ func UpdateVersionAndPatchStatusForBuilds(ctx context.Context, buildIds []string versionsToUpdate := make(map[string]bool) for _, build := range builds { - buildStatusChanged, err := updateBuildStatus(&build) + buildStatusChanged, err := updateBuildStatus(ctx, &build) if err != nil { return errors.Wrapf(err, "updating build '%s' status", build.Id) } @@ -2219,7 +2219,7 @@ func UpdateDisplayTaskForTask(ctx context.Context, t *task.Task) error { return errors.Errorf("task '%s' is not a display task", originalDisplayTask.Id) } - updatedDisplayTask, err = tryUpdateDisplayTaskAtomically(*originalDisplayTask) + updatedDisplayTask, err = tryUpdateDisplayTaskAtomically(ctx, *originalDisplayTask) if err == nil { // Update the cached display task in case it's used later on. 
t.DisplayTask = updatedDisplayTask @@ -2244,10 +2244,10 @@ func UpdateDisplayTaskForTask(ctx context.Context, t *task.Task) error { return nil } -func tryUpdateDisplayTaskAtomically(dt task.Task) (updated *task.Task, err error) { +func tryUpdateDisplayTaskAtomically(ctx context.Context, dt task.Task) (updated *task.Task, err error) { originalStatus := dt.Status - execTasks, err := task.Find(task.ByIds(dt.ExecutionTasks)) + execTasks, err := task.Find(ctx, task.ByIds(dt.ExecutionTasks)) if err != nil { return &dt, errors.Wrap(err, "retrieving execution tasks") } @@ -2404,7 +2404,7 @@ func checkResetDisplayTask(ctx context.Context, setting *evergreen.Settings, use if !t.ResetWhenFinished && !t.ResetFailedWhenFinished { return nil } - execTasks, err := task.Find(task.ByIds(t.ExecutionTasks)) + execTasks, err := task.Find(ctx, task.ByIds(t.ExecutionTasks)) if err != nil { return errors.Wrapf(err, "getting execution tasks for display task '%s'", t.Id) } diff --git a/model/task_lifecycle_test.go b/model/task_lifecycle_test.go index baf9b63379e..5045a93f117 100644 --- a/model/task_lifecycle_test.go +++ b/model/task_lifecycle_test.go @@ -7134,7 +7134,7 @@ func TestHandleEndTaskForGithubMergeQueueTask(t *testing.T) { // Neither of these should abort any tasks. assert.NoError(t, HandleEndTaskForGithubMergeQueueTask(ctx, t1, evergreen.TaskSucceeded)) assert.NoError(t, HandleEndTaskForGithubMergeQueueTask(ctx, t2, evergreen.TaskFailed)) - tasks, err := task.Find(task.ByVersion("version1")) + tasks, err := task.Find(ctx, task.ByVersion("version1")) assert.NoError(t, err) for _, task := range tasks { // only t2 should be aborted, since it already was @@ -7147,7 +7147,7 @@ func TestHandleEndTaskForGithubMergeQueueTask(t *testing.T) { // This should abort all tasks. 
assert.NoError(t, HandleEndTaskForGithubMergeQueueTask(ctx, t3, evergreen.TaskFailed)) - tasks, err = task.Find(task.ByVersion("version1")) + tasks, err = task.Find(ctx, task.ByVersion("version1")) assert.NoError(t, err) for _, task := range tasks { // all but t1, which already succeeded, and t3, the caller, should be aborted diff --git a/operations/cli_integration_test.go b/operations/cli_integration_test.go index 204673c8e34..7a089b0c63c 100644 --- a/operations/cli_integration_test.go +++ b/operations/cli_integration_test.go @@ -435,7 +435,7 @@ func TestCLIFunctions(t *testing.T) { patches, err = ac.GetPatches(0) So(err, ShouldBeNil) // After canceling, tasks in the version should be deactivated - tasks, err := task.Find(task.ByVersion(patches[0].Version)) + tasks, err := task.Find(ctx, task.ByVersion(patches[0].Version)) So(err, ShouldBeNil) for _, t := range tasks { So(t.Activated, ShouldBeFalse) @@ -549,7 +549,7 @@ func TestCLIFunctions(t *testing.T) { So(ac.CancelPatch(newPatch.Id.Hex()), ShouldBeNil) patches, err := ac.GetPatches(0) So(err, ShouldBeNil) - tasks, err := task.Find(task.ByVersion(patches[0].Version)) + tasks, err := task.Find(ctx, task.ByVersion(patches[0].Version)) So(err, ShouldBeNil) for _, t := range tasks { So(t.Activated, ShouldBeFalse) diff --git a/repotracker/repotracker_test.go b/repotracker/repotracker_test.go index 1ef9306b996..358d8274069 100644 --- a/repotracker/repotracker_test.go +++ b/repotracker/repotracker_test.go @@ -502,7 +502,7 @@ tasks: require.NotZero(t, build1) // neither batchtime task nor disabled task should be activated - tasks, err := task.Find(task.ByBuildId(build1.Id)) + tasks, err := task.Find(ctx, task.ByBuildId(build1.Id)) assert.NoError(t, err) require.NotEmpty(t, tasks) assert.Len(t, tasks, 3) @@ -524,7 +524,7 @@ tasks: assert.NoError(t, err) require.NotZero(t, build3) - tasks, err = task.Find(task.ByBuildId(build3.Id)) + tasks, err = task.Find(ctx, task.ByBuildId(build3.Id)) assert.NoError(t, err) 
require.Len(t, tasks, 1) assert.Equal(t, evergreen.DisabledTaskPriority, tasks[0].Priority) @@ -980,6 +980,9 @@ func (s *CreateVersionFromConfigSuite) TearDownTest() { } func (s *CreateVersionFromConfigSuite) TestCreateBasicVersion() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + configYml := ` buildvariants: - name: bv @@ -1024,12 +1027,15 @@ tasks: s.Equal(v.Id, dbBuild.Version) s.Len(dbBuild.Tasks, 2) - dbTasks, err := task.Find(task.ByVersion(v.Id)) + dbTasks, err := task.Find(ctx, task.ByVersion(v.Id)) s.NoError(err) s.Len(dbTasks, 2) } func (s *CreateVersionFromConfigSuite) TestInvalidConfigErrors() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + configYml := ` buildvariants: - name: bv @@ -1075,12 +1081,15 @@ tasks: s.NoError(err) s.Nil(dbBuild) - dbTasks, err := task.Find(task.ByVersion(v.Id)) + dbTasks, err := task.Find(ctx, task.ByVersion(v.Id)) s.NoError(err) s.Empty(dbTasks) } func (s *CreateVersionFromConfigSuite) TestInvalidAliasErrors() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + configYml := ` buildvariants: - name: bv @@ -1122,7 +1131,7 @@ tasks: s.NoError(err) s.Nil(dbBuild) - dbTasks, err := task.Find(task.ByVersion(v.Id)) + dbTasks, err := task.Find(ctx, task.ByVersion(v.Id)) s.NoError(err) s.Empty(dbTasks) } @@ -1180,6 +1189,9 @@ tasks: } func (s *CreateVersionFromConfigSuite) TestTransactionAbort() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + configYml := ` buildvariants: - name: bv @@ -1210,7 +1222,7 @@ tasks: v, err = CreateVersionFromConfig(s.ctx, projectInfo, model.VersionMetadata{Revision: *s.rev, SourceVersion: s.sourceVersion}, false, nil) s.Error(err) - tasks, err := task.Find(task.ByVersion(v.Id)) + tasks, err := task.Find(ctx, task.ByVersion(v.Id)) s.NoError(err) s.Empty(tasks) } @@ -1365,6 +1377,9 @@ tasks: } func (s *CreateVersionFromConfigSuite) TestVersionWithDependencies() { + ctx, cancel := 
context.WithCancel(context.Background()) + defer cancel() + configYml := ` buildvariants: - name: bv @@ -1411,12 +1426,15 @@ tasks: s.Equal(v.Id, dbBuild.Version) s.Len(dbBuild.Tasks, 2) - dbTasks, err := task.Find(task.ByVersion(v.Id)) + dbTasks, err := task.Find(ctx, task.ByVersion(v.Id)) s.NoError(err) s.Len(dbTasks, 2) } func (s *CreateVersionFromConfigSuite) TestWithAliasAndPatchOptionalDependencyDoesNotCreateDependentTaskAutomatically() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + configYml := ` buildvariants: - name: bv1 @@ -1458,7 +1476,7 @@ tasks: s.Require().NotNil(v) s.Empty(v.Errors) - tasks, err := task.Find(task.ByVersion(v.Id)) + tasks, err := task.Find(ctx, task.ByVersion(v.Id)) s.NoError(err) s.Require().Len(tasks, 1) s.Equal("task1", tasks[0].DisplayName, "should create task matching alias") @@ -1467,6 +1485,9 @@ tasks: } func (s *CreateVersionFromConfigSuite) TestWithAliasMatchingTaskGroup() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + configYml := ` buildvariants: - name: bv1 @@ -1512,7 +1533,7 @@ task_groups: s.Require().NotNil(v) s.Empty(v.Errors) - tasks, err := task.Find(task.ByVersion(v.Id)) + tasks, err := task.Find(ctx, task.ByVersion(v.Id)) s.NoError(err) s.Require().Len(tasks, 2) var foundTask1, foundTask2 bool @@ -1534,6 +1555,9 @@ task_groups: } func (s *CreateVersionFromConfigSuite) TestWithAliasAndPatchOptionalDependencyCreatesDependencyIfDependentTaskIsCreated() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + configYml := ` buildvariants: - name: bv1 @@ -1577,7 +1601,7 @@ tasks: s.Require().NotNil(v) s.Empty(v.Errors) - tasks, err := task.Find(task.ByVersion(v.Id)) + tasks, err := task.Find(ctx, task.ByVersion(v.Id)) s.NoError(err) s.Len(tasks, 2) diff --git a/rest/data/scheduler.go b/rest/data/scheduler.go index 14a1bca5ed0..af3d6bbbf65 100644 --- a/rest/data/scheduler.go +++ b/rest/data/scheduler.go @@ -14,7 +14,7 @@ func 
CompareTasks(ctx context.Context, taskIds []string, useLegacy bool) ([]stri if len(taskIds) == 0 { return nil, nil, nil } - tasks, err := task.Find(task.ByIds(taskIds)) + tasks, err := task.Find(ctx, task.ByIds(taskIds)) if err != nil { return nil, nil, errors.Wrap(err, "finding tasks to compare") } diff --git a/rest/route/host.go b/rest/route/host.go index 17b3b442434..08dd8d8b42b 100644 --- a/rest/route/host.go +++ b/rest/route/host.go @@ -260,7 +260,7 @@ func (hgh *hostGetHandler) Run(ctx context.Context) gimlet.Responder { var tasks []task.Task if len(taskIds) > 0 { - tasks, err = task.Find(task.ByIds(taskIds)) + tasks, err = task.Find(ctx, task.ByIds(taskIds)) if err != nil { return gimlet.MakeJSONInternalErrorResponder(errors.Wrapf(err, "finding tasks %s", taskIds)) } diff --git a/rest/route/patch_test.go b/rest/route/patch_test.go index d7127332b99..fc7348833a4 100644 --- a/rest/route/patch_test.go +++ b/rest/route/patch_test.go @@ -981,7 +981,7 @@ buildvariants: respVersion := resp.Data().(restModel.APIVersion) assert.Equal(t, unfinalized.Id.Hex(), *respVersion.Id) assert.Equal(t, description, *respVersion.Message) - tasks, err := task.Find(task.ByVersion(*respVersion.Id)) + tasks, err := task.Find(ctx, task.ByVersion(*respVersion.Id)) assert.NoError(t, err) assert.Len(t, tasks, 2) foundCompile := false @@ -1017,7 +1017,7 @@ buildvariants: respVersion = resp.Data().(restModel.APIVersion) assert.Equal(t, unfinalized.Id.Hex(), *respVersion.Id) assert.Equal(t, description, *respVersion.Message) - tasks, err = task.Find(task.ByVersion(*respVersion.Id)) + tasks, err = task.Find(ctx, task.ByVersion(*respVersion.Id)) assert.NoError(t, err) assert.Len(t, tasks, 3) foundCompile = false @@ -1074,7 +1074,7 @@ buildvariants: respVersion = resp.Data().(restModel.APIVersion) assert.Equal(t, patch2.Id.Hex(), *respVersion.Id) assert.Equal(t, "", *respVersion.Message) - tasks, err = task.Find(task.ByVersion(*respVersion.Id)) + tasks, err = task.Find(ctx, 
task.ByVersion(*respVersion.Id)) assert.NoError(t, err) assert.Len(t, tasks, 4) @@ -1107,7 +1107,7 @@ buildvariants: respVersion = resp.Data().(restModel.APIVersion) assert.Equal(t, patch3.Id.Hex(), *respVersion.Id) assert.Equal(t, "", *respVersion.Message) - tasks, err = task.Find(task.ByVersion(*respVersion.Id)) + tasks, err = task.Find(ctx, task.ByVersion(*respVersion.Id)) assert.NoError(t, err) assert.Len(t, tasks, 2) } @@ -1394,7 +1394,7 @@ tasks: respVersion := resp.Data().(restModel.APIVersion) assert.Equal(t, unfinalized.Id.Hex(), *respVersion.Id) assert.Equal(t, description, *respVersion.Message) - tasks, err := task.Find(task.ByVersion(*respVersion.Id)) + tasks, err := task.Find(ctx, task.ByVersion(*respVersion.Id)) assert.NoError(t, err) assert.Len(t, tasks, 1) // manually set the task as running and its generated JSON for simplicity @@ -1426,7 +1426,7 @@ tasks: respVersion = resp.Data().(restModel.APIVersion) assert.Equal(t, unfinalized.Id.Hex(), *respVersion.Id) assert.Equal(t, description, *respVersion.Message) - tasks, err = task.Find(task.ByVersion(*respVersion.Id)) + tasks, err = task.Find(ctx, task.ByVersion(*respVersion.Id)) assert.NoError(t, err) // affirm that pre-existing inactive tasks are all activated now for _, foundTask := range tasks { @@ -1453,7 +1453,7 @@ tasks: respVersion = resp.Data().(restModel.APIVersion) assert.Equal(t, unfinalized.Id.Hex(), *respVersion.Id) assert.Equal(t, description, *respVersion.Message) - tasks, err = task.Find(task.ByVersion(*respVersion.Id)) + tasks, err = task.Find(ctx, task.ByVersion(*respVersion.Id)) assert.NoError(t, err) numTGTasksScheduled := 0 for _, foundTask := range tasks { diff --git a/rest/route/task_test.go b/rest/route/task_test.go index 61543122368..c40d371be10 100644 --- a/rest/route/task_test.go +++ b/rest/route/task_test.go @@ -60,7 +60,7 @@ func (s *TaskAbortSuite) TestAbort() { s.Equal(http.StatusOK, res.Status()) s.NotNil(res) - tasks, err := task.Find(task.ByIds([]string{"task1", 
"task2"})) + tasks, err := task.Find(ctx, task.ByIds([]string{"task1", "task2"})) s.NoError(err) s.Equal("user1", tasks[0].ActivatedBy) s.Equal("", tasks[1].ActivatedBy) @@ -71,7 +71,7 @@ func (s *TaskAbortSuite) TestAbort() { res = rm.Run(ctx) s.Equal(http.StatusOK, res.Status()) s.NotNil(res) - tasks, err = task.Find(task.ByIds([]string{"task1", "task2"})) + tasks, err = task.Find(ctx, task.ByIds([]string{"task1", "task2"})) s.NoError(err) s.Equal("user1", tasks[0].AbortInfo.User) s.Equal("", tasks[1].AbortInfo.User) diff --git a/scheduler/utilization_based_host_allocator.go b/scheduler/utilization_based_host_allocator.go index 5c05dd84bc8..f084e7f29b6 100644 --- a/scheduler/utilization_based_host_allocator.go +++ b/scheduler/utilization_based_host_allocator.go @@ -152,7 +152,7 @@ func evalHostUtilization(ctx context.Context, d distro.Distro, taskGroupData Tas } // determine how many free hosts we have that are already up - numFreeHosts, err := calcExistingFreeHosts(existingHosts, futureHostFraction, maxDurationThreshold) + numFreeHosts, err := calcExistingFreeHosts(ctx, existingHosts, futureHostFraction, maxDurationThreshold) if err != nil { return numNewHosts, numFreeHosts, err } @@ -293,7 +293,7 @@ func calcNewHostsNeeded(scheduledDuration, maxDurationPerHost time.Duration, num // calcExistingFreeHosts returns the number of hosts that are not running a task, // plus hosts that will soon be free scaled by some fraction -func calcExistingFreeHosts(existingHosts []host.Host, futureHostFactor float64, maxDurationPerHost time.Duration) (int, error) { +func calcExistingFreeHosts(ctx context.Context, existingHosts []host.Host, futureHostFactor float64, maxDurationPerHost time.Duration) (int, error) { numFreeHosts := 0 if futureHostFactor > 1 { return numFreeHosts, errors.New("future host factor cannot be greater than 1") @@ -305,7 +305,7 @@ func calcExistingFreeHosts(existingHosts []host.Host, futureHostFactor float64, } } - soonToBeFree, err := 
getSoonToBeFreeHosts(existingHosts, futureHostFactor, maxDurationPerHost) + soonToBeFree, err := getSoonToBeFreeHosts(ctx, existingHosts, futureHostFactor, maxDurationPerHost) if err != nil { return 0, err } @@ -317,7 +317,7 @@ func calcExistingFreeHosts(existingHosts []host.Host, futureHostFactor float64, // to be free for some fraction of the next maxDurationPerHost interval // the final value is scaled by some fraction representing how confident we are that // the hosts will actually be free in the expected amount of time -func getSoonToBeFreeHosts(existingHosts []host.Host, futureHostFraction float64, maxDurationPerHost time.Duration) (float64, error) { +func getSoonToBeFreeHosts(ctx context.Context, existingHosts []host.Host, futureHostFraction float64, maxDurationPerHost time.Duration) (float64, error) { runningTaskIds := []string{} for _, existingDistroHost := range existingHosts { @@ -330,7 +330,7 @@ func getSoonToBeFreeHosts(existingHosts []host.Host, futureHostFraction float64, return 0.0, nil } - runningTasks, err := task.Find(task.ByIds(runningTaskIds)) + runningTasks, err := task.Find(ctx, task.ByIds(runningTaskIds)) if err != nil { return 0.0, err } diff --git a/scheduler/utilization_based_host_allocator_test.go b/scheduler/utilization_based_host_allocator_test.go index ff440850879..e66a9c76930 100644 --- a/scheduler/utilization_based_host_allocator_test.go +++ b/scheduler/utilization_based_host_allocator_test.go @@ -170,6 +170,9 @@ func (s *UtilizationAllocatorSuite) TestCalcNewHostsNeeded() { } func (s *UtilizationAllocatorSuite) TestCalcExistingFreeHosts() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + h1 := host.Host{ Id: "h1", RunningTask: "t1", @@ -215,7 +218,7 @@ func (s *UtilizationAllocatorSuite) TestCalcExistingFreeHosts() { } s.NoError(t3.Insert()) - freeHosts, err := calcExistingFreeHosts([]host.Host{h1, h2, h3, h4, h5}, 1, evergreen.MaxDurationPerDistroHost) + freeHosts, err := calcExistingFreeHosts(ctx, 
[]host.Host{h1, h2, h3, h4, h5}, 1, evergreen.MaxDurationPerDistroHost) s.NoError(err) s.Equal(3, freeHosts) } diff --git a/trigger/patch.go b/trigger/patch.go index c8b74204694..a62f5d1804b 100644 --- a/trigger/patch.go +++ b/trigger/patch.go @@ -121,7 +121,7 @@ func (t *patchTriggers) patchOutcome(ctx context.Context, sub *event.Subscriptio return nil, nil } } - return t.generate(sub) + return t.generate(ctx, sub) } func (t *patchTriggers) patchFailure(ctx context.Context, sub *event.Subscription) (*notification.Notification, error) { @@ -129,7 +129,7 @@ func (t *patchTriggers) patchFailure(ctx context.Context, sub *event.Subscriptio return nil, nil } - return t.generate(sub) + return t.generate(ctx, sub) } func finalizeChildPatch(sub *event.Subscription) error { @@ -172,7 +172,7 @@ func (t *patchTriggers) patchSuccess(ctx context.Context, sub *event.Subscriptio return nil, nil } - return t.generate(sub) + return t.generate(ctx, sub) } func (t *patchTriggers) patchStarted(ctx context.Context, sub *event.Subscription) (*notification.Notification, error) { @@ -180,10 +180,10 @@ func (t *patchTriggers) patchStarted(ctx context.Context, sub *event.Subscriptio return nil, nil } - return t.generate(sub) + return t.generate(ctx, sub) } -func (t *patchTriggers) makeData(sub *event.Subscription) (*commonTemplateData, error) { +func (t *patchTriggers) makeData(ctx context.Context, sub *event.Subscription) (*commonTemplateData, error) { api := restModel.APIPatch{} if err := api.BuildFromService(*t.patch, &restModel.APIPatchArgs{ IncludeProjectIdentifier: true, @@ -279,7 +279,7 @@ func (t *patchTriggers) makeData(sub *event.Subscription) (*commonTemplateData, }) } - tasks, err := task.Find(task.ByVersionWithChildTasks(t.patch.Id.Hex())) + tasks, err := task.Find(ctx, task.ByVersionWithChildTasks(t.patch.Id.Hex())) if err != nil { return nil, errors.Wrapf(err, "getting tasks for patch '%s'", t.patch.Id) } @@ -303,8 +303,8 @@ func (t *patchTriggers) makeData(sub 
*event.Subscription) (*commonTemplateData, return &data, nil } -func (t *patchTriggers) generate(sub *event.Subscription) (*notification.Notification, error) { - data, err := t.makeData(sub) +func (t *patchTriggers) generate(ctx context.Context, sub *event.Subscription) (*notification.Notification, error) { + data, err := t.makeData(ctx, sub) if err != nil { return nil, errors.Wrap(err, "collecting patch data") } @@ -344,7 +344,7 @@ func (t *patchTriggers) patchFamilyOutcome(ctx context.Context, sub *event.Subsc return nil, nil } - return t.generate(sub) + return t.generate(ctx, sub) } func (t *patchTriggers) patchFamilySuccess(ctx context.Context, sub *event.Subscription) (*notification.Notification, error) { @@ -352,12 +352,12 @@ func (t *patchTriggers) patchFamilySuccess(ctx context.Context, sub *event.Subsc return nil, nil } - return t.generate(sub) + return t.generate(ctx, sub) } func (t *patchTriggers) patchFamilyFailure(ctx context.Context, sub *event.Subscription) (*notification.Notification, error) { if t.data.Status != evergreen.VersionFailed || t.event.EventType != event.PatchChildrenCompletion { return nil, nil } - return t.generate(sub) + return t.generate(ctx, sub) } diff --git a/trigger/process_test.go b/trigger/process_test.go index f74e18aa5a5..71c038c5f77 100644 --- a/trigger/process_test.go +++ b/trigger/process_test.go @@ -364,7 +364,7 @@ func TestProjectTriggerIntegration(t *testing.T) { assert.Equal(e.ID, b.TriggerEvent) assert.Contains(b.BuildVariant, "buildvariant") } - tasks, err := task.Find(task.ByVersion(downstreamVersions[0].Id)) + tasks, err := task.Find(ctx, task.ByVersion(downstreamVersions[0].Id)) assert.NoError(err) assert.NotEmpty(tasks) for _, t := range tasks { @@ -497,7 +497,7 @@ func TestProjectTriggerIntegrationForBuild(t *testing.T) { assert.Equal(e.ID, b.TriggerEvent) assert.Contains(b.BuildVariant, "buildvariant") } - tasks, err := task.Find(task.ByVersion(downstreamVersions[0].Id)) + tasks, err := task.Find(ctx, 
task.ByVersion(downstreamVersions[0].Id)) assert.NoError(err) assert.NotEmpty(tasks) for _, t := range tasks { @@ -610,7 +610,7 @@ func TestProjectTriggerIntegrationForPush(t *testing.T) { assert.Equal(model.ProjectTriggerLevelPush, b.TriggerType) assert.Contains(b.BuildVariant, "buildvariant") } - tasks, err := task.Find(task.ByVersion(dbVersions[0].Id)) + tasks, err := task.Find(ctx, task.ByVersion(dbVersions[0].Id)) assert.NoError(err) assert.NotEmpty(tasks) for _, t := range tasks { diff --git a/trigger/task.go b/trigger/task.go index a5717955395..2accd30b70b 100644 --- a/trigger/task.go +++ b/trigger/task.go @@ -972,6 +972,7 @@ func JIRATaskPayload(ctx context.Context, params JiraIssueParameters) (*message. } data := jiraTemplateData{ + Context: ctx, UIRoot: params.UiURL, SubscriptionID: params.SubID, EventID: params.EventID, diff --git a/trigger/task_jira.go b/trigger/task_jira.go index cfd8d154601..e936772a28e 100644 --- a/trigger/task_jira.go +++ b/trigger/task_jira.go @@ -135,7 +135,7 @@ func getTaskLogURLs(data *jiraTemplateData) ([]taskInfo, error) { } else { // Task is display only without tests result := make([]taskInfo, 0) - execTasks, err := task.Find(task.ByIds(data.Task.ExecutionTasks)) + execTasks, err := task.Find(data.Context, task.ByIds(data.Task.ExecutionTasks)) if err != nil { return nil, errors.Wrapf(err, "finding execution tasks for task '%s'", data.Task.Id) } @@ -182,6 +182,7 @@ type jiraBuilder struct { } type jiraTemplateData struct { + Context context.Context UIRoot string SubscriptionID string EventID string diff --git a/units/check_blocked_tasks.go b/units/check_blocked_tasks.go index d66ceb8c122..73bab304d1a 100644 --- a/units/check_blocked_tasks.go +++ b/units/check_blocked_tasks.go @@ -55,7 +55,7 @@ func NewCheckBlockedTasksJob(distroId string, ts time.Time) amboy.Job { func (j *checkBlockedTasksJob) Run(ctx context.Context) { var tasksToCheck []task.Task if j.DistroId != "" { - tasksToCheck = j.getDistroTasksToCheck() + 
tasksToCheck = j.getDistroTasksToCheck(ctx) } else { tasksToCheck = j.getContainerTasksToCheck() } @@ -65,7 +65,7 @@ func (j *checkBlockedTasksJob) Run(ctx context.Context) { } } -func (j *checkBlockedTasksJob) getDistroTasksToCheck() []task.Task { +func (j *checkBlockedTasksJob) getDistroTasksToCheck(ctx context.Context) []task.Task { queue, err := model.FindDistroTaskQueue(j.DistroId) if err != nil { j.AddError(errors.Wrapf(err, "getting task queue for distro '%s'", j.DistroId)) @@ -99,7 +99,7 @@ func (j *checkBlockedTasksJob) getDistroTasksToCheck() []task.Task { return nil } - tasksToCheck, err := task.Find(task.PotentiallyBlockedTasksByIds(taskIds)) + tasksToCheck, err := task.Find(ctx, task.PotentiallyBlockedTasksByIds(taskIds)) if err != nil { j.AddError(errors.Wrapf(err, "getting tasks to check in distro '%s'", j.DistroId)) return nil diff --git a/units/generate_tasks_test.go b/units/generate_tasks_test.go index e9b1984f63c..60ddbd49357 100644 --- a/units/generate_tasks_test.go +++ b/units/generate_tasks_test.go @@ -911,7 +911,7 @@ buildvariants: j.Run(ctx) assert.NoError(j.Error()) - tasks, err := task.Find(task.ByVersion(sampleVersion.Id)) + tasks, err := task.Find(ctx, task.ByVersion(sampleVersion.Id)) assert.NoError(err) foundGeneratedtask := false for _, dbTask := range tasks { diff --git a/units/host_termination_test.go b/units/host_termination_test.go index 401f707d37d..b81c2dc4130 100644 --- a/units/host_termination_test.go +++ b/units/host_termination_test.go @@ -308,7 +308,7 @@ func TestHostTerminationJob(t *testing.T) { assert.Equal(t, evergreen.TaskSucceeded, resetTask.Status) // Verify the single host task group did not reset - tasks, err := task.Find(task.ByIds([]string{task1.Id, task2.Id})) + tasks, err := task.Find(ctx, task.ByIds([]string{task1.Id, task2.Id})) require.NoError(t, err) require.Len(t, tasks, 2) for _, dbTask := range tasks { @@ -425,7 +425,7 @@ func TestHostTerminationJob(t *testing.T) { assert.Equal(t, cloud.StatusTerminated, 
cloudHost.Status) // Verify the single host task group reset - tasks, err := task.Find(task.ByIds([]string{task1.Id, task2.Id, task3.Id, task4.Id, task5.Id})) + tasks, err := task.Find(ctx, task.ByIds([]string{task1.Id, task2.Id, task3.Id, task4.Id, task5.Id})) require.NoError(t, err) require.Len(t, tasks, 5) for _, dbTask := range tasks { diff --git a/units/periodic_builds_test.go b/units/periodic_builds_test.go index bf57314619b..5c4059531fb 100644 --- a/units/periodic_builds_test.go +++ b/units/periodic_builds_test.go @@ -65,7 +65,7 @@ func TestPeriodicBuildsJob(t *testing.T) { assert.NoError(err) assert.Equal(evergreen.AdHocRequester, createdVersion.Requester) assert.Equal(prevVersion.Revision, createdVersion.Revision) - tasks, err := task.Find(task.ByVersion(createdVersion.Id)) + tasks, err := task.Find(ctx, task.ByVersion(createdVersion.Id)) require.NoError(t, err) assert.True(tasks[0].Activated) dbProject, err := model.FindBranchProjectRef(sampleProject.Id) From 6d03d1a2618ae433bc75b3166ed657b8f47fed3c Mon Sep 17 00:00:00 2001 From: Zackary Santana <64446617+ZackarySantana@users.noreply.github.com> Date: Fri, 17 Jan 2025 15:45:57 -0500 Subject: [PATCH 02/11] feat: add context to task.FindWithFields --- graphql/task_resolver.go | 2 +- model/generate.go | 2 +- model/lifecycle.go | 8 ++--- model/task/db.go | 8 ++--- model/task/dependency_graph.go | 5 ++-- model/task/task.go | 32 ++++++++++---------- model/task/task_test.go | 40 ++++++++++++++++--------- model/task_history.go | 2 +- model/task_lifecycle.go | 4 +-- model/task_lifecycle_test.go | 2 +- model/version_activation.go | 2 +- rest/route/project_test.go | 4 +-- scheduler/task_finder.go | 2 +- service/rest_version.go | 7 +++-- service/task.go | 2 +- units/check_blocked_tasks.go | 2 +- units/patch_intent.go | 18 +++++------ units/patch_intent_test.go | 26 +++++++++++----- units/task_monitor_execution_timeout.go | 2 +- 19 files changed, 98 insertions(+), 72 deletions(-) diff --git a/graphql/task_resolver.go 
b/graphql/task_resolver.go index f05c09086fd..1aa1324827a 100644 --- a/graphql/task_resolver.go +++ b/graphql/task_resolver.go @@ -256,7 +256,7 @@ func (r *taskResolver) DependsOn(ctx context.Context, obj *restModel.APITask) ([ depIds = append(depIds, dep.TaskId) } - dependencyTasks, err := task.FindWithFields(task.ByIds(depIds), task.DisplayNameKey, task.StatusKey, + dependencyTasks, err := task.FindWithFields(ctx, task.ByIds(depIds), task.DisplayNameKey, task.StatusKey, task.ActivatedKey, task.BuildVariantKey, task.DetailsKey, task.DependsOnKey) if err != nil { return nil, InternalServerError.Send(ctx, fmt.Sprintf("Cannot find dependency tasks for task %s: %s", *obj.Id, err.Error())) diff --git a/model/generate.go b/model/generate.go index e94f46cdc70..466fb8bdb82 100644 --- a/model/generate.go +++ b/model/generate.go @@ -446,7 +446,7 @@ func (g *GeneratedProject) GetNewTasksAndActivationInfo(ctx context.Context, v * func (g *GeneratedProject) CheckForCycles(ctx context.Context, v *Version, p *Project, projectRef *ProjectRef) error { ctx, span := tracer.Start(ctx, "check-for-cycles") defer span.End() - existingTasksGraph, err := task.VersionDependencyGraph(g.Task.Version, false) + existingTasksGraph, err := task.VersionDependencyGraph(ctx, g.Task.Version, false) if err != nil { return errors.Wrapf(err, "creating dependency graph for version '%s'", g.Task.Version) } diff --git a/model/lifecycle.go b/model/lifecycle.go index 1cc7938f755..7db674ddea5 100644 --- a/model/lifecycle.go +++ b/model/lifecycle.go @@ -143,7 +143,7 @@ func setTaskActivationForBuilds(ctx context.Context, buildIds []string, active, return errors.Wrap(err, "getting tasks to activate") } if withDependencies { - dependOn, err := task.GetRecursiveDependenciesUp(tasksToActivate, nil) + dependOn, err := task.GetRecursiveDependenciesUp(ctx, tasksToActivate, nil) if err != nil { return errors.Wrap(err, "getting recursive dependencies") } @@ -213,7 +213,7 @@ func TryMarkVersionStarted(versionId string, 
startTime time.Time) error { // dependencies that have a lower priority than the one being set for this task // will also have their priority increased. func SetTaskPriority(ctx context.Context, t task.Task, priority int64, caller string) error { - depTasks, err := task.GetRecursiveDependenciesUp([]task.Task{t}, nil) + depTasks, err := task.GetRecursiveDependenciesUp(ctx, []task.Task{t}, nil) if err != nil { return errors.Wrap(err, "getting task dependencies") } @@ -1659,7 +1659,7 @@ func addNewBuilds(ctx context.Context, creationInfo TaskCreationInfo, existingBu return nil, errors.Wrap(err, "updating version with new build IDs") } - activatedTaskDependencies, err := task.GetRecursiveDependenciesUp(newActivatedTasks, nil) + activatedTaskDependencies, err := task.GetRecursiveDependenciesUp(ctx, newActivatedTasks, nil) if err != nil { return nil, errors.Wrap(err, "getting dependencies for activated tasks") } @@ -1788,7 +1788,7 @@ func addNewTasksToExistingBuilds(ctx context.Context, creationInfo TaskCreationI } } - activatedTaskDependencies, err := task.GetRecursiveDependenciesUp(activatedTasks, nil) + activatedTaskDependencies, err := task.GetRecursiveDependenciesUp(ctx, activatedTasks, nil) if err != nil { return nil, errors.Wrap(err, "getting dependencies for activated tasks") } diff --git a/model/task/db.go b/model/task/db.go index 818dcf899d0..fe939377722 100644 --- a/model/task/db.go +++ b/model/task/db.go @@ -1526,8 +1526,8 @@ func FindAllTasksFromVersionWithDependencies(ctx context.Context, versionId stri } // FindTasksFromVersions returns all tasks associated with the given versions. Note that this only returns a few key fields. 
-func FindTasksFromVersions(versionIds []string) ([]Task, error) { - return FindWithFields(ByVersions(versionIds), +func FindTasksFromVersions(ctx context.Context, versionIds []string) ([]Task, error) { + return FindWithFields(ctx, ByVersions(versionIds), IdKey, DisplayNameKey, StatusKey, TimeTakenKey, VersionKey, BuildVariantKey, AbortedKey, AbortInfoKey) } @@ -1624,14 +1624,14 @@ func Find(ctx context.Context, filter bson.M) ([]Task, error) { return tasks, err } -func FindWithFields(filter bson.M, fields ...string) ([]Task, error) { +func FindWithFields(ctx context.Context, filter bson.M, fields ...string) ([]Task, error) { tasks := []Task{} _, exists := filter[DisplayOnlyKey] if !exists { filter[DisplayOnlyKey] = bson.M{"$ne": true} } query := db.Query(filter).WithFields(fields...) - err := db.FindAllQ(Collection, query, &tasks) + err := db.FindAllQContext(ctx, Collection, query, &tasks) return tasks, err } diff --git a/model/task/dependency_graph.go b/model/task/dependency_graph.go index 14852a1e7eb..57ec7a04e00 100644 --- a/model/task/dependency_graph.go +++ b/model/task/dependency_graph.go @@ -1,6 +1,7 @@ package task import ( + "context" "fmt" "strings" @@ -70,8 +71,8 @@ func (t TaskNode) String() string { } // VersionDependencyGraph finds all the tasks from the version given by versionID and constructs a DependencyGraph from them. 
-func VersionDependencyGraph(versionID string, transposed bool) (DependencyGraph, error) { - tasks, err := FindWithFields(ByVersion(versionID), DependsOnKey, BuildVariantKey, DisplayNameKey) +func VersionDependencyGraph(ctx context.Context, versionID string, transposed bool) (DependencyGraph, error) { + tasks, err := FindWithFields(ctx, ByVersion(versionID), DependsOnKey, BuildVariantKey, DisplayNameKey) if err != nil { return DependencyGraph{}, errors.Wrapf(err, "getting tasks for version '%s'", versionID) } diff --git a/model/task/task.go b/model/task/task.go index e9e5e354fca..b06ca6d891e 100644 --- a/model/task/task.go +++ b/model/task/task.go @@ -765,7 +765,7 @@ func (t *Task) DependenciesMet(ctx context.Context, depCaches map[string]Task) ( return true, nil } - _, err := t.populateDependencyTaskCache(depCaches) + _, err := t.populateDependencyTaskCache(ctx, depCaches) if err != nil { return false, errors.WithStack(err) } @@ -809,7 +809,7 @@ func (t *Task) setDependenciesMetTime() { } // populateDependencyTaskCache ensures that all the dependencies for the task are in the cache. -func (t *Task) populateDependencyTaskCache(depCache map[string]Task) ([]Task, error) { +func (t *Task) populateDependencyTaskCache(ctx context.Context, depCache map[string]Task) ([]Task, error) { var deps []Task depIdsToQueryFor := make([]string, 0, len(t.DependsOn)) for _, dep := range t.DependsOn { @@ -821,7 +821,7 @@ func (t *Task) populateDependencyTaskCache(depCache map[string]Task) ([]Task, er } if len(depIdsToQueryFor) > 0 { - newDeps, err := FindWithFields(ByIds(depIdsToQueryFor), StatusKey, DependsOnKey, ActivatedKey) + newDeps, err := FindWithFields(ctx, ByIds(depIdsToQueryFor), StatusKey, DependsOnKey, ActivatedKey) if err != nil { return nil, errors.WithStack(err) } @@ -837,7 +837,7 @@ func (t *Task) populateDependencyTaskCache(depCache map[string]Task) ([]Task, er } // GetFinishedBlockingDependencies gets all blocking tasks that are finished or blocked. 
-func (t *Task) GetFinishedBlockingDependencies(depCache map[string]Task) ([]Task, error) { +func (t *Task) GetFinishedBlockingDependencies(ctx context.Context, depCache map[string]Task) ([]Task, error) { if len(t.DependsOn) == 0 || t.OverrideDependencies { return nil, nil } @@ -849,7 +849,7 @@ func (t *Task) GetFinishedBlockingDependencies(depCache map[string]Task) ([]Task } } - _, err := t.populateDependencyTaskCache(depCache) + _, err := t.populateDependencyTaskCache(ctx, depCache) if err != nil { return nil, errors.WithStack(err) } @@ -875,7 +875,7 @@ func (t *Task) GetFinishedBlockingDependencies(depCache map[string]Task) ([]Task // GetDeactivatedBlockingDependencies gets all blocking tasks that are not finished and are not activated. // These tasks are not going to run unless they are manually activated. func (t *Task) GetDeactivatedBlockingDependencies(ctx context.Context, depCache map[string]Task) ([]string, error) { - _, err := t.populateDependencyTaskCache(depCache) + _, err := t.populateDependencyTaskCache(ctx, depCache) if err != nil { return nil, errors.WithStack(err) } @@ -2025,7 +2025,7 @@ func UpdateSchedulingLimit(username, requester string, numTasksModified int, act } // ActivateTasksByIdsWithDependencies activates the given tasks and their dependencies. 
-func ActivateTasksByIdsWithDependencies(ids []string, caller string) error { +func ActivateTasksByIdsWithDependencies(ctx context.Context, ids []string, caller string) error { q := db.Query(bson.M{ IdKey: bson.M{"$in": ids}, StatusKey: evergreen.TaskUndispatched, @@ -2035,7 +2035,7 @@ func ActivateTasksByIdsWithDependencies(ids []string, caller string) error { if err != nil { return errors.Wrap(err, "getting tasks for activation") } - dependOn, err := GetRecursiveDependenciesUp(tasks, nil) + dependOn, err := GetRecursiveDependenciesUp(ctx, tasks, nil) if err != nil { return errors.Wrap(err, "getting recursive dependencies") } @@ -2648,7 +2648,7 @@ func (t *Task) SetNumActivatedGeneratedTasks(numActivatedGeneratedTasks int) err // GetRecursiveDependenciesUp returns all tasks recursively depended upon // that are not in the original task slice (this includes earlier tasks in task groups, if applicable). // depCache should originally be nil. We assume there are no dependency cycles. -func GetRecursiveDependenciesUp(tasks []Task, depCache map[string]Task) ([]Task, error) { +func GetRecursiveDependenciesUp(ctx context.Context, tasks []Task, depCache map[string]Task) ([]Task, error) { if depCache == nil { depCache = make(map[string]Task) } @@ -2683,12 +2683,12 @@ func GetRecursiveDependenciesUp(tasks []Task, depCache map[string]Task) ([]Task, return nil, nil } - deps, err := FindWithFields(ByIds(tasksToFind), IdKey, DependsOnKey, ExecutionKey, BuildIdKey, StatusKey, TaskGroupKey, ActivatedKey, DisplayNameKey, PriorityKey) + deps, err := FindWithFields(ctx, ByIds(tasksToFind), IdKey, DependsOnKey, ExecutionKey, BuildIdKey, StatusKey, TaskGroupKey, ActivatedKey, DisplayNameKey, PriorityKey) if err != nil { return nil, errors.Wrap(err, "getting dependencies") } - recursiveDeps, err := GetRecursiveDependenciesUp(deps, depCache) + recursiveDeps, err := GetRecursiveDependenciesUp(ctx, deps, depCache) if err != nil { return nil, errors.Wrap(err, "getting recursive 
dependencies") } @@ -3300,7 +3300,7 @@ func (t *Task) CreateTestResultsTaskOptions(ctx context.Context) ([]testresult.T } else { query := ByIds(t.ExecutionTasks) query["$or"] = hasResults - execTasksWithResults, err = FindWithFields(query, ExecutionKey, ResultsServiceKey, HasCedarResultsKey) + execTasksWithResults, err = FindWithFields(ctx, query, ExecutionKey, ResultsServiceKey, HasCedarResultsKey) } if err != nil { return nil, errors.Wrap(err, "getting execution tasks for display task") @@ -3466,12 +3466,12 @@ func CheckUsersPatchTaskLimit(ctx context.Context, requester, username string, i return UpdateSchedulingLimit(username, requester, numTasksToActivate, true) } -func FindExecTasksToReset(t *Task) ([]string, error) { +func FindExecTasksToReset(ctx context.Context, t *Task) ([]string, error) { if !t.IsRestartFailedOnly() { return t.ExecutionTasks, nil } - failedExecTasks, err := FindWithFields(FailedTasksByIds(t.ExecutionTasks), IdKey) + failedExecTasks, err := FindWithFields(ctx, FailedTasksByIds(t.ExecutionTasks), IdKey) if err != nil { return nil, errors.Wrap(err, "retrieving failed execution tasks") } @@ -4133,8 +4133,8 @@ func AddExecTasksToDisplayTask(ctx context.Context, displayTaskId string, execTa } // in the process of aborting and will eventually reset themselves. 
-func (t *Task) FindAbortingAndResettingDependencies() ([]Task, error) { - recursiveDeps, err := GetRecursiveDependenciesUp([]Task{*t}, map[string]Task{}) +func (t *Task) FindAbortingAndResettingDependencies(ctx context.Context) ([]Task, error) { + recursiveDeps, err := GetRecursiveDependenciesUp(ctx, []Task{*t}, map[string]Task{}) if err != nil { return nil, errors.Wrap(err, "getting recursive parent dependencies") } diff --git a/model/task/task_test.go b/model/task/task_test.go index 8c452a054c8..a288f70562c 100644 --- a/model/task/task_test.go +++ b/model/task/task_test.go @@ -419,6 +419,9 @@ func TestDependenciesMet(t *testing.T) { } func TestGetFinishedBlockingDependencies(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + taskId := "t1" taskDoc := &Task{ Id: taskId, @@ -440,7 +443,7 @@ func TestGetFinishedBlockingDependencies(t *testing.T) { taskDoc.DependsOn = []Dependency{} require.NoError(t, taskDoc.Insert()) - tasks, err := taskDoc.GetFinishedBlockingDependencies(map[string]Task{}) + tasks, err := taskDoc.GetFinishedBlockingDependencies(ctx, map[string]Task{}) assert.NoError(t, err) assert.Empty(t, tasks) }, @@ -451,7 +454,7 @@ func TestGetFinishedBlockingDependencies(t *testing.T) { } require.NoError(t, taskDoc.Insert()) - tasks, err := taskDoc.GetFinishedBlockingDependencies(map[string]Task{}) + tasks, err := taskDoc.GetFinishedBlockingDependencies(ctx, map[string]Task{}) assert.NoError(t, err) assert.Empty(t, tasks) }, @@ -462,7 +465,7 @@ func TestGetFinishedBlockingDependencies(t *testing.T) { } require.NoError(t, taskDoc.Insert()) - tasks, err := taskDoc.GetFinishedBlockingDependencies(map[string]Task{ + tasks, err := taskDoc.GetFinishedBlockingDependencies(ctx, map[string]Task{ "cached-task": {Id: "cached-task", Status: evergreen.TaskSucceeded}, }) assert.NoError(t, err) @@ -476,7 +479,7 @@ func TestGetFinishedBlockingDependencies(t *testing.T) { } require.NoError(t, taskDoc.Insert()) - tasks, err := 
taskDoc.GetFinishedBlockingDependencies(map[string]Task{}) + tasks, err := taskDoc.GetFinishedBlockingDependencies(ctx, map[string]Task{}) assert.NoError(t, err) assert.Len(t, tasks, 1) }, @@ -488,7 +491,7 @@ func TestGetFinishedBlockingDependencies(t *testing.T) { } require.NoError(t, taskDoc.Insert()) - tasks, err := taskDoc.GetFinishedBlockingDependencies(map[string]Task{ + tasks, err := taskDoc.GetFinishedBlockingDependencies(ctx, map[string]Task{ "cached-task": {Id: "cached-task", Status: evergreen.TaskFailed}, }) assert.NoError(t, err) @@ -501,7 +504,7 @@ func TestGetFinishedBlockingDependencies(t *testing.T) { } require.NoError(t, taskDoc.Insert()) - tasks, err := taskDoc.GetFinishedBlockingDependencies(map[string]Task{}) + tasks, err := taskDoc.GetFinishedBlockingDependencies(ctx, map[string]Task{}) assert.NoError(t, err) // already marked blocked assert.Empty(t, tasks) @@ -514,7 +517,7 @@ func TestGetFinishedBlockingDependencies(t *testing.T) { } require.NoError(t, taskDoc.Insert()) - tasks, err := taskDoc.GetFinishedBlockingDependencies(map[string]Task{}) + tasks, err := taskDoc.GetFinishedBlockingDependencies(ctx, map[string]Task{}) assert.NoError(t, err) assert.Len(t, tasks, 1) }} { @@ -2272,6 +2275,9 @@ func TestGetAllDependencies(t *testing.T) { } func TestGetRecursiveDependenciesUp(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + require.NoError(t, db.Clear(Collection)) tasks := []Task{ {Id: "t0"}, @@ -2285,7 +2291,7 @@ func TestGetRecursiveDependenciesUp(t *testing.T) { require.NoError(t, task.Insert()) } - taskDependsOn, err := GetRecursiveDependenciesUp([]Task{tasks[3], tasks[4]}, nil) + taskDependsOn, err := GetRecursiveDependenciesUp(ctx, []Task{tasks[3], tasks[4]}, nil) assert.NoError(t, err) assert.Len(t, taskDependsOn, 3) expectedIDs := []string{"t2", "t1", "t0"} @@ -2295,6 +2301,9 @@ func TestGetRecursiveDependenciesUp(t *testing.T) { } func TestGetRecursiveDependenciesUpWithTaskGroup(t *testing.T) 
{ + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + require.NoError(t, db.Clear(Collection)) tasks := []Task{ {Id: "t0", BuildId: "b1", TaskGroup: "tg", TaskGroupMaxHosts: 1, TaskGroupOrder: 0}, @@ -2307,7 +2316,7 @@ func TestGetRecursiveDependenciesUpWithTaskGroup(t *testing.T) { for _, task := range tasks { require.NoError(t, task.Insert()) } - taskDependsOn, err := GetRecursiveDependenciesUp([]Task{tasks[2], tasks[3]}, nil) + taskDependsOn, err := GetRecursiveDependenciesUp(ctx, []Task{tasks[2], tasks[3]}, nil) assert.NoError(t, err) assert.Len(t, taskDependsOn, 2) expectedIDs := []string{"t0", "t1"} @@ -4687,6 +4696,9 @@ func assertTasksAreEqual(t *testing.T, expected, actual Task, exectedExecution i } func TestFindAbortingAndResettingDependencies(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + defer func() { assert.NoError(t, db.Clear(Collection)) }() @@ -4694,7 +4706,7 @@ func TestFindAbortingAndResettingDependencies(t *testing.T) { "ReturnsAllMatchingDependencies": func(t *testing.T, tsk Task, depTasks []Task) { require.NoError(t, tsk.Insert()) - found, err := tsk.FindAbortingAndResettingDependencies() + found, err := tsk.FindAbortingAndResettingDependencies(ctx) assert.NoError(t, err) require.Len(t, found, 2) expected := []string{depTasks[1].Id, depTasks[3].Id} @@ -4713,7 +4725,7 @@ func TestFindAbortingAndResettingDependencies(t *testing.T) { tsk.DependsOn = []Dependency{{TaskId: intermediateDepTask.Id}} require.NoError(t, tsk.Insert()) - found, err := tsk.FindAbortingAndResettingDependencies() + found, err := tsk.FindAbortingAndResettingDependencies(ctx) assert.NoError(t, err) require.Len(t, found, 1) assert.Equal(t, depTasks[1].Id, found[0].Id) @@ -4722,7 +4734,7 @@ func TestFindAbortingAndResettingDependencies(t *testing.T) { tsk.DependsOn = append(tsk.DependsOn, Dependency{TaskId: "nonexistent"}) require.NoError(t, tsk.Insert()) - found, err := 
tsk.FindAbortingAndResettingDependencies() + found, err := tsk.FindAbortingAndResettingDependencies(ctx) assert.NoError(t, err) require.Len(t, found, 2) expected := []string{depTasks[1].Id, depTasks[3].Id} @@ -4734,7 +4746,7 @@ func TestFindAbortingAndResettingDependencies(t *testing.T) { tsk.DependsOn = []Dependency{tsk.DependsOn[0], tsk.DependsOn[2], tsk.DependsOn[3]} require.NoError(t, tsk.Insert()) - found, err := tsk.FindAbortingAndResettingDependencies() + found, err := tsk.FindAbortingAndResettingDependencies(ctx) assert.NoError(t, err) require.Len(t, found, 1) assert.Equal(t, depTasks[3].Id, found[0].Id) @@ -4743,7 +4755,7 @@ func TestFindAbortingAndResettingDependencies(t *testing.T) { tsk.DependsOn = nil require.NoError(t, tsk.Insert()) - found, err := tsk.FindAbortingAndResettingDependencies() + found, err := tsk.FindAbortingAndResettingDependencies(ctx) assert.NoError(t, err) assert.Empty(t, found) }, diff --git a/model/task_history.go b/model/task_history.go index a6eb04d0b42..41e00f11d7e 100644 --- a/model/task_history.go +++ b/model/task_history.go @@ -318,7 +318,7 @@ func TaskHistoryPickaxe(ctx context.Context, params PickaxeParams) ([]task.Task, task.TimeTakenKey, task.BuildVariantKey, } - last, err := task.FindWithFields(query, projection...) + last, err := task.FindWithFields(ctx, query, projection...) 
if err != nil { return nil, errors.Wrap(err, "finding tasks") } diff --git a/model/task_lifecycle.go b/model/task_lifecycle.go index e85641bcc64..ad16652764c 100644 --- a/model/task_lifecycle.go +++ b/model/task_lifecycle.go @@ -52,7 +52,7 @@ func SetActiveState(ctx context.Context, caller string, active bool, tasks ...ta // if the task is being activated, and it doesn't override its dependencies // activate the task's dependencies as well if !t.OverrideDependencies { - deps, err := task.GetRecursiveDependenciesUp(originalTasks, nil) + deps, err := task.GetRecursiveDependenciesUp(ctx, originalTasks, nil) catcher.Wrapf(err, "getting dependencies up for task '%s'", t.Id) if t.IsPartOfSingleHostTaskGroup() { for _, dep := range deps { @@ -1808,7 +1808,7 @@ func MarkHostTaskDispatched(ctx context.Context, t *task.Task, h *host.Host) err func MarkOneTaskReset(ctx context.Context, t *task.Task, caller string) error { if t.DisplayOnly { - execTaskIdsToRestart, err := task.FindExecTasksToReset(t) + execTaskIdsToRestart, err := task.FindExecTasksToReset(ctx, t) if err != nil { return errors.Wrap(err, "finding execution tasks to restart") } diff --git a/model/task_lifecycle_test.go b/model/task_lifecycle_test.go index 5045a93f117..b2b0d408820 100644 --- a/model/task_lifecycle_test.go +++ b/model/task_lifecycle_test.go @@ -1363,7 +1363,7 @@ func TestUpdateBuildStatusForTask(t *testing.T) { require.NoError(t, tempTask.Insert()) } // Verify tasks are inserted and found correctly - tasks, err := task.FindWithFields(task.ByBuildId(b.Id)) + tasks, err := task.FindWithFields(ctx, task.ByBuildId(b.Id)) assert.NoError(t, err) assert.Len(t, tasks, 2) diff --git a/model/version_activation.go b/model/version_activation.go index c0d90788316..ab7191e59f8 100644 --- a/model/version_activation.go +++ b/model/version_activation.go @@ -142,7 +142,7 @@ func ActivateElapsedBuildsAndTasks(ctx context.Context, v *Version) (bool, error } } if len(allReadyTaskIds) > 0 { - if err := 
task.ActivateTasksByIdsWithDependencies(allReadyTaskIds, evergreen.ElapsedTaskActivator); err != nil { + if err := task.ActivateTasksByIdsWithDependencies(ctx, allReadyTaskIds, evergreen.ElapsedTaskActivator); err != nil { grip.Error(message.WrapError(err, message.Fields{ "operation": "project-activation", "message": "problem activating batchtime tasks", diff --git a/rest/route/project_test.go b/rest/route/project_test.go index 887edfced04..cc836e02cad 100644 --- a/rest/route/project_test.go +++ b/rest/route/project_test.go @@ -1664,7 +1664,7 @@ func TestModifyProjectVersions(t *testing.T) { resp := rm.Run(ctx) assert.NotNil(resp) assert.Equal(http.StatusOK, resp.Status()) - foundTasks, err := task.FindWithFields(task.ByVersions([]string{"v1", "v2", "v3", "v4"}), task.IdKey, task.PriorityKey, task.ActivatedKey) + foundTasks, err := task.FindWithFields(ctx, task.ByVersions([]string{"v1", "v2", "v3", "v4"}), task.IdKey, task.PriorityKey, task.ActivatedKey) assert.NoError(err) assert.Len(foundTasks, 4) var count int @@ -1686,7 +1686,7 @@ func TestModifyProjectVersions(t *testing.T) { resp := rm.Run(ctx) assert.NotNil(resp) assert.Equal(http.StatusOK, resp.Status()) - foundTasks, err := task.FindWithFields(task.ByVersions([]string{"v1", "v2", "v3", "v4"}), task.IdKey, task.PriorityKey, task.ActivatedKey) + foundTasks, err := task.FindWithFields(ctx, task.ByVersions([]string{"v1", "v2", "v3", "v4"}), task.IdKey, task.PriorityKey, task.ActivatedKey) assert.NoError(err) assert.Len(foundTasks, 4) var count int diff --git a/scheduler/task_finder.go b/scheduler/task_finder.go index 570d23d2b76..2da0d4f52fb 100644 --- a/scheduler/task_finder.go +++ b/scheduler/task_finder.go @@ -157,7 +157,7 @@ func AlternateTaskFinder(ctx context.Context, d distro.Distro) ([]task.Task, err taskIds = append(taskIds, t) } - tasksToCache, err := task.FindWithFields(task.ByIds(taskIds), task.StatusKey, task.DependsOnKey) + tasksToCache, err := task.FindWithFields(ctx, task.ByIds(taskIds), 
task.StatusKey, task.DependsOnKey) if err != nil { return nil, errors.Wrap(err, "problem finding task dependencies") } diff --git a/service/rest_version.go b/service/rest_version.go index 37f03f07a65..c833e5c4526 100644 --- a/service/rest_version.go +++ b/service/rest_version.go @@ -1,6 +1,7 @@ package service import ( + "context" "encoding/json" "fmt" "net/http" @@ -192,7 +193,7 @@ func (restapi restAPI) getRecentVersions(w http.ResponseWriter, r *http.Request) result.Versions = append(result.Versions, versionInfo) } // Find all builds/tasks corresponding the set of version ids - if err = result.populateBuildsAndTasks(versionIds, versionIdx); err != nil { + if err = result.populateBuildsAndTasks(r.Context(), versionIds, versionIdx); err != nil { msg := fmt.Sprintf("Error populating builds/tasks for recent versions of project '%v'", projectIdentifier) grip.Error(errors.Wrap(err, msg)) gimlet.WriteJSONInternalError(w, responseError{Message: msg}) @@ -221,12 +222,12 @@ func (restapi restAPI) getRecentVersions(w http.ResponseWriter, r *http.Request) gimlet.WriteJSON(w, result) } -func (r *recentVersionsContent) populateBuildsAndTasks(versionIds []string, versionIdx map[string]int) error { +func (r *recentVersionsContent) populateBuildsAndTasks(ctx context.Context, versionIds []string, versionIdx map[string]int) error { builds, err := build.FindBuildsByVersions(versionIds) if err != nil { return errors.Wrap(err, "Error finding recent versions") } - tasks, err := task.FindTasksFromVersions(versionIds) + tasks, err := task.FindTasksFromVersions(ctx, versionIds) if err != nil { return errors.Wrap(err, "Error finding recent tasks for recent versions") } diff --git a/service/task.go b/service/task.go index 504d617e599..6be6617df29 100644 --- a/service/task.go +++ b/service/task.go @@ -455,7 +455,7 @@ func getTaskDependencies(ctx context.Context, t *task.Task) ([]uiDep, string, er for _, dep := range t.DependsOn { depIds = append(depIds, dep.TaskId) } - dependencies, err := 
task.FindWithFields(task.ByIds(depIds), task.DisplayNameKey, task.StatusKey, + dependencies, err := task.FindWithFields(ctx, task.ByIds(depIds), task.DisplayNameKey, task.StatusKey, task.ActivatedKey, task.BuildVariantKey, task.DetailsKey, task.DependsOnKey) if err != nil { return nil, "", err diff --git a/units/check_blocked_tasks.go b/units/check_blocked_tasks.go index 73bab304d1a..798e1be7835 100644 --- a/units/check_blocked_tasks.go +++ b/units/check_blocked_tasks.go @@ -139,7 +139,7 @@ func checkUnmarkedBlockingTasks(ctx context.Context, t *task.Task, dependencyCac return nil } - finishedBlockingTasks, err := t.GetFinishedBlockingDependencies(dependencyCaches) + finishedBlockingTasks, err := t.GetFinishedBlockingDependencies(ctx, dependencyCaches) catcher.Wrap(err, "getting blocking tasks") blockingTaskIds := []string{} if err == nil { diff --git a/units/patch_intent.go b/units/patch_intent.go index f512b698dbd..58b41dbdc3f 100644 --- a/units/patch_intent.go +++ b/units/patch_intent.go @@ -352,7 +352,7 @@ func (j *patchIntentProcessor) finishPatch(ctx context.Context, patchDoc *patch. 
return err } - if err = j.buildTasksAndVariants(patchDoc, patchedProject); err != nil { + if err = j.buildTasksAndVariants(ctx, patchDoc, patchedProject); err != nil { return errors.Wrap(err, BuildTasksAndVariantsError) } @@ -567,7 +567,7 @@ func (j *patchIntentProcessor) createGitHubMergeSubscription(ctx context.Context return catcher.Resolve() } -func (j *patchIntentProcessor) buildTasksAndVariants(patchDoc *patch.Patch, project *model.Project) error { +func (j *patchIntentProcessor) buildTasksAndVariants(ctx context.Context, patchDoc *patch.Patch, project *model.Project) error { var err error var reuseDef bool reusePatchId, failedOnly := j.intent.RepeatFailedTasksAndVariants() @@ -576,7 +576,7 @@ func (j *patchIntentProcessor) buildTasksAndVariants(patchDoc *patch.Patch, proj } if reuseDef || failedOnly { - err = j.setToPreviousPatchDefinition(patchDoc, project, reusePatchId, failedOnly) + err = j.setToPreviousPatchDefinition(ctx, patchDoc, project, reusePatchId, failedOnly) if err != nil { return err } @@ -618,7 +618,7 @@ func (j *patchIntentProcessor) buildTasksAndVariants(patchDoc *patch.Patch, proj // setToFilteredTasks sets the tasks/variants to a previous patch's activated tasks (filtered on failures if requested) // and adds dependencies and task group tasks as needed. -func setToFilteredTasks(patchDoc, reusePatch *patch.Patch, project *model.Project, failedOnly bool) error { +func setToFilteredTasks(ctx context.Context, patchDoc, reusePatch *patch.Patch, project *model.Project, failedOnly bool) error { activatedTasks, err := task.FindActivatedByVersionWithoutDisplay(reusePatch.Version) if err != nil { return errors.Wrap(err, "filtering to activated tasks") @@ -645,7 +645,7 @@ func setToFilteredTasks(patchDoc, reusePatch *patch.Patch, project *model.Projec // We only need to add dependencies and task group tasks for failed tasks because otherwise // we can rely on them being there from the previous patch. 
if failedOnly { - failedPlusNeeded, err := addDependenciesAndTaskGroups(failedTasks, failedTaskDisplayNames, project, vt) + failedPlusNeeded, err := addDependenciesAndTaskGroups(ctx, failedTasks, failedTaskDisplayNames, project, vt) if err != nil { return errors.Wrap(err, "getting dependencies and task groups for activated tasks") } @@ -670,12 +670,12 @@ func setToFilteredTasks(patchDoc, reusePatch *patch.Patch, project *model.Projec } // addDependenciesAndTaskGroups adds dependencies and tasks from single host task groups for the given tasks. -func addDependenciesAndTaskGroups(tasks []task.Task, taskDisplayNames []string, project *model.Project, vt patch.VariantTasks) ([]string, error) { +func addDependenciesAndTaskGroups(ctx context.Context, tasks []task.Task, taskDisplayNames []string, project *model.Project, vt patch.VariantTasks) ([]string, error) { // only add tasks if they are in the current project definition tasksInProjectVariant := project.FindTasksForVariant(vt.Variant) tasksToAdd := []string{} // add dependencies of failed tasks - taskDependencies, err := task.GetRecursiveDependenciesUp(tasks, nil) + taskDependencies, err := task.GetRecursiveDependenciesUp(ctx, tasks, nil) if err != nil { return nil, errors.Wrap(err, "getting dependencies for activated tasks") } @@ -704,7 +704,7 @@ func addDependenciesAndTaskGroups(tasks []task.Task, taskDisplayNames []string, // setToPreviousPatchDefinition sets the tasks/variants based on a previous patch. // If failedOnly is set, we only use the tasks/variants that failed. // If patchId isn't set, we just use the most recent patch for the project. 
-func (j *patchIntentProcessor) setToPreviousPatchDefinition(patchDoc *patch.Patch, +func (j *patchIntentProcessor) setToPreviousPatchDefinition(ctx context.Context, patchDoc *patch.Patch, project *model.Project, patchId string, failedOnly bool) error { var reusePatch *patch.Patch var err error @@ -733,7 +733,7 @@ func (j *patchIntentProcessor) setToPreviousPatchDefinition(patchDoc *patch.Patc return nil } - if err = setToFilteredTasks(patchDoc, reusePatch, project, failedOnly); err != nil { + if err = setToFilteredTasks(ctx, patchDoc, reusePatch, project, failedOnly); err != nil { return errors.Wrapf(err, "filtering tasks for '%s'", patchId) } diff --git a/units/patch_intent_test.go b/units/patch_intent_test.go index af8a837b8c2..a30cb566b91 100644 --- a/units/patch_intent_test.go +++ b/units/patch_intent_test.go @@ -392,6 +392,9 @@ func (s *PatchIntentUnitsSuite) TestCantFinalizePatchWithDisabledCommitQueue() { } func (s *PatchIntentUnitsSuite) TestSetToPreviousPatchDefinition() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + patchId := mgobson.NewObjectId().Hex() previousPatchDoc := &patch.Patch{ Id: patch.NewId(patchId), @@ -481,7 +484,7 @@ func (s *PatchIntentUnitsSuite) TestSetToPreviousPatchDefinition() { } for testName, testCase := range map[string]func(*patchIntentProcessor, *patch.Patch){ "previous/reuse": func(j *patchIntentProcessor, currentPatchDoc *patch.Patch) { - err := j.setToPreviousPatchDefinition(currentPatchDoc, &project, "", false) + err := j.setToPreviousPatchDefinition(ctx, currentPatchDoc, &project, "", false) s.NoError(err) sort.Strings(previousPatchDoc.Tasks) sort.Strings(previousPatchDoc.BuildVariants) @@ -493,7 +496,7 @@ func (s *PatchIntentUnitsSuite) TestSetToPreviousPatchDefinition() { }, "previous/reuse failed": func(j *patchIntentProcessor, currentPatchDoc *patch.Patch) { - err := j.setToPreviousPatchDefinition(currentPatchDoc, &project, "", true) + err := j.setToPreviousPatchDefinition(ctx, 
currentPatchDoc, &project, "", true) s.NoError(err) sort.Strings(previousPatchDoc.BuildVariants) sort.Strings(currentPatchDoc.BuildVariants) @@ -502,7 +505,7 @@ func (s *PatchIntentUnitsSuite) TestSetToPreviousPatchDefinition() { s.Equal([]string{"et1", "t1", "tgt1", "tgt2", "tgt4"}, currentPatchDoc.Tasks) }, "specific patch/reuse": func(j *patchIntentProcessor, currentPatchDoc *patch.Patch) { - err := j.setToPreviousPatchDefinition(currentPatchDoc, &project, reusePatchId, false) + err := j.setToPreviousPatchDefinition(ctx, currentPatchDoc, &project, reusePatchId, false) s.NoError(err) s.Equal(currentPatchDoc.BuildVariants, reusePatchDoc.BuildVariants) @@ -510,7 +513,7 @@ func (s *PatchIntentUnitsSuite) TestSetToPreviousPatchDefinition() { }, "specific patch/reuse failed": func(j *patchIntentProcessor, currentPatchDoc *patch.Patch) { - err := j.setToPreviousPatchDefinition(currentPatchDoc, &project, reusePatchId, true) + err := j.setToPreviousPatchDefinition(ctx, currentPatchDoc, &project, reusePatchId, true) s.NoError(err) s.Equal(currentPatchDoc.BuildVariants, reusePatchDoc.BuildVariants) s.Equal([]string{"diffTask1"}, currentPatchDoc.Tasks) @@ -655,6 +658,9 @@ func (s *PatchIntentUnitsSuite) TestSetToPreviousPatchDefinition() { } func (s *PatchIntentUnitsSuite) TestBuildTasksAndVariantsWithRepeatFailed() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + patchId := "aaaaaaaaaaff001122334455" tasks := []task.Task{ { @@ -778,13 +784,16 @@ func (s *PatchIntentUnitsSuite) TestBuildTasksAndVariantsWithRepeatFailed() { s.NoError(err) - err = j.buildTasksAndVariants(currentPatchDoc, &project) + err = j.buildTasksAndVariants(ctx, currentPatchDoc, &project) s.NoError(err) sort.Strings(currentPatchDoc.Tasks) s.Equal([]string{"t1", "t3", "t4"}, currentPatchDoc.Tasks) } func (s *PatchIntentUnitsSuite) TestBuildTasksAndVariantsWithReuse() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + patchId := 
"aaaaaaaaaaff001122334455" tasks := []task.Task{ { @@ -938,7 +947,7 @@ func (s *PatchIntentUnitsSuite) TestBuildTasksAndVariantsWithReuse() { s.NoError(err) // test --reuse - err = j.buildTasksAndVariants(currentPatchDoc, &project) + err = j.buildTasksAndVariants(ctx, currentPatchDoc, &project) s.NoError(err) sort.Strings(currentPatchDoc.Tasks) s.Equal([]string{"t1", "t2", "t3", "t4"}, currentPatchDoc.Tasks) @@ -952,6 +961,9 @@ func (s *PatchIntentUnitsSuite) TestBuildTasksAndVariantsWithReuse() { } func (s *PatchIntentUnitsSuite) TestBuildTasksAndVariantsWithReusePatchId() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + earlierPatchId := mgobson.NewObjectId().Hex() tasks := []task.Task{ { @@ -1105,7 +1117,7 @@ func (s *PatchIntentUnitsSuite) TestBuildTasksAndVariantsWithReusePatchId() { s.NoError(err) // test --reuse with patch ID - err = j.buildTasksAndVariants(currentPatchDoc, &project) + err = j.buildTasksAndVariants(ctx, currentPatchDoc, &project) s.NoError(err) sort.Strings(currentPatchDoc.Tasks) s.Equal([]string{"t1", "t2", "t3", "t4"}, currentPatchDoc.Tasks) diff --git a/units/task_monitor_execution_timeout.go b/units/task_monitor_execution_timeout.go index 732f26ec700..ea167f269c0 100644 --- a/units/task_monitor_execution_timeout.go +++ b/units/task_monitor_execution_timeout.go @@ -266,7 +266,7 @@ func (j *taskExecutionTimeoutPopulationJob) Run(ctx context.Context) { } queue := j.env.RemoteQueue() - tasks, err := task.FindWithFields(task.ByStaleRunningTask(evergreen.HeartbeatTimeoutThreshold), task.IdKey) + tasks, err := task.FindWithFields(ctx, task.ByStaleRunningTask(evergreen.HeartbeatTimeoutThreshold), task.IdKey) if err != nil { j.AddError(errors.Wrap(err, "finding tasks with timed-out or stale heartbeats")) return From fc2ff69f09158f93605452a58ff61da001bf0c90 Mon Sep 17 00:00:00 2001 From: Zackary Santana <64446617+ZackarySantana@users.noreply.github.com> Date: Fri, 17 Jan 2025 15:47:03 -0500 Subject: [PATCH 03/11] 
feat: add context to task.FindWithSort and FindTaskGroupFromBuild --- model/lifecycle_test.go | 2 +- model/task/db.go | 8 ++++---- model/task/task.go | 4 ++-- model/task_lifecycle.go | 4 ++-- model/task_lifecycle_test.go | 6 +++--- rest/route/host_agent.go | 2 +- units/host_termination.go | 2 +- 7 files changed, 14 insertions(+), 14 deletions(-) diff --git a/model/lifecycle_test.go b/model/lifecycle_test.go index e8b0b4db14e..958c637ec55 100644 --- a/model/lifecycle_test.go +++ b/model/lifecycle_test.go @@ -1335,7 +1335,7 @@ func TestCreateBuildFromVersion(t *testing.T) { So(tasks1.InsertUnordered(context.Background()), ShouldBeNil) So(tasks2.InsertUnordered(context.Background()), ShouldBeNil) So(tasks3.InsertUnordered(context.Background()), ShouldBeNil) - dbTasks, err := task.FindWithSort(bson.M{}, []string{task.DisplayNameKey, task.BuildVariantKey}) + dbTasks, err := task.FindWithSort(ctx, bson.M{}, []string{task.DisplayNameKey, task.BuildVariantKey}) So(err, ShouldBeNil) So(len(dbTasks), ShouldEqual, 9) diff --git a/model/task/db.go b/model/task/db.go index fe939377722..7510e271f1b 100644 --- a/model/task/db.go +++ b/model/task/db.go @@ -1549,8 +1549,8 @@ func HasActivatedDependentTasks(taskId string) (bool, error) { return numDependentTasks > 0, err } -func FindTaskGroupFromBuild(buildId, taskGroup string) ([]Task, error) { - tasks, err := FindWithSort(bson.M{ +func FindTaskGroupFromBuild(ctx context.Context, buildId, taskGroup string) ([]Task, error) { + tasks, err := FindWithSort(ctx, bson.M{ BuildIdKey: buildId, TaskGroupKey: taskGroup, }, []string{TaskGroupOrderKey}) @@ -1636,14 +1636,14 @@ func FindWithFields(ctx context.Context, filter bson.M, fields ...string) ([]Tas return tasks, err } -func FindWithSort(filter bson.M, sort []string) ([]Task, error) { +func FindWithSort(ctx context.Context, filter bson.M, sort []string) ([]Task, error) { tasks := []Task{} _, exists := filter[DisplayOnlyKey] if !exists { filter[DisplayOnlyKey] = bson.M{"$ne": true} } 
query := db.Query(filter).Sort(sort) - err := db.FindAllQ(Collection, query, &tasks) + err := db.FindAllQContext(ctx, Collection, query, &tasks) return tasks, err } diff --git a/model/task/task.go b/model/task/task.go index b06ca6d891e..5f67eeb3d45 100644 --- a/model/task/task.go +++ b/model/task/task.go @@ -2664,7 +2664,7 @@ func GetRecursiveDependenciesUp(ctx context.Context, tasks []Task, depCache map[ } } if t.IsPartOfSingleHostTaskGroup() { - tasksInGroup, err := FindTaskGroupFromBuild(t.BuildId, t.TaskGroup) + tasksInGroup, err := FindTaskGroupFromBuild(ctx, t.BuildId, t.TaskGroup) if err != nil { return nil, errors.Wrapf(err, "finding task group '%s'", t.TaskGroup) } @@ -3429,7 +3429,7 @@ func updateSchedulingLimitForResetWhenFinished(ctx context.Context, t *Task, cal return errors.Wrapf(err, "finding execution tasks for '%s'", t.Id) } } else if t.IsPartOfSingleHostTaskGroup() { - tasks, err = FindTaskGroupFromBuild(t.BuildId, t.TaskGroup) + tasks, err = FindTaskGroupFromBuild(ctx, t.BuildId, t.TaskGroup) if err != nil { return errors.Wrapf(err, "finding task group '%s'", t.TaskGroup) } diff --git a/model/task_lifecycle.go b/model/task_lifecycle.go index ad16652764c..ae59c0b2d5a 100644 --- a/model/task_lifecycle.go +++ b/model/task_lifecycle.go @@ -1150,7 +1150,7 @@ func evalLinearStepback(ctx context.Context, t *task.Task, newStepback, shouldSt if t.IsPartOfSingleHostTaskGroup() { // Stepback earlier task group tasks as well because these need to be run sequentially. 
catcher := grip.NewBasicCatcher() - tasks, err := task.FindTaskGroupFromBuild(t.BuildId, t.TaskGroup) + tasks, err := task.FindTaskGroupFromBuild(ctx, t.BuildId, t.TaskGroup) if err != nil { return errors.Wrapf(err, "getting task group for task '%s'", t.Id) } @@ -2370,7 +2370,7 @@ func checkResetSingleHostTaskGroup(ctx context.Context, t *task.Task, caller str if !t.IsPartOfSingleHostTaskGroup() { return nil } - tasks, err := task.FindTaskGroupFromBuild(t.BuildId, t.TaskGroup) + tasks, err := task.FindTaskGroupFromBuild(ctx, t.BuildId, t.TaskGroup) if err != nil { return errors.Wrapf(err, "getting task group for task '%s'", t.Id) } diff --git a/model/task_lifecycle_test.go b/model/task_lifecycle_test.go index b2b0d408820..eba19d41720 100644 --- a/model/task_lifecycle_test.go +++ b/model/task_lifecycle_test.go @@ -826,7 +826,7 @@ func TestSetActiveState(t *testing.T) { So(SetActiveState(ctx, "test", true, *taskDef), ShouldBeNil) - taskGroup, err := task.FindTaskGroupFromBuild(b.Id, taskDef.TaskGroup) + taskGroup, err := task.FindTaskGroupFromBuild(ctx, b.Id, taskDef.TaskGroup) So(err, ShouldBeNil) So(taskGroup, ShouldHaveLength, 4) for _, t := range taskGroup { @@ -882,7 +882,7 @@ func TestSetActiveState(t *testing.T) { taskDef.Id = "task3" So(SetActiveState(ctx, "test", false, *taskDef), ShouldBeNil) - taskGroup, err := task.FindTaskGroupFromBuild(b.Id, taskDef.TaskGroup) + taskGroup, err := task.FindTaskGroupFromBuild(ctx, b.Id, taskDef.TaskGroup) So(err, ShouldBeNil) So(taskGroup, ShouldHaveLength, 4) for _, t := range taskGroup { @@ -2459,7 +2459,7 @@ func TestMarkEndIsAutomaticRestart(t *testing.T) { }, "ResetsSingleHostTaskGroupWithFailure": func(t *testing.T) { assert.NoError(t, MarkEnd(ctx, &evergreen.Settings{}, tgTask1, "test", time.Now(), detail, false)) - tasks, err := task.FindTaskGroupFromBuild(tgTask1.BuildId, tgTask1.TaskGroup) + tasks, err := task.FindTaskGroupFromBuild(ctx, tgTask1.BuildId, tgTask1.TaskGroup) assert.NoError(t, err) require.Len(t, 
tasks, 2) diff --git a/rest/route/host_agent.go b/rest/route/host_agent.go index c8964f5f68c..cbe1d6447a4 100644 --- a/rest/route/host_agent.go +++ b/rest/route/host_agent.go @@ -608,7 +608,7 @@ func checkHostTaskGroupAfterDispatch(ctx context.Context, t *task.Task) error { if t.TaskGroupOrder > 1 { // If the previous task in the single-host task group has yet to run // and should run, then wait for the previous task to run. - tgTasks, err := task.FindTaskGroupFromBuild(t.BuildId, t.TaskGroup) + tgTasks, err := task.FindTaskGroupFromBuild(ctx, t.BuildId, t.TaskGroup) if err != nil { return errors.Wrap(err, "finding task group from build") } diff --git a/units/host_termination.go b/units/host_termination.go index b7ef1685c1d..d43aa8571f7 100644 --- a/units/host_termination.go +++ b/units/host_termination.go @@ -209,7 +209,7 @@ func (j *hostTerminationJob) Run(ctx context.Context) { } // Only try to restart the task group if it was successful and should have continued executing. if latestTask != nil && latestTask.IsPartOfSingleHostTaskGroup() && latestTask.Status == evergreen.TaskSucceeded { - tasks, err := task.FindTaskGroupFromBuild(latestTask.BuildId, latestTask.TaskGroup) + tasks, err := task.FindTaskGroupFromBuild(ctx, latestTask.BuildId, latestTask.TaskGroup) if err != nil { j.AddError(errors.Wrapf(err, "getting task group for task '%s'", latestTask.Id)) return From b4c41d4814a22cad67b93d310d852f30da34851b Mon Sep 17 00:00:00 2001 From: Zackary Santana <64446617+ZackarySantana@users.noreply.github.com> Date: Fri, 17 Jan 2025 15:49:54 -0500 Subject: [PATCH 04/11] feat: add context to task.FindAllOld and task.FindAllFirstExecution --- graphql/patch_resolver.go | 2 +- graphql/version_resolver.go | 2 +- model/build/build.go | 4 ++-- model/task/db.go | 8 ++++---- model/task/db_test.go | 5 ++++- model/task_lifecycle.go | 2 +- model/version.go | 4 ++-- service/build.go | 4 ++-- service/version.go | 2 +- 9 files changed, 18 insertions(+), 15 deletions(-) diff --git 
a/graphql/patch_resolver.go b/graphql/patch_resolver.go index 1cab122e219..07dbb545bfc 100644 --- a/graphql/patch_resolver.go +++ b/graphql/patch_resolver.go @@ -63,7 +63,7 @@ func (r *patchResolver) Builds(ctx context.Context, obj *restModel.APIPatch) ([] // Duration is the resolver for the duration field. func (r *patchResolver) Duration(ctx context.Context, obj *restModel.APIPatch) (*PatchDuration, error) { query := db.Query(task.ByVersion(*obj.Id)).WithFields(task.TimeTakenKey, task.StartTimeKey, task.FinishTimeKey, task.DisplayOnlyKey, task.ExecutionKey) - tasks, err := task.FindAllFirstExecution(query) + tasks, err := task.FindAllFirstExecution(ctx, query) if err != nil { return nil, InternalServerError.Send(ctx, err.Error()) } diff --git a/graphql/version_resolver.go b/graphql/version_resolver.go index 335fd8777ec..3fed689220a 100644 --- a/graphql/version_resolver.go +++ b/graphql/version_resolver.go @@ -483,7 +483,7 @@ func (r *versionResolver) VersionTiming(ctx context.Context, obj *restModel.APIV if v == nil { return nil, ResourceNotFound.Send(ctx, fmt.Sprintf("finding version '%s'", utility.FromStringPtr(obj.Id))) } - timeTaken, makespan, err := v.GetTimeSpent() + timeTaken, makespan, err := v.GetTimeSpent(ctx) if err != nil { return nil, InternalServerError.Send(ctx, fmt.Sprintf("getting timing for version '%s': %s", utility.FromStringPtr(obj.Id), err.Error())) } diff --git a/model/build/build.go b/model/build/build.go index cd85fcbf5de..7f0a594bed1 100644 --- a/model/build/build.go +++ b/model/build/build.go @@ -278,9 +278,9 @@ func (b *Build) MarkFinished(status string, finishTime time.Time) error { ) } -func (b *Build) GetTimeSpent() (time.Duration, time.Duration, error) { +func (b *Build) GetTimeSpent(ctx context.Context) (time.Duration, time.Duration, error) { query := db.Query(task.ByBuildId(b.Id)).WithFields(task.TimeTakenKey, task.StartTimeKey, task.FinishTimeKey, task.DisplayOnlyKey, task.ExecutionKey) - tasks, err := 
task.FindAllFirstExecution(query) + tasks, err := task.FindAllFirstExecution(ctx, query) if err != nil { return 0, 0, errors.Wrap(err, "getting tasks") } diff --git a/model/task/db.go b/model/task/db.go index 7510e271f1b..4df1dfdf583 100644 --- a/model/task/db.go +++ b/model/task/db.go @@ -1585,7 +1585,7 @@ func MakeOldID(taskID string, execution int) string { return fmt.Sprintf("%s_%d", taskID, execution) } -func FindAllFirstExecution(query db.Q) ([]Task, error) { +func FindAllFirstExecution(ctx context.Context, query db.Q) ([]Task, error) { existingTasks, err := FindAll(query) if err != nil { return nil, errors.Wrap(err, "getting current tasks") @@ -1601,7 +1601,7 @@ func FindAllFirstExecution(query db.Q) ([]Task, error) { } if len(oldIDs) > 0 { - oldTasks, err := FindAllOld(db.Query(ByIds(oldIDs))) + oldTasks, err := FindAllOld(ctx, db.Query(ByIds(oldIDs))) if err != nil { return nil, errors.Wrap(err, "getting old tasks") } @@ -1656,9 +1656,9 @@ func FindAll(query db.Q) ([]Task, error) { } // Find returns really all tasks that satisfy the query. 
-func FindAllOld(query db.Q) ([]Task, error) { +func FindAllOld(ctx context.Context, query db.Q) ([]Task, error) { tasks := []Task{} - err := db.FindAllQ(OldCollection, query, &tasks) + err := db.FindAllQContext(ctx, OldCollection, query, &tasks) return tasks, err } diff --git a/model/task/db_test.go b/model/task/db_test.go index b2967a02c94..f00b40e0ba1 100644 --- a/model/task/db_test.go +++ b/model/task/db_test.go @@ -417,6 +417,9 @@ func TestFindOneIdAndExecutionWithDisplayStatus(t *testing.T) { } func TestFindAllFirstExecution(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + require.NoError(t, db.ClearCollections(Collection, OldCollection)) tasks := []Task{ {Id: "t0"}, @@ -429,7 +432,7 @@ func TestFindAllFirstExecution(t *testing.T) { oldTask := Task{Id: MakeOldID("t1", 0)} require.NoError(t, db.Insert(OldCollection, &oldTask)) - foundTasks, err := FindAllFirstExecution(All) + foundTasks, err := FindAllFirstExecution(ctx, All) assert.NoError(t, err) assert.Len(t, foundTasks, 3) expectedIDs := []string{"t0", MakeOldID("t1", 0), "t2"} diff --git a/model/task_lifecycle.go b/model/task_lifecycle.go index ae59c0b2d5a..a7f2f47e1a9 100644 --- a/model/task_lifecycle.go +++ b/model/task_lifecycle.go @@ -1002,7 +1002,7 @@ func getVersionCtxForTracing(ctx context.Context, v *Version, project string) (c return nil, errors.New("version is nil") } - timeTaken, makespan, err := v.GetTimeSpent() + timeTaken, makespan, err := v.GetTimeSpent(ctx) if err != nil { return nil, errors.Wrap(err, "getting time spent") } diff --git a/model/version.go b/model/version.go index d0bd68382c2..79921cba992 100644 --- a/model/version.go +++ b/model/version.go @@ -220,10 +220,10 @@ func setVersionStatus(versionId, newStatus string) error { // GetTimeSpent returns the total time_taken and makespan of a version for // each task that has finished running -func (v *Version) GetTimeSpent() (time.Duration, time.Duration, error) { +func (v *Version) 
GetTimeSpent(ctx context.Context) (time.Duration, time.Duration, error) { query := db.Query(task.ByVersion(v.Id)).WithFields( task.TimeTakenKey, task.StartTimeKey, task.FinishTimeKey, task.DisplayOnlyKey, task.ExecutionKey) - tasks, err := task.FindAllFirstExecution(query) + tasks, err := task.FindAllFirstExecution(ctx, query) if err != nil { return 0, 0, errors.Wrapf(err, "getting tasks for version '%s'", v.Id) } diff --git a/service/build.go b/service/build.go index 4dec3e55790..957eb398105 100644 --- a/service/build.go +++ b/service/build.go @@ -75,7 +75,7 @@ func (uis *UIServer) buildPage(w http.ResponseWriter, r *http.Request) { } buildAsUI.Tasks = uiTasks - buildAsUI.TimeTaken, buildAsUI.Makespan, err = projCtx.Build.GetTimeSpent() + buildAsUI.TimeTaken, buildAsUI.Makespan, err = projCtx.Build.GetTimeSpent(r.Context()) if err != nil { uis.LoggedError(w, r, http.StatusInternalServerError, errors.Wrap(err, "can't get time spent for build")) return @@ -245,7 +245,7 @@ func (uis *UIServer) modifyBuild(w http.ResponseWriter, r *http.Request) { } updatedBuild.Tasks = uiTasks - updatedBuild.TimeTaken, updatedBuild.Makespan, err = projCtx.Build.GetTimeSpent() + updatedBuild.TimeTaken, updatedBuild.Makespan, err = projCtx.Build.GetTimeSpent(r.Context()) if err != nil { uis.LoggedError(w, r, http.StatusInternalServerError, errors.Wrap(err, "can't get time spent for build")) return diff --git a/service/version.go b/service/version.go index f97c17407d3..f5dd53e2baa 100644 --- a/service/version.go +++ b/service/version.go @@ -217,7 +217,7 @@ func (uis *UIServer) versionPage(w http.ResponseWriter, r *http.Request) { } versionAsUI.Builds = uiBuilds - versionAsUI.TimeTaken, versionAsUI.Makespan, err = projCtx.Version.GetTimeSpent() + versionAsUI.TimeTaken, versionAsUI.Makespan, err = projCtx.Version.GetTimeSpent(r.Context()) if err != nil { uis.LoggedError(w, r, http.StatusInternalServerError, err) return From 65fb94fde3d9ff3ac4990b7774ad0a416af959f3 Mon Sep 17 00:00:00 2001 
From: Zackary Santana <64446617+ZackarySantana@users.noreply.github.com> Date: Fri, 17 Jan 2025 16:32:10 -0500 Subject: [PATCH 05/11] feat: add context to task.UpdateOne, task prioritizer, planner, and scheduler functions --- cloud/ec2.go | 2 +- db/db_utils.go | 13 ++ graphql/mutation_resolver.go | 4 +- model/generate.go | 4 +- model/lifecycle.go | 2 +- model/task/db.go | 28 +-- model/task/db_test.go | 9 +- model/task/generated_json_db.go | 4 +- model/task/generated_json_s3.go | 2 +- model/task/task.go | 107 ++++++----- model/task/task_test.go | 79 ++++---- model/task_lifecycle.go | 23 +-- model/task_lifecycle_test.go | 41 +++-- model/task_queue_service_test.go | 46 +++-- rest/data/scheduler.go | 4 +- rest/route/agent.go | 8 +- rest/route/agent_test.go | 2 +- rest/route/host_agent.go | 4 +- rest/route/host_agent_test.go | 6 +- rest/route/middleware_test.go | 4 +- rest/route/patch_test.go | 2 +- scheduler/planner.go | 66 ++++--- scheduler/planner_test.go | 171 +++++++++--------- scheduler/scheduler.go | 8 +- scheduler/setup_funcs.go | 13 +- scheduler/task_prioritizer.go | 17 +- scheduler/task_prioritizer_test.go | 10 +- scheduler/task_priority_cmp.go | 6 +- scheduler/task_priority_cmp_test.go | 16 +- scheduler/utilization_based_host_allocator.go | 2 +- service/task.go | 2 +- trigger/task_test.go | 4 +- units/generate_tasks.go | 4 +- units/host_monitoring_check.go | 2 +- units/provisioning_create_host.go | 2 +- units/task_monitor_execution_timeout.go | 2 +- 36 files changed, 390 insertions(+), 329 deletions(-) diff --git a/cloud/ec2.go b/cloud/ec2.go index 607a4b38cf5..947bc0b5941 100644 --- a/cloud/ec2.go +++ b/cloud/ec2.go @@ -349,7 +349,7 @@ func (m *ec2Manager) spawnOnDemandHost(ctx context.Context, h *host.Host, ec2Set } if h.SpawnOptions.SpawnedByTask { - detailErr := task.AddHostCreateDetails(h.StartedBy, h.Id, h.SpawnOptions.TaskExecutionNumber, err) + detailErr := task.AddHostCreateDetails(ctx, h.StartedBy, h.Id, h.SpawnOptions.TaskExecutionNumber, err) 
grip.Error(message.WrapError(detailErr, message.Fields{ "message": "error adding host create error details", "host_id": h.Id, diff --git a/db/db_utils.go b/db/db_utils.go index 889c9a384d6..491d1337473 100644 --- a/db/db_utils.go +++ b/db/db_utils.go @@ -233,6 +233,19 @@ func Update(collection string, query interface{}, update interface{}) error { return db.C(collection).Update(query, update) } +// UpdateContext updates one matching document in the collection. +func UpdateContext(ctx context.Context, collection string, query interface{}, update interface{}) error { + session, db, err := GetGlobalSessionFactory().GetContextSession(ctx) + if err != nil { + grip.Errorf("error establishing db connection: %+v", err) + + return err + } + defer session.Close() + + return db.C(collection).Update(query, update) +} + // UpdateId updates one _id-matching document in the collection. func UpdateId(collection string, id, update interface{}) error { session, db, err := GetGlobalSessionFactory().GetSession() diff --git a/graphql/mutation_resolver.go b/graphql/mutation_resolver.go index 9fe0d7186b1..c06744ddc50 100644 --- a/graphql/mutation_resolver.go +++ b/graphql/mutation_resolver.go @@ -944,7 +944,7 @@ func (r *mutationResolver) OverrideTaskDependencies(ctx context.Context, taskID if t == nil { return nil, ResourceNotFound.Send(ctx, fmt.Sprintf("cannot find task with id %s", taskID)) } - if err = t.SetOverrideDependencies(currentUser.Username()); err != nil { + if err = t.SetOverrideDependencies(ctx, currentUser.Username()); err != nil { return nil, InternalServerError.Send(ctx, fmt.Sprintf("overriding dependencies for task '%s': %s", taskID, err.Error())) } return getAPITaskFromTask(ctx, r.sc.GetURL(), *t) @@ -1322,7 +1322,7 @@ func (r *mutationResolver) ScheduleUndispatchedBaseTasks(ctx context.Context, ve baseGeneratorTask, _ := generatorTask.FindTaskOnBaseCommit(ctx) // If baseGeneratorTask is nil then it didn't exist on the base task and we can't do anything if baseGeneratorTask
!= nil && baseGeneratorTask.Status == evergreen.TaskUndispatched { - err = baseGeneratorTask.SetGeneratedTasksToActivate(t.BuildVariant, t.DisplayName) + err = baseGeneratorTask.SetGeneratedTasksToActivate(ctx, t.BuildVariant, t.DisplayName) if err != nil { return nil, InternalServerError.Send(ctx, fmt.Sprintf("Could not activate generated task: %s", err.Error())) } diff --git a/model/generate.go b/model/generate.go index 466fb8bdb82..abf3394a0d8 100644 --- a/model/generate.go +++ b/model/generate.go @@ -323,7 +323,7 @@ func (g *GeneratedProject) saveNewBuildsAndTasks(ctx context.Context, settings * return errors.Wrapf(err, "validating the number of tasks to be added by '%s'", g.Task.Id) } span.SetAttributes(attribute.Int(numGenerateTasksAttribute, tasksToBeGenerated)) - if err = g.Task.SetNumGeneratedTasks(tasksToBeGenerated); err != nil { + if err = g.Task.SetNumGeneratedTasks(ctx, tasksToBeGenerated); err != nil { return errors.Wrapf(err, "setting number of tasks generated by '%s'", g.Task.Id) } @@ -358,7 +358,7 @@ func (g *GeneratedProject) saveNewBuildsAndTasks(ctx context.Context, settings * numActivatedGenerateTasks := len(activatedTasksInExistingBuilds) + len(activatedTasksInNewBuilds) span.SetAttributes(attribute.Int(numActivatedGenerateTasksAttribute, numActivatedGenerateTasks)) - if err = g.Task.SetNumActivatedGeneratedTasks(numActivatedGenerateTasks); err != nil { + if err = g.Task.SetNumActivatedGeneratedTasks(ctx, numActivatedGenerateTasks); err != nil { return errors.Wrapf(err, "setting number of tasks generated and activated by '%s'", g.Task.Id) } diff --git a/model/lifecycle.go b/model/lifecycle.go index 7db674ddea5..f0ed5983fe3 100644 --- a/model/lifecycle.go +++ b/model/lifecycle.go @@ -1044,7 +1044,7 @@ func RecomputeNumDependents(ctx context.Context, t task.Task) error { SetNumDependents(taskPtrs) catcher := grip.NewBasicCatcher() for _, t := range taskPtrs { - catcher.Add(t.SetNumDependents()) + catcher.Add(t.SetNumDependents(ctx)) } return 
errors.Wrap(catcher.Resolve(), "setting num dependents") diff --git a/model/task/db.go b/model/task/db.go index 4df1dfdf583..5b8a3bb916b 100644 --- a/model/task/db.go +++ b/model/task/db.go @@ -1663,29 +1663,15 @@ func FindAllOld(ctx context.Context, query db.Q) ([]Task, error) { } // UpdateOne updates one task. -func UpdateOne(query interface{}, update interface{}) error { - return db.Update( +func UpdateOne(ctx context.Context, query interface{}, update interface{}) error { + return db.UpdateContext( + ctx, Collection, query, update, ) } -func UpdateOneContext(ctx context.Context, query interface{}, update interface{}) error { - res, err := evergreen.GetEnvironment().DB().Collection(Collection).UpdateOne(ctx, - query, - update, - ) - if err != nil { - return errors.Wrapf(err, "updating task") - } - if res.MatchedCount == 0 { - return adb.ErrNotFound - } - - return nil -} - func UpdateAll(query interface{}, update interface{}) (*adb.ChangeInfo, error) { return db.UpdateAll( Collection, @@ -1824,11 +1810,12 @@ func HasUnfinishedTaskForVersions(versionIds []string, taskName, variantName str return count > 0, err } -func AddHostCreateDetails(taskId, hostId string, execution int, hostCreateError error) error { +func AddHostCreateDetails(ctx context.Context, taskId, hostId string, execution int, hostCreateError error) error { if hostCreateError == nil { return nil } err := UpdateOne( + ctx, ByIdAndExecution(taskId, execution), bson.M{"$push": bson.M{ HostCreateDetailsKey: HostCreateDetail{HostId: hostId, Error: hostCreateError.Error()}, @@ -2702,8 +2689,9 @@ func enableDisabledTasks(taskIDs []string) error { // IncNumNextTaskDispatches sets the number of times a host has requested this // task and execution as its next task. 
-func (t *Task) IncNumNextTaskDispatches() error { +func (t *Task) IncNumNextTaskDispatches(ctx context.Context) error { if err := UpdateOne( + ctx, ByIdAndExecution(t.Id, t.Execution), bson.M{ "$inc": bson.M{NumNextTaskDispatchesKey: 1}, @@ -2717,7 +2705,7 @@ func (t *Task) IncNumNextTaskDispatches() error { // UpdateHasAnnotations updates a task's HasAnnotations flag, indicating if there // are any annotations with populated IssuesKey for its id / execution pair. func UpdateHasAnnotations(ctx context.Context, taskId string, execution int, hasAnnotations bool) error { - err := UpdateOneContext( + err := UpdateOne( ctx, ByIdAndExecution(taskId, execution), []bson.M{ diff --git a/model/task/db_test.go b/model/task/db_test.go index f00b40e0ba1..943e52a22b3 100644 --- a/model/task/db_test.go +++ b/model/task/db_test.go @@ -478,7 +478,7 @@ func TestAddHostCreateDetails(t *testing.T) { task := Task{Id: "t1", Execution: 0} assert.NoError(t, task.Insert()) errToSave := errors.Wrapf(errors.New("InsufficientCapacityError"), "error trying to start host") - assert.NoError(t, AddHostCreateDetails(task.Id, "h1", 0, errToSave)) + assert.NoError(t, AddHostCreateDetails(ctx, task.Id, "h1", 0, errToSave)) dbTask, err := FindOneId(ctx, task.Id) assert.NoError(t, err) assert.NotNil(t, dbTask) @@ -486,7 +486,7 @@ func TestAddHostCreateDetails(t *testing.T) { assert.Equal(t, "h1", dbTask.HostCreateDetails[0].HostId) assert.Contains(t, dbTask.HostCreateDetails[0].Error, "InsufficientCapacityError") - assert.NoError(t, AddHostCreateDetails(task.Id, "h2", 0, errToSave)) + assert.NoError(t, AddHostCreateDetails(ctx, task.Id, "h2", 0, errToSave)) dbTask, err = FindOneId(ctx, task.Id) assert.NoError(t, err) assert.NotNil(t, dbTask) @@ -2015,6 +2015,9 @@ func TestCountNumExecutionsForInterval(t *testing.T) { } func TestHasActivatedDependentTasks(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + assert.NoError(t, db.Clear(Collection)) t1 := Task{ Id: 
"activeDependent", @@ -2048,7 +2051,7 @@ func TestHasActivatedDependentTasks(t *testing.T) { assert.True(t, hasDependentTasks) // Tasks overriding dependencies don't count as dependent. - assert.NoError(t, t3.SetOverrideDependencies("me")) + assert.NoError(t, t3.SetOverrideDependencies(ctx, "me")) hasDependentTasks, err = HasActivatedDependentTasks("secondTask") assert.NoError(t, err) assert.False(t, hasDependentTasks) diff --git a/model/task/generated_json_db.go b/model/task/generated_json_db.go index 4e1c46994db..c83d24d5972 100644 --- a/model/task/generated_json_db.go +++ b/model/task/generated_json_db.go @@ -17,6 +17,6 @@ func (s generatedJSONDBStorage) Find(_ context.Context, t *Task) (GeneratedJSONF // Insert inserts all the generated JSON files for the given task. If the files // are already persisted, this will no-op. -func (s generatedJSONDBStorage) Insert(_ context.Context, t *Task, files GeneratedJSONFiles) error { - return t.SetGeneratedJSON(files) +func (s generatedJSONDBStorage) Insert(ctx context.Context, t *Task, files GeneratedJSONFiles) error { + return t.SetGeneratedJSON(ctx, files) } diff --git a/model/task/generated_json_s3.go b/model/task/generated_json_s3.go index b66a7df1557..17c75b67c1f 100644 --- a/model/task/generated_json_s3.go +++ b/model/task/generated_json_s3.go @@ -85,7 +85,7 @@ func (s *generatedJSONS3Storage) Insert(ctx context.Context, t *Task, files Gene } } - if err := t.SetGeneratedJSONStorageMethod(evergreen.ProjectStorageMethodS3); err != nil { + if err := t.SetGeneratedJSONStorageMethod(ctx, evergreen.ProjectStorageMethodS3); err != nil { return errors.Wrapf(err, "settings generated JSON storage method to S3 for task '%s'", t.Id) } diff --git a/model/task/task.go b/model/task/task.go index 5f67eeb3d45..f552708c9ba 100644 --- a/model/task/task.go +++ b/model/task/task.go @@ -664,13 +664,14 @@ func (t *Task) isSystemUnresponsive() bool { return false } -func (t *Task) SetOverrideDependencies(userID string) error { +func (t 
*Task) SetOverrideDependencies(ctx context.Context, userID string) error { dependenciesMetTime := time.Now() t.OverrideDependencies = true t.DependenciesMetTime = dependenciesMetTime t.DisplayStatusCache = t.DetermineDisplayStatus() event.LogTaskDependenciesOverridden(t.Id, t.Execution, userID) return UpdateOne( + ctx, bson.M{ IdKey: t.Id, }, @@ -710,6 +711,7 @@ func (t *Task) AddDependency(ctx context.Context, d Dependency) error { t.DependsOn = append(t.DependsOn, d) t.DisplayStatusCache = t.DetermineDisplayStatus() return UpdateOne( + ctx, bson.M{ IdKey: t.Id, }, @@ -782,6 +784,7 @@ func (t *Task) DependenciesMet(ctx context.Context, depCaches map[string]Task) ( t.setDependenciesMetTime() err = UpdateOne( + ctx, bson.M{IdKey: t.Id}, bson.M{ "$set": bson.M{ @@ -1010,8 +1013,9 @@ func (t *Task) PreviousCompletedTask(ctx context.Context, project string, status return FindOne(ctx, query) } -func (t *Task) cacheExpectedDuration() error { +func (t *Task) cacheExpectedDuration(ctx context.Context) error { return UpdateOne( + ctx, bson.M{ IdKey: t.Id, }, @@ -1075,7 +1079,7 @@ func (t *Task) MarkAsContainerDispatched(ctx context.Context, env evergreen.Envi // updates fail. func (t *Task) MarkAsHostDispatched(ctx context.Context, hostID, distroID, agentRevision string, dispatchTime time.Time) error { doUpdate := func(update []bson.M) error { - return UpdateOneContext(ctx, bson.M{IdKey: t.Id}, update) + return UpdateOne(ctx, bson.M{IdKey: t.Id}, update) } if err := t.markAsHostDispatchedWithFunc(doUpdate, hostID, distroID, agentRevision, dispatchTime); err != nil { return err @@ -1291,7 +1295,7 @@ func MarkTasksAsContainerDeallocated(taskIDs []string) error { } // MarkGeneratedTasks marks that the task has generated tasks. 
-func MarkGeneratedTasks(taskID string) error { +func MarkGeneratedTasks(ctx context.Context, taskID string) error { query := bson.M{ IdKey: taskID, GeneratedTasksKey: bson.M{"$exists": false}, @@ -1304,7 +1308,7 @@ func MarkGeneratedTasks(taskID string) error { GenerateTasksErrorKey: 1, }, } - err := UpdateOne(query, update) + err := UpdateOne(ctx, query, update) if adb.ResultsNotFound(err) { return nil } @@ -1312,7 +1316,7 @@ func MarkGeneratedTasks(taskID string) error { } // MarkGeneratedTasksErr marks that the task hit errors generating tasks. -func MarkGeneratedTasksErr(taskID string, errorToSet error) error { +func MarkGeneratedTasksErr(ctx context.Context, taskID string, errorToSet error) error { if errorToSet == nil || adb.ResultsNotFound(errorToSet) || db.IsDuplicateKey(errorToSet) { return nil } @@ -1325,7 +1329,7 @@ func MarkGeneratedTasksErr(taskID string, errorToSet error) error { GenerateTasksErrorKey: errorToSet.Error(), }, } - err := UpdateOne(query, update) + err := UpdateOne(ctx, query, update) if adb.ResultsNotFound(err) { return nil } @@ -1345,12 +1349,13 @@ func GenerateNotRun() ([]Task, error) { // SetGeneratedJSON sets JSON data to generate tasks from. If the generated JSON // files have already been stored, this is a no-op. -func (t *Task) SetGeneratedJSON(files GeneratedJSONFiles) error { +func (t *Task) SetGeneratedJSON(ctx context.Context, files GeneratedJSONFiles) error { if len(t.GeneratedJSONAsString) > 0 || t.GeneratedJSONStorageMethod != "" { return nil } if err := UpdateOne( + ctx, bson.M{ IdKey: t.Id, GeneratedJSONAsStringKey: bson.M{"$exists": false}, @@ -1374,12 +1379,13 @@ func (t *Task) SetGeneratedJSON(files GeneratedJSONFiles) error { // SetGeneratedJSONStorageMethod sets the task's generated JSON file storage // method. If it's already been set, this is a no-op. 
-func (t *Task) SetGeneratedJSONStorageMethod(method evergreen.ParserProjectStorageMethod) error { +func (t *Task) SetGeneratedJSONStorageMethod(ctx context.Context, method evergreen.ParserProjectStorageMethod) error { if t.GeneratedJSONStorageMethod != "" { return nil } if err := UpdateOne( + ctx, bson.M{ IdKey: t.Id, GeneratedJSONStorageMethodKey: nil, @@ -1399,8 +1405,9 @@ func (t *Task) SetGeneratedJSONStorageMethod(method evergreen.ParserProjectStora } // SetGeneratedTasksToActivate adds a task to stepback after activation -func (t *Task) SetGeneratedTasksToActivate(buildVariantName, taskName string) error { +func (t *Task) SetGeneratedTasksToActivate(ctx context.Context, buildVariantName, taskName string) error { return UpdateOne( + ctx, bson.M{ IdKey: t.Id, }, @@ -1631,10 +1638,11 @@ func DeactivateStepbackTask(ctx context.Context, projectId, buildVariantName, ta } // MarkFailed changes the state of the task to failed. -func (t *Task) MarkFailed() error { +func (t *Task) MarkFailed(ctx context.Context) error { t.Status = evergreen.TaskFailed t.DisplayStatusCache = t.DetermineDisplayStatus() return UpdateOne( + ctx, bson.M{ IdKey: t.Id, }, @@ -1647,7 +1655,7 @@ func (t *Task) MarkFailed() error { ) } -func (t *Task) MarkSystemFailed(description string) error { +func (t *Task) MarkSystemFailed(ctx context.Context, description string) error { t.FinishTime = time.Now() t.Details = GetSystemFailureDetails(description) @@ -1671,7 +1679,7 @@ func (t *Task) MarkSystemFailed(description string) error { "execution_platform": t.ExecutionPlatform, }) - return t.MarkEnd(t.FinishTime, &t.Details) + return t.MarkEnd(ctx, t.FinishTime, &t.Details) } // GetSystemFailureDetails returns a task's end details based on an input description. 
@@ -1692,7 +1700,7 @@ func GetSystemFailureDetails(description string) apimodels.TaskEndDetail { func (t *Task) SetAborted(ctx context.Context, reason AbortInfo) error { t.Aborted = true t.DisplayStatus = t.DetermineDisplayStatus() - return UpdateOneContext( + return UpdateOne( ctx, bson.M{ IdKey: t.Id, @@ -1716,8 +1724,9 @@ func taskAbortUpdate(reason AbortInfo) bson.M { // SetLastAndPreviousStepbackIds sets the LastFailingStepbackTaskId, // LastPassingStepbackTaskId, and PreviousStepbackTaskId for a given task id. -func SetLastAndPreviousStepbackIds(taskId string, s StepbackInfo) error { +func SetLastAndPreviousStepbackIds(ctx context.Context, taskId string, s StepbackInfo) error { return UpdateOne( + ctx, bson.M{ IdKey: taskId, }, @@ -1735,8 +1744,9 @@ func SetLastAndPreviousStepbackIds(taskId string, s StepbackInfo) error { // AddGeneratedStepbackInfoForGenerator appends a new StepbackInfo to the // task's GeneratedStepbackInfo. -func AddGeneratedStepbackInfoForGenerator(taskId string, s StepbackInfo) error { +func AddGeneratedStepbackInfoForGenerator(ctx context.Context, taskId string, s StepbackInfo) error { return UpdateOne( + ctx, bson.M{ IdKey: taskId, }, @@ -1782,14 +1792,15 @@ func SetGeneratedStepbackInfoForGenerator(ctx context.Context, taskId string, s ) // If no documents were modified, fallback to adding the new StepbackInfo. if r.ModifiedCount == 0 { - return AddGeneratedStepbackInfoForGenerator(taskId, s) + return AddGeneratedStepbackInfoForGenerator(ctx, taskId, s) } return err } // SetNextStepbackId sets the NextStepbackTaskId for a given task id. 
-func SetNextStepbackId(taskId string, s StepbackInfo) error { +func SetNextStepbackId(ctx context.Context, taskId string, s StepbackInfo) error { return UpdateOne( + ctx, bson.M{ IdKey: taskId, }, @@ -1900,7 +1911,7 @@ func (t *Task) GetTestLogs(ctx context.Context, getOpts taskoutput.TestLogGetOpt // because in cases where multiple calls to attach test results are made for a // task, only one call needs to have a test failure for the ResultsFailed field // to be set to true. -func (t *Task) SetResultsInfo(service string, failedResults bool) error { +func (t *Task) SetResultsInfo(ctx context.Context, service string, failedResults bool) error { if t.DisplayOnly { return errors.New("cannot set results info on a display task") } @@ -1920,7 +1931,7 @@ func (t *Task) SetResultsInfo(service string, failedResults bool) error { set[ResultsFailedKey] = true } - return errors.WithStack(UpdateOne(ById(t.Id), bson.M{"$set": set})) + return errors.WithStack(UpdateOne(ctx, ById(t.Id), bson.M{"$set": set})) } // HasResults returns whether the task has test results or not. @@ -2342,7 +2353,7 @@ func DeactivateDependencies(tasks []string, caller string) error { // MarkEnd handles the Task updates associated with ending a task. If the task's start time is zero // at this time, it will set it to the finish time minus the timeout time. -func (t *Task) MarkEnd(finishTime time.Time, detail *apimodels.TaskEndDetail) error { +func (t *Task) MarkEnd(ctx context.Context, finishTime time.Time, detail *apimodels.TaskEndDetail) error { // if there is no start time set, either set it to the create time // or set 2 hours previous to the finish time. 
if utility.IsZeroTime(t.StartTime) { @@ -2383,6 +2394,7 @@ func (t *Task) MarkEnd(finishTime time.Time, detail *apimodels.TaskEndDetail) er t.ContainerAllocatedTime = time.Time{} t.DisplayStatusCache = t.DetermineDisplayStatus() return UpdateOne( + ctx, bson.M{ IdKey: t.Id, }, @@ -2489,7 +2501,8 @@ func (t *Task) displayTaskPriority() int { // Reset sets the task state to a state in which it is scheduled to re-run. func (t *Task) Reset(ctx context.Context, caller string) error { - return UpdateOneContext(ctx, + return UpdateOne( + ctx, bson.M{ IdKey: t.Id, StatusKey: bson.M{"$in": evergreen.TaskCompletedStatuses}, @@ -2603,9 +2616,10 @@ func resetTaskUpdate(t *Task, caller string) []bson.M { } // UpdateHeartbeat updates the heartbeat to be the current time -func (t *Task) UpdateHeartbeat() error { +func (t *Task) UpdateHeartbeat(ctx context.Context) error { t.LastHeartbeat = time.Now() return UpdateOne( + ctx, bson.M{ IdKey: t.Id, }, @@ -2618,8 +2632,9 @@ func (t *Task) UpdateHeartbeat() error { } // SetNumGeneratedTasks sets the number of generated tasks to the given value. -func (t *Task) SetNumGeneratedTasks(numGeneratedTasks int) error { +func (t *Task) SetNumGeneratedTasks(ctx context.Context, numGeneratedTasks int) error { return UpdateOne( + ctx, bson.M{ IdKey: t.Id, }, @@ -2632,8 +2647,9 @@ func (t *Task) SetNumGeneratedTasks(numGeneratedTasks int) error { } // SetNumActivatedGeneratedTasks sets the number of activated generated tasks to the given value. 
-func (t *Task) SetNumActivatedGeneratedTasks(numActivatedGeneratedTasks int) error { +func (t *Task) SetNumActivatedGeneratedTasks(ctx context.Context, numActivatedGeneratedTasks int) error { return UpdateOne( + ctx, bson.M{ IdKey: t.Id, }, @@ -2742,12 +2758,13 @@ func getRecursiveDependenciesDown(tasks []string, taskMap map[string]bool) ([]Ta } // MarkStart updates the task's start time and sets the status to started -func (t *Task) MarkStart(startTime time.Time) error { +func (t *Task) MarkStart(ctx context.Context, startTime time.Time) error { // record the start time in the in-memory task t.StartTime = startTime t.Status = evergreen.TaskStarted t.DisplayStatusCache = t.DetermineDisplayStatus() return UpdateOne( + ctx, bson.M{ IdKey: t.Id, }, @@ -2763,10 +2780,11 @@ func (t *Task) MarkStart(startTime time.Time) error { } // MarkUnscheduled marks the task as undispatched and updates it in the database -func (t *Task) MarkUnscheduled() error { +func (t *Task) MarkUnscheduled(ctx context.Context) error { t.Status = evergreen.TaskUndispatched t.DisplayStatusCache = t.DetermineDisplayStatus() return UpdateOne( + ctx, bson.M{ IdKey: t.Id, }, @@ -3040,7 +3058,7 @@ func (t *Task) Archive(ctx context.Context) error { return errors.Wrap(err, "inserting archived task into old tasks") } t.Aborted = false - err = UpdateOneContext( + err = UpdateOne( ctx, bson.M{ IdKey: t.Id, @@ -3344,6 +3362,7 @@ func (t *Task) SetResetWhenFinished(ctx context.Context, caller string) error { t.ResetFailedWhenFinished = false t.ResetWhenFinished = true return UpdateOne( + ctx, bson.M{ IdKey: t.Id, }, @@ -3362,7 +3381,7 @@ func (t *Task) SetResetWhenFinished(ctx context.Context, caller string) error { // automatically reset when finished via the agent status server) reset itself // when finished. It will also increment the number of automatic resets the task // has performed. 
-func (t *Task) SetResetWhenFinishedWithInc() error { +func (t *Task) SetResetWhenFinishedWithInc(ctx context.Context) error { if t.ResetWhenFinished { return nil } @@ -3370,6 +3389,7 @@ func (t *Task) SetResetWhenFinishedWithInc() error { return errors.New("cannot set reset when finished for aborted task") } err := UpdateOne( + ctx, bson.M{ IdKey: t.Id, AbortedKey: bson.M{"$ne": true}, @@ -3403,6 +3423,7 @@ func (t *Task) SetResetFailedWhenFinished(ctx context.Context, caller string) er t.ResetWhenFinished = false t.ResetFailedWhenFinished = true return UpdateOne( + ctx, bson.M{ IdKey: t.Id, }, @@ -3598,7 +3619,7 @@ func (t *Task) GetDisplayTask(ctx context.Context) (*Task, error) { if t.DisplayTaskId == nil { // Cache display task ID for future use. If we couldn't find the display task, // we cache the empty string to show that it doesn't exist. - grip.Error(message.WrapError(t.SetDisplayTaskID(dtId), message.Fields{ + grip.Error(message.WrapError(t.SetDisplayTaskID(ctx, dtId), message.Fields{ "message": "failed to cache display task ID for task", "task_id": t.Id, "display_task_id": dtId, @@ -3655,7 +3676,7 @@ func GetAllDependencies(taskIDs []string, taskMap map[string]*Task) ([]Dependenc return deps, nil } -func (t *Task) FetchExpectedDuration() util.DurationStats { +func (t *Task) FetchExpectedDuration(ctx context.Context) util.DurationStats { if t.DurationPrediction.TTL == 0 { t.DurationPrediction.TTL = utility.JitterInterval(predictionTTL) } @@ -3667,7 +3688,7 @@ func (t *Task) FetchExpectedDuration() util.DurationStats { t.DurationPrediction.Value = t.ExpectedDuration t.DurationPrediction.CollectedAt = time.Now().Add(-time.Minute) - if err := t.cacheExpectedDuration(); err != nil { + if err := t.cacheExpectedDuration(ctx); err != nil { grip.Error(message.WrapError(err, message.Fields{ "task": t.Id, "message": "caching expected duration", @@ -3715,7 +3736,7 @@ func (t *Task) FetchExpectedDuration() util.DurationStats { stats, ok := t.DurationPrediction.Get() 
if ok { - if err := t.cacheExpectedDuration(); err != nil { + if err := t.cacheExpectedDuration(ctx); err != nil { grip.Error(message.WrapError(err, message.Fields{ "task": t.Id, "message": "caching expected duration", @@ -4037,25 +4058,18 @@ func (t *Task) UpdateDependsOn(status string, newDependencyIDs []string) error { return errors.Wrap(err, "updating dependencies") } -func (t *Task) SetTaskGroupInfo() error { - return errors.WithStack(UpdateOne(bson.M{IdKey: t.Id}, - bson.M{"$set": bson.M{ - TaskGroupOrderKey: t.TaskGroupOrder, - TaskGroupMaxHostsKey: t.TaskGroupMaxHosts, - }})) -} - -func (t *Task) SetDisplayTaskID(id string) error { +func (t *Task) SetDisplayTaskID(ctx context.Context, id string) error { t.DisplayTaskId = utility.ToStringPtr(id) - return errors.WithStack(UpdateOne(bson.M{IdKey: t.Id}, + return errors.WithStack(UpdateOne(ctx, bson.M{IdKey: t.Id}, bson.M{"$set": bson.M{ DisplayTaskIdKey: id, }})) } // SetCheckRunId sets the checkRunId for the task -func (t *Task) SetCheckRunId(checkRunId int64) error { +func (t *Task) SetCheckRunId(ctx context.Context, checkRunId int64) error { if err := UpdateOne( + ctx, bson.M{ IdKey: t.Id, }, @@ -4071,7 +4085,7 @@ func (t *Task) SetCheckRunId(checkRunId int64) error { return nil } -func (t *Task) SetNumDependents() error { +func (t *Task) SetNumDependents(ctx context.Context) error { update := bson.M{ "$set": bson.M{ NumDependentsKey: t.NumDependents, @@ -4082,7 +4096,7 @@ func (t *Task) SetNumDependents() error { NumDependentsKey: "", }} } - return UpdateOne(bson.M{ + return UpdateOne(ctx, bson.M{ IdKey: t.Id, }, update) } @@ -4127,6 +4141,7 @@ func AddExecTasksToDisplayTask(ctx context.Context, displayTaskId string, execTa } return UpdateOne( + ctx, bson.M{IdKey: displayTaskId}, update, ) diff --git a/model/task/task_test.go b/model/task/task_test.go index a288f70562c..377b9774fbf 100644 --- a/model/task/task_test.go +++ b/model/task/task_test.go @@ -43,14 +43,14 @@ var depTaskIds = []Dependency{ } // 
update statuses of test tasks in the db -func updateTestDepTasks(t *testing.T) { +func updateTestDepTasks(ctx context.Context, t *testing.T) { // cases for success/default for _, depTaskId := range depTaskIds[:3] { - require.NoError(t, UpdateOne(bson.M{"_id": depTaskId.TaskId}, bson.M{"$set": bson.M{"status": evergreen.TaskSucceeded}})) + require.NoError(t, UpdateOne(ctx, bson.M{"_id": depTaskId.TaskId}, bson.M{"$set": bson.M{"status": evergreen.TaskSucceeded}})) } // cases for * and failure for _, depTaskId := range depTaskIds[3:] { - require.NoError(t, UpdateOne(bson.M{"_id": depTaskId.TaskId}, bson.M{"$set": bson.M{"status": evergreen.TaskFailed}})) + require.NoError(t, UpdateOne(ctx, bson.M{"_id": depTaskId.TaskId}, bson.M{"$set": bson.M{"status": evergreen.TaskFailed}})) } } @@ -298,6 +298,7 @@ func TestDependenciesMet(t *testing.T) { func() { taskDoc.DependsOn = depTaskIds So(UpdateOne( + ctx, bson.M{"_id": depTaskIds[0].TaskId}, bson.M{ "$set": bson.M{ @@ -315,7 +316,7 @@ func TestDependenciesMet(t *testing.T) { Convey("if all of the tasks dependencies are finished properly, it"+ " should correctly believe its dependencies are met", func() { taskDoc.DependsOn = depTaskIds - updateTestDepTasks(t) + updateTestDepTasks(ctx, t) met, err := taskDoc.DependenciesMet(ctx, map[string]Task{}) So(err, ShouldBeNil) So(met, ShouldBeTrue) @@ -326,7 +327,7 @@ func TestDependenciesMet(t *testing.T) { " cache during dependency checking", func() { dependencyCache := make(map[string]Task) taskDoc.DependsOn = depTaskIds - updateTestDepTasks(t) + updateTestDepTasks(ctx, t) met, err := taskDoc.DependenciesMet(ctx, dependencyCache) So(err, ShouldBeNil) So(met, ShouldBeTrue) @@ -340,7 +341,7 @@ func TestDependenciesMet(t *testing.T) { Convey("cached dependencies should be used rather than fetching them"+ " from the database", func() { - updateTestDepTasks(t) + updateTestDepTasks(ctx, t) dependencyCache := make(map[string]Task) taskDoc.DependsOn = depTaskIds met, err := 
taskDoc.DependenciesMet(ctx, dependencyCache) @@ -365,6 +366,7 @@ func TestDependenciesMet(t *testing.T) { Convey("extraneous tasks in the dependency cache should be ignored", func() { So(UpdateOne( + ctx, bson.M{"_id": depTaskIds[0].TaskId}, bson.M{ "$set": bson.M{ @@ -373,6 +375,7 @@ func TestDependenciesMet(t *testing.T) { }, ), ShouldBeNil) So(UpdateOne( + ctx, bson.M{"_id": depTaskIds[1].TaskId}, bson.M{ "$set": bson.M{ @@ -381,6 +384,7 @@ func TestDependenciesMet(t *testing.T) { }, ), ShouldBeNil) So(UpdateOne( + ctx, bson.M{"_id": depTaskIds[2].TaskId}, bson.M{ "$set": bson.M{ @@ -680,7 +684,7 @@ func TestMarkDependenciesFinished(t *testing.T) { t0.FinishTime = time.Now() require.NoError(t, t0.MarkDependenciesFinished(ctx, true)) - assert.NoError(t, t0.MarkEnd(t0.FinishTime, &apimodels.TaskEndDetail{Status: evergreen.TaskFailed})) + assert.NoError(t, t0.MarkEnd(ctx, t0.FinishTime, &apimodels.TaskEndDetail{Status: evergreen.TaskFailed})) dbTask1, err := FindOneId(ctx, t1.Id) require.NoError(t, err) @@ -713,7 +717,7 @@ func TestMarkDependenciesFinished(t *testing.T) { t0.FinishTime = time.Now().Round(time.Millisecond) require.NoError(t, t0.MarkDependenciesFinished(ctx, true)) - assert.NoError(t, t0.MarkEnd(t0.FinishTime, &apimodels.TaskEndDetail{Status: evergreen.TaskSucceeded})) + assert.NoError(t, t0.MarkEnd(ctx, t0.FinishTime, &apimodels.TaskEndDetail{Status: evergreen.TaskSucceeded})) dbTask1, err := FindOneId(ctx, t1.Id) require.NoError(t, err) @@ -755,11 +759,11 @@ func TestMarkDependenciesFinished(t *testing.T) { t0.FinishTime = time.Now() require.NoError(t, t0.MarkDependenciesFinished(ctx, true)) - assert.NoError(t, t0.MarkEnd(t0.FinishTime, &apimodels.TaskEndDetail{Status: evergreen.TaskSucceeded})) + assert.NoError(t, t0.MarkEnd(ctx, t0.FinishTime, &apimodels.TaskEndDetail{Status: evergreen.TaskSucceeded})) t1.FinishTime = time.Now().Round(time.Millisecond) require.NoError(t, t1.MarkDependenciesFinished(ctx, true)) - assert.NoError(t, 
t1.MarkEnd(t1.FinishTime, &apimodels.TaskEndDetail{Status: evergreen.TaskSucceeded})) + assert.NoError(t, t1.MarkEnd(ctx, t1.FinishTime, &apimodels.TaskEndDetail{Status: evergreen.TaskSucceeded})) dbTask2, err := FindOneId(ctx, t2.Id) require.NoError(t, err) @@ -967,7 +971,7 @@ func TestSetTasksScheduledTime(t *testing.T) { depsFinishedTime := time.Now() So(tasks[2].MarkDependenciesFinished(ctx, true), ShouldBeNil) - So(tasks[2].MarkEnd(newTime, &apimodels.TaskEndDetail{Status: evergreen.TaskSucceeded}), ShouldBeNil) + So(tasks[2].MarkEnd(ctx, newTime, &apimodels.TaskEndDetail{Status: evergreen.TaskSucceeded}), ShouldBeNil) t3FromDb, err := FindOneId(ctx, "t3") So(err, ShouldBeNil) depsMet, err := t3FromDb.DependenciesMet(ctx, map[string]Task{}) @@ -1122,7 +1126,7 @@ func TestEndingTask(t *testing.T) { Status: evergreen.TaskFailed, } - So(t.MarkEnd(now, details), ShouldBeNil) + So(t.MarkEnd(ctx, now, details), ShouldBeNil) t, err := FindOne(ctx, db.Query(ById(t.Id))) So(err, ShouldBeNil) So(t.Status, ShouldEqual, evergreen.TaskFailed) @@ -1141,7 +1145,7 @@ func TestEndingTask(t *testing.T) { details := &apimodels.TaskEndDetail{ Status: evergreen.TaskFailed, } - So(t.MarkEnd(now, details), ShouldBeNil) + So(t.MarkEnd(ctx, now, details), ShouldBeNil) t, err := FindOne(ctx, db.Query(ById(t.Id))) So(err, ShouldBeNil) So(t.StartTime.Unix(), ShouldEqual, t.IngestTime.Unix()) @@ -1158,7 +1162,7 @@ func TestEndingTask(t *testing.T) { details := &apimodels.TaskEndDetail{ Status: evergreen.TaskFailed, } - So(t.MarkEnd(now, details), ShouldBeNil) + So(t.MarkEnd(ctx, now, details), ShouldBeNil) t, err := FindOne(ctx, db.Query(ById(t.Id))) So(err, ShouldBeNil) startTime := now.Add(-2 * time.Hour) @@ -1181,7 +1185,7 @@ func TestEndingTask(t *testing.T) { Status: evergreen.TaskFailed, } - So(t.MarkEnd(now, details), ShouldBeNil) + So(t.MarkEnd(ctx, now, details), ShouldBeNil) t, err := FindOne(ctx, db.Query(ById(t.Id))) So(err, ShouldBeNil) So(t.Status, ShouldEqual, 
evergreen.TaskFailed) @@ -2207,14 +2211,14 @@ func TestMarkGeneratedTasks(t *testing.T) { mockError := errors.New("mock error") - require.NoError(t, MarkGeneratedTasks(t1.Id)) + require.NoError(t, MarkGeneratedTasks(ctx, t1.Id)) found, err := FindOneId(ctx, t1.Id) require.NoError(t, err) require.True(t, found.GeneratedTasks) require.Equal(t, "", found.GenerateTasksError) - require.NoError(t, MarkGeneratedTasks(t1.Id)) - require.NoError(t, MarkGeneratedTasksErr(t1.Id, mockError)) + require.NoError(t, MarkGeneratedTasks(ctx, t1.Id)) + require.NoError(t, MarkGeneratedTasksErr(ctx, t1.Id, mockError)) found, err = FindOneId(ctx, t1.Id) require.NoError(t, err) require.True(t, found.GeneratedTasks) @@ -2224,7 +2228,7 @@ func TestMarkGeneratedTasks(t *testing.T) { Id: "t3", } require.NoError(t, t3.Insert()) - require.NoError(t, MarkGeneratedTasksErr(t3.Id, mongo.ErrNoDocuments)) + require.NoError(t, MarkGeneratedTasksErr(ctx, t3.Id, mongo.ErrNoDocuments)) found, err = FindOneId(ctx, t3.Id) require.NoError(t, err) require.False(t, found.GeneratedTasks, "document not found should not set generated tasks, since this was a race and did not generate.tasks") @@ -2235,7 +2239,7 @@ func TestMarkGeneratedTasks(t *testing.T) { } dupError := errors.New("duplicate key error") require.NoError(t, t4.Insert()) - require.NoError(t, MarkGeneratedTasksErr(t4.Id, dupError)) + require.NoError(t, MarkGeneratedTasksErr(ctx, t4.Id, dupError)) found, err = FindOneId(ctx, t4.Id) require.NoError(t, err) require.False(t, found.GeneratedTasks, "duplicate key error should not set generated tasks, since this was a race and did not generate.tasks") @@ -3537,28 +3541,28 @@ func TestSetGeneratedTasksToActivate(t *testing.T) { assert.NoError(t, task.Insert()) // add stepback task to variant - assert.NoError(t, task.SetGeneratedTasksToActivate("bv2", "t2")) + assert.NoError(t, task.SetGeneratedTasksToActivate(ctx, "bv2", "t2")) taskFromDb, err := FindOneId(ctx, "t1") assert.NoError(t, err) assert.NotNil(t, 
taskFromDb) assert.Equal(t, []string{"t2"}, taskFromDb.GeneratedTasksToActivate["bv2"]) // add different stepback task to variant - assert.NoError(t, task.SetGeneratedTasksToActivate("bv2", "t2.0")) + assert.NoError(t, task.SetGeneratedTasksToActivate(ctx, "bv2", "t2.0")) taskFromDb, err = FindOneId(ctx, "t1") assert.NoError(t, err) assert.NotNil(t, taskFromDb) assert.Equal(t, []string{"t2", "t2.0"}, taskFromDb.GeneratedTasksToActivate["bv2"]) // verify duplicate doesn't overwrite - assert.NoError(t, task.SetGeneratedTasksToActivate("bv2", "t2.0")) + assert.NoError(t, task.SetGeneratedTasksToActivate(ctx, "bv2", "t2.0")) taskFromDb, err = FindOneId(ctx, "t1") assert.NoError(t, err) assert.NotNil(t, taskFromDb) assert.Equal(t, []string{"t2", "t2.0"}, taskFromDb.GeneratedTasksToActivate["bv2"]) // adding second variant doesn't affect previous - assert.NoError(t, task.SetGeneratedTasksToActivate("bv3", "t3")) + assert.NoError(t, task.SetGeneratedTasksToActivate(ctx, "bv3", "t3")) taskFromDb, err = FindOneId(ctx, "t1") assert.NoError(t, err) assert.NotNil(t, taskFromDb) @@ -3582,7 +3586,7 @@ func TestSetNextStepbackId(t *testing.T) { PreviousStepbackTaskId: "t5", } - require.NoError(t, SetNextStepbackId(task.Id, s)) + require.NoError(t, SetNextStepbackId(ctx, task.Id, s)) taskFromDb, err := FindOneId(ctx, "t1") require.NoError(t, err) require.NotNil(t, taskFromDb) @@ -3608,7 +3612,7 @@ func TestSetLastAndPreviousStepbackIds(t *testing.T) { PreviousStepbackTaskId: "t5", } - require.NoError(t, SetLastAndPreviousStepbackIds(task.Id, s)) + require.NoError(t, SetLastAndPreviousStepbackIds(ctx, task.Id, s)) taskFromDb, err := FindOneId(ctx, "t1") require.NoError(t, err) require.NotNil(t, taskFromDb) @@ -3681,7 +3685,7 @@ func TestArchiveMany(t *testing.T) { assert.False(t, task.Aborted) assert.Equal(t, 1, task.Execution) } - oldTasks, err := FindAllOld(db.Query(ByVersion("v"))) + oldTasks, err := FindAllOld(ctx, db.Query(ByVersion("v"))) assert.NoError(t, err) assert.Len(t, 
oldTasks, 4) for _, task := range oldTasks { @@ -3804,6 +3808,7 @@ func TestArchiveManyAfterFailedOnly(t *testing.T) { // During runtime we do not archive the same task multiple times without resetting in between. // For the sake of this test, we manually untoggle CanReset so we can archive the task multiple times in a row. err = UpdateOne( + ctx, bson.M{IdKey: t3.Id}, bson.M{"$set": bson.M{CanResetKey: false}}, ) @@ -3910,7 +3915,7 @@ func TestSetCheckRunId(t *testing.T) { } assert.NoError(t, t1.Insert()) - assert.NoError(t, t1.SetCheckRunId(12345)) + assert.NoError(t, t1.SetCheckRunId(ctx, 12345)) var err error t1, err = FindOneId(ctx, t1.Id) @@ -4249,6 +4254,7 @@ func TestArchiveFailedOnly(t *testing.T) { t.Run("ArchivesExecutionTasksAfterFailedOnly", func(t *testing.T) { // Manually clear CanReset for the sake of this test. err := UpdateOne( + ctx, bson.M{IdKey: dt.Id}, bson.M{"$set": bson.M{CanResetKey: false}}, ) @@ -4538,6 +4544,7 @@ func (s *TaskConnectorFetchByIdSuite) TestFindByIdAndExecution() { for i := 0; i < 10; i++ { s.NoError(testTask1.Archive(ctx)) err := UpdateOne( + ctx, bson.M{IdKey: "task_1"}, bson.M{CanResetKey: false}, ) @@ -5441,7 +5448,7 @@ func TestSetGeneratedJSON(t *testing.T) { files := GeneratedJSONFiles{"generated_json"} require.NoError(t, tsk.Insert()) - require.NoError(t, tsk.SetGeneratedJSON(files)) + require.NoError(t, tsk.SetGeneratedJSON(ctx, files)) dbTask, err := FindOneId(ctx, tsk.Id) require.NoError(t, err) @@ -5453,7 +5460,7 @@ func TestSetGeneratedJSON(t *testing.T) { tsk.GeneratedJSONAsString = originalFiles require.NoError(t, tsk.Insert()) - require.NoError(t, tsk.SetGeneratedJSON([]string{"new_generated_json"})) + require.NoError(t, tsk.SetGeneratedJSON(ctx, []string{"new_generated_json"})) dbTask, err := FindOneId(ctx, tsk.Id) require.NoError(t, err) @@ -5467,7 +5474,7 @@ func TestSetGeneratedJSON(t *testing.T) { tsk.GeneratedJSONStorageMethod = evergreen.ProjectStorageMethodDB require.NoError(t, tsk.Insert()) - 
require.NoError(t, tsk.SetGeneratedJSON(GeneratedJSONFiles{"new_generated_json"})) + require.NoError(t, tsk.SetGeneratedJSON(ctx, GeneratedJSONFiles{"new_generated_json"})) dbTask, err := FindOneId(ctx, tsk.Id) require.NoError(t, err) @@ -5479,7 +5486,7 @@ func TestSetGeneratedJSON(t *testing.T) { tsk.GeneratedJSONStorageMethod = evergreen.ProjectStorageMethodS3 require.NoError(t, tsk.Insert()) - require.NoError(t, tsk.SetGeneratedJSON(GeneratedJSONFiles{"new_generated_json"})) + require.NoError(t, tsk.SetGeneratedJSON(ctx, GeneratedJSONFiles{"new_generated_json"})) dbTask, err := FindOneId(ctx, tsk.Id) require.NoError(t, err) @@ -5488,7 +5495,7 @@ func TestSetGeneratedJSON(t *testing.T) { assert.Empty(t, dbTask.GeneratedJSONAsString) }, "FailsForNonexistentTask": func(t *testing.T, tsk *Task) { - assert.Error(t, tsk.SetGeneratedJSON(GeneratedJSONFiles{"generated_json"})) + assert.Error(t, tsk.SetGeneratedJSON(ctx, GeneratedJSONFiles{"generated_json"})) assert.Empty(t, tsk.GeneratedJSONAsString) assert.Empty(t, tsk.GeneratedJSONStorageMethod) }, @@ -5517,7 +5524,7 @@ func TestSetGeneratedJSONStorageMethod(t *testing.T) { "Succeeds": func(t *testing.T, tsk *Task) { require.NoError(t, tsk.Insert()) - require.NoError(t, tsk.SetGeneratedJSONStorageMethod(evergreen.ProjectStorageMethodS3)) + require.NoError(t, tsk.SetGeneratedJSONStorageMethod(ctx, evergreen.ProjectStorageMethodS3)) dbTask, err := FindOneId(ctx, tsk.Id) require.NoError(t, err) @@ -5528,7 +5535,7 @@ func TestSetGeneratedJSONStorageMethod(t *testing.T) { tsk.GeneratedJSONStorageMethod = evergreen.ProjectStorageMethodDB require.NoError(t, tsk.Insert()) - require.NoError(t, tsk.SetGeneratedJSONStorageMethod(evergreen.ProjectStorageMethodS3)) + require.NoError(t, tsk.SetGeneratedJSONStorageMethod(ctx, evergreen.ProjectStorageMethodS3)) dbTask, err := FindOneId(ctx, tsk.Id) require.NoError(t, err) @@ -5536,7 +5543,7 @@ func TestSetGeneratedJSONStorageMethod(t *testing.T) { assert.Equal(t, 
evergreen.ProjectStorageMethodDB, dbTask.GeneratedJSONStorageMethod) }, "FailsForNonexistentTask": func(t *testing.T, tsk *Task) { - assert.Error(t, tsk.SetGeneratedJSONStorageMethod(evergreen.ProjectStorageMethodDB)) + assert.Error(t, tsk.SetGeneratedJSONStorageMethod(ctx, evergreen.ProjectStorageMethodDB)) assert.Empty(t, tsk.GeneratedJSONStorageMethod) }, } { diff --git a/model/task_lifecycle.go b/model/task_lifecycle.go index a7f2f47e1a9..2449a59bb34 100644 --- a/model/task_lifecycle.go +++ b/model/task_lifecycle.go @@ -258,7 +258,7 @@ func activatePreviousTask(ctx context.Context, taskId, caller string, originalSt // If this is a generator task and we originally were stepping back a generated task, activate the generated task // once the generator finishes. if prevTask.GenerateTask && originalStepbackTask != nil { - if err = prevTask.SetGeneratedTasksToActivate(originalStepbackTask.BuildVariant, originalStepbackTask.DisplayName); err != nil { + if err = prevTask.SetGeneratedTasksToActivate(ctx, originalStepbackTask.BuildVariant, originalStepbackTask.DisplayName); err != nil { return errors.Wrap(err, "setting generated tasks to activate") } } @@ -355,7 +355,7 @@ func TryResetTask(ctx context.Context, settings *evergreen.Settings, taskId, use } if detail != nil { - if err = t.MarkEnd(time.Now(), detail); err != nil { + if err = t.MarkEnd(ctx, time.Now(), detail); err != nil { return errors.Wrap(err, "marking task as ended") } } @@ -618,7 +618,7 @@ func doBisectStepback(ctx context.Context, t *task.Task) error { } s.NextStepbackTaskId = nextTask.Id // Store our next task to our current task. - if err := task.SetNextStepbackId(t.Id, s); err != nil { + if err := task.SetNextStepbackId(ctx, t.Id, s); err != nil { return errors.Wrapf(err, "could not set next stepback task id for stepback task '%s'", t.Id) } // If the next task has finished, negative priority, or already activated, no-op. 
@@ -626,7 +626,7 @@ func doBisectStepback(ctx context.Context, t *task.Task) error { return nil } // Store our last and previous stepback tasks in our upcoming/next task. - if err = task.SetLastAndPreviousStepbackIds(nextTask.Id, s); err != nil { + if err = task.SetLastAndPreviousStepbackIds(ctx, nextTask.Id, s); err != nil { return errors.Wrapf(err, "setting stepback info for task '%s'", nextTask.Id) } @@ -649,7 +649,7 @@ func doBisectStepback(ctx context.Context, t *task.Task) error { // If this is a generator task, activate generated tasks. if nextTask.GenerateTask { - if err = nextTask.SetGeneratedTasksToActivate(nextTask.BuildVariant, nextTask.DisplayName); err != nil { + if err = nextTask.SetGeneratedTasksToActivate(ctx, nextTask.BuildVariant, nextTask.DisplayName); err != nil { return errors.Wrap(err, "setting generated tasks to activate") } } @@ -725,7 +725,7 @@ func doBisectStepbackForGeneratedTask(ctx context.Context, generator *task.Task, // This is only for UI purposes. The generated task needs these fields populated // to show stepback options in the UI. We create a new stepback info because we do // not want to copy over the generated related fields. - if err := task.SetLastAndPreviousStepbackIds(generated.Id, task.StepbackInfo{ + if err := task.SetLastAndPreviousStepbackIds(ctx, generated.Id, task.StepbackInfo{ LastFailingStepbackTaskId: s.LastFailingStepbackTaskId, LastPassingStepbackTaskId: s.LastPassingStepbackTaskId, PreviousStepbackTaskId: s.PreviousStepbackTaskId, @@ -734,7 +734,7 @@ func doBisectStepbackForGeneratedTask(ctx context.Context, generator *task.Task, return errors.Wrapf(err, "could not set stepback info for generated task '%s'", generated.Id) } // Store our last and previous stepback tasks in our upcoming/next task. 
- if err = task.AddGeneratedStepbackInfoForGenerator(nextTask.Id, s); err != nil { + if err = task.AddGeneratedStepbackInfoForGenerator(ctx, nextTask.Id, s); err != nil { return errors.Wrapf(err, "setting stepback info for task '%s'", nextTask.Id) } @@ -758,7 +758,7 @@ func doBisectStepbackForGeneratedTask(ctx context.Context, generator *task.Task, // If this is a generator task, activate generated tasks. if nextTask.GenerateTask { - if err = nextTask.SetGeneratedTasksToActivate(generated.BuildVariant, generated.DisplayName); err != nil { + if err = nextTask.SetGeneratedTasksToActivate(ctx, generated.BuildVariant, generated.DisplayName); err != nil { return errors.Wrap(err, "setting generated tasks to activate") } } @@ -807,7 +807,7 @@ func MarkEnd(ctx context.Context, settings *evergreen.Settings, t *task.Task, ca } startPhaseAt := time.Now() - err := t.MarkEnd(finishTime, &detailsCopy) + err := t.MarkEnd(ctx, finishTime, &detailsCopy) grip.NoticeWhen(time.Since(startPhaseAt) > slowThreshold, message.Fields{ "message": "slow operation", @@ -1756,7 +1756,7 @@ func MarkStart(ctx context.Context, t *task.Task, updates *StatusChanges) error startTime := time.Now().Round(time.Millisecond) - if err = t.MarkStart(startTime); err != nil { + if err = t.MarkStart(ctx, startTime); err != nil { return errors.WithStack(err) } event.LogTaskStarted(t.Id, t.Execution) @@ -2137,7 +2137,7 @@ func endAndResetSystemFailedTask(ctx context.Context, settings *evergreen.Settin return errors.WithStack(MarkEnd(ctx, settings, t, evergreen.MonitorPackage, time.Now(), &failureDetails, false)) } - if err := t.MarkSystemFailed(description); err != nil { + if err := t.MarkSystemFailed(ctx, description); err != nil { return errors.Wrap(err, "marking task as system failed") } if err := logTaskEndStats(ctx, t); err != nil { @@ -2343,6 +2343,7 @@ func tryUpdateDisplayTaskAtomically(ctx context.Context, dt task.Task) (updated } if err := task.UpdateOne( + ctx, bson.M{ task.IdKey: dt.Id, // Require 
that the status/activation state is updated atomically. diff --git a/model/task_lifecycle_test.go b/model/task_lifecycle_test.go index eba19d41720..5e3e8985d43 100644 --- a/model/task_lifecycle_test.go +++ b/model/task_lifecycle_test.go @@ -723,7 +723,7 @@ func TestSetActiveState(t *testing.T) { }) Convey("activating a task with override dependencies set should not activate the tasks it depends on", func() { - So(testTask.SetOverrideDependencies(userName), ShouldBeNil) + So(testTask.SetOverrideDependencies(ctx, userName), ShouldBeNil) So(SetActiveState(ctx, userName, true, testTask), ShouldBeNil) depTask, err := task.FindOne(ctx, db.Query(task.ById(dep1.Id))) @@ -1451,6 +1451,7 @@ func TestUpdateVersionAndPatchStatusForBuilds(t *testing.T) { assert.Equal(t, evergreen.VersionStarted, dbPatch.Status) err = task.UpdateOne( + ctx, bson.M{task.IdKey: testTask.Id}, bson.M{"$set": bson.M{task.StatusKey: evergreen.TaskFailed}}, ) @@ -1707,7 +1708,7 @@ func TestUpdateBuildAndVersionStatusForTaskAbort(t *testing.T) { // abort started task assert.NoError(t, testTask.SetAborted(ctx, task.AbortInfo{})) - assert.NoError(t, testTask.MarkFailed()) + assert.NoError(t, testTask.MarkFailed(ctx)) assert.NoError(t, UpdateBuildAndVersionStatusForTask(ctx, &testTask)) dbBuild1, err = build.FindOneId(b1.Id) assert.NoError(t, err) @@ -1724,7 +1725,7 @@ func TestUpdateBuildAndVersionStatusForTaskAbort(t *testing.T) { // restart aborted task assert.NoError(t, testTask.Archive(ctx)) - assert.NoError(t, testTask.MarkUnscheduled()) + assert.NoError(t, testTask.MarkUnscheduled(ctx)) assert.NoError(t, UpdateBuildAndVersionStatusForTask(ctx, &testTask)) dbBuild1, err = build.FindOneId(b1.Id) assert.NoError(t, err) @@ -2921,7 +2922,7 @@ func TestTryResetTaskWithTaskGroup(t *testing.T) { assert.Contains(err.Error(), "cannot reset task in this status") }, "CanResetTaskGroup": func(t *testing.T, t1 *task.Task, t2Id string) { - assert.NoError(t1.MarkFailed()) + assert.NoError(t1.MarkFailed(ctx)) 
assert.NoError(TryResetTask(ctx, settings, t2Id, "user", "test", nil)) var err error @@ -2967,7 +2968,7 @@ func TestTryResetTaskWithTaskGroup(t *testing.T) { } assert.NoError(runningTask.Insert()) assert.NoError(otherTask.Insert()) - assert.NoError(runningTask.MarkStart(time.Now())) + assert.NoError(runningTask.MarkStart(ctx, time.Now())) t1 := *runningTask test(t, &t1, otherTask.Id) }) @@ -5494,7 +5495,7 @@ func TestDisplayTaskUpdates(t *testing.T) { assert.Equal(evergreen.TaskStarted, dbTask.Status) // a blocked execution task should not contribute to the status - assert.NoError(task10.MarkFailed()) + assert.NoError(task10.MarkFailed(ctx)) assert.NoError(UpdateDisplayTaskForTask(ctx, &task8)) dbTask, err = task.FindOne(ctx, db.Query(task.ById(blockedDt.Id))) assert.NoError(err) @@ -5689,17 +5690,17 @@ func TestDisplayTaskUpdatesAreConcurrencySafe(t *testing.T) { // Simulate a condition where some goroutines see the execution task as // still running, while others see it as succeeded. - if err := et1.MarkEnd(time.Now(), &apimodels.TaskEndDetail{Status: evergreen.TaskSucceeded}); err != nil { + if err := et1.MarkEnd(ctx, time.Now(), &apimodels.TaskEndDetail{Status: evergreen.TaskSucceeded}); err != nil { errs <- err return } - if err := et1.MarkStart(time.Now()); err != nil { + if err := et1.MarkStart(ctx, time.Now()); err != nil { errs <- err return } - if err := et1.MarkEnd(time.Now(), &apimodels.TaskEndDetail{Status: evergreen.TaskSucceeded}); err != nil { + if err := et1.MarkEnd(ctx, time.Now(), &apimodels.TaskEndDetail{Status: evergreen.TaskSucceeded}); err != nil { errs <- err return } @@ -5934,7 +5935,7 @@ func TestMarkEndDeactivatesPrevious(t *testing.T) { assert.NoError(err) assert.True(checkTask.Activated) - require.NoError(t, task.UpdateOne(bson.M{"_id": finishedTask.Id}, + require.NoError(t, task.UpdateOne(ctx, bson.M{"_id": finishedTask.Id}, bson.M{"$set": bson.M{"status": evergreen.TaskUndispatched}})) finishedTask.Requester = 
evergreen.RepotrackerVersionRequester finishedTask.Status = evergreen.TaskUndispatched @@ -5957,7 +5958,7 @@ func TestEvalBisectStepback(t *testing.T) { for tName, tCase := range map[string]func(t *testing.T, t10 task.Task){ "NoPreviousSuccessfulTask": func(t *testing.T, t10 task.Task) { // Set the first task to failed status. - require.NoError(task.UpdateOne(bson.M{"_id": "t1"}, + require.NoError(task.UpdateOne(ctx, bson.M{"_id": "t1"}, bson.M{"$set": bson.M{"status": evergreen.TaskFailed}})) require.NoError(evalStepback(ctx, &t10, evergreen.TaskFailed)) midTask, err := task.ByBeforeMidwayTaskFromIds(ctx, "t10", "t1") @@ -6026,7 +6027,7 @@ func TestEvalBisectStepback(t *testing.T) { // 2nd Iteration. Task failed, moving last failing stepback to midtask. prevTask.Status = evergreen.TaskFailed - require.NoError(task.UpdateOne(bson.M{"_id": midTask.Id}, + require.NoError(task.UpdateOne(ctx, bson.M{"_id": midTask.Id}, bson.M{"$set": bson.M{"status": evergreen.TaskFailed}})) // Activate next stepback require.NoError(evalStepback(ctx, &prevTask, evergreen.TaskFailed)) @@ -6080,7 +6081,7 @@ func TestEvalBisectStepback(t *testing.T) { // 2nd Iteration. Task passed, moving last passing stepback to midtask. midTask.Status = evergreen.TaskSucceeded prevTask := *midTask - require.NoError(task.UpdateOne(bson.M{"_id": midTask.Id}, + require.NoError(task.UpdateOne(ctx, bson.M{"_id": midTask.Id}, bson.M{"$set": bson.M{"status": evergreen.TaskSucceeded}})) // Activate next stepback require.NoError(evalStepback(ctx, midTask, evergreen.TaskSucceeded)) @@ -6113,7 +6114,7 @@ func TestEvalBisectStepback(t *testing.T) { "GeneratedTasksStepbackGenerator": func(t *testing.T, t10 task.Task) { // Make all generator tasks pass. 
for i := 1; i <= 10; i++ { - require.NoError(task.UpdateOne(bson.M{"_id": fmt.Sprintf("t%d", i)}, + require.NoError(task.UpdateOne(ctx, bson.M{"_id": fmt.Sprintf("t%d", i)}, bson.M{"$set": bson.M{"status": evergreen.TaskSucceeded}})) } generated1Tasks := []task.Task{} @@ -6153,16 +6154,16 @@ func TestEvalBisectStepback(t *testing.T) { } // Make the first generated tasks fail and the last pass. generated1Tasks[0].Status = evergreen.TaskSucceeded - require.NoError(task.UpdateOne(bson.M{"_id": generated1Tasks[0].Id}, + require.NoError(task.UpdateOne(ctx, bson.M{"_id": generated1Tasks[0].Id}, bson.M{"$set": bson.M{"status": generated1Tasks[0].Status}})) generated2Tasks[0].Status = evergreen.TaskSucceeded - require.NoError(task.UpdateOne(bson.M{"_id": generated2Tasks[0].Id}, + require.NoError(task.UpdateOne(ctx, bson.M{"_id": generated2Tasks[0].Id}, bson.M{"$set": bson.M{"status": generated2Tasks[0].Status}})) generated1Tasks[9].Status = evergreen.TaskFailed - require.NoError(task.UpdateOne(bson.M{"_id": generated1Tasks[9].Id}, + require.NoError(task.UpdateOne(ctx, bson.M{"_id": generated1Tasks[9].Id}, bson.M{"$set": bson.M{"status": generated1Tasks[9].Status}})) generated2Tasks[9].Status = evergreen.TaskFailed - require.NoError(task.UpdateOne(bson.M{"_id": generated2Tasks[9].Id}, + require.NoError(task.UpdateOne(ctx, bson.M{"_id": generated2Tasks[9].Id}, bson.M{"$set": bson.M{"status": generated2Tasks[9].Status}})) require.NoError(evalStepback(ctx, &generated1Tasks[9], evergreen.TaskFailed)) require.NoError(evalStepback(ctx, &generated2Tasks[9], evergreen.TaskFailed)) @@ -6221,12 +6222,12 @@ func TestEvalBisectStepback(t *testing.T) { midTaskG1, err := task.FindOneId(ctx, "g1-5") require.NoError(err) midTaskG1.Status = evergreen.TaskSucceeded - require.NoError(task.UpdateOne(bson.M{"_id": midTaskG1.Id}, + require.NoError(task.UpdateOne(ctx, bson.M{"_id": midTaskG1.Id}, bson.M{"$set": bson.M{"status": midTaskG1.Status}})) midTaskG2, err := task.FindOneId(ctx, "g2-5") 
require.NoError(err) midTaskG2.Status = evergreen.TaskFailed - require.NoError(task.UpdateOne(bson.M{"_id": midTaskG2.Id}, + require.NoError(task.UpdateOne(ctx, bson.M{"_id": midTaskG2.Id}, bson.M{"$set": bson.M{"status": midTaskG2.Status}})) prevTask := *midTask diff --git a/model/task_queue_service_test.go b/model/task_queue_service_test.go index acc6182719b..30878dd7ffa 100644 --- a/model/task_queue_service_test.go +++ b/model/task_queue_service_test.go @@ -169,7 +169,7 @@ func (s *taskDAGDispatchServiceSuite) TestOutsideTasksWithTaskGroupDependencies( s.Require().NoError(t1.Insert()) s.Require().NoError(t3.Insert()) s.Require().NoError(t5.Insert()) - err = setTaskStatus("taskgroup_task3", evergreen.TaskSucceeded) + err = setTaskStatus(s.ctx, "taskgroup_task3", evergreen.TaskSucceeded) s.Require().NoError(err) s.taskQueue.Queue = s.refreshTaskQueue(service) @@ -341,7 +341,7 @@ func (s *taskDAGDispatchServiceSuite) TestIntraTaskGroupDependencies() { s.Require().NoError(t2.Insert()) s.Require().NoError(t3.Insert()) s.Require().NoError(t4.Insert()) - err = setTaskStatus("task2", evergreen.TaskSucceeded) + err = setTaskStatus(s.ctx, "task2", evergreen.TaskSucceeded) s.Require().NoError(err) s.taskQueue.Queue = s.refreshTaskQueue(service) @@ -364,9 +364,9 @@ func (s *taskDAGDispatchServiceSuite) TestIntraTaskGroupDependencies() { s.Require().NoError(t2.Insert()) s.Require().NoError(t3.Insert()) s.Require().NoError(t4.Insert()) - err = setTaskStatus("task2", evergreen.TaskSucceeded) + err = setTaskStatus(s.ctx, "task2", evergreen.TaskSucceeded) s.Require().NoError(err) - err = setTaskStatus("task4", evergreen.TaskSucceeded) + err = setTaskStatus(s.ctx, "task4", evergreen.TaskSucceeded) s.Require().NoError(err) s.taskQueue.Queue = s.refreshTaskQueue(service) @@ -387,11 +387,11 @@ func (s *taskDAGDispatchServiceSuite) TestIntraTaskGroupDependencies() { s.Require().NoError(t2.Insert()) s.Require().NoError(t3.Insert()) s.Require().NoError(t4.Insert()) - err = 
setTaskStatus("task2", evergreen.TaskSucceeded) + err = setTaskStatus(s.ctx, "task2", evergreen.TaskSucceeded) s.Require().NoError(err) - err = setTaskStatus("task3", evergreen.TaskSucceeded) + err = setTaskStatus(s.ctx, "task3", evergreen.TaskSucceeded) s.Require().NoError(err) - err = setTaskStatus("task4", evergreen.TaskSucceeded) + err = setTaskStatus(s.ctx, "task4", evergreen.TaskSucceeded) s.Require().NoError(err) s.taskQueue.Queue = s.refreshTaskQueue(service) @@ -1257,8 +1257,9 @@ func (s *taskDAGDispatchServiceSuite) TestSingleHostTaskGroupsBlock() { s.Require().Nil(next) } -func setTaskStatus(taskID string, status string) error { +func setTaskStatus(ctx context.Context, taskID string, status string) error { return task.UpdateOne( + ctx, bson.M{ task.IdKey: taskID, }, @@ -1289,7 +1290,7 @@ func (s *taskDAGDispatchServiceSuite) TestFindNextTask() { next = service.FindNextTask(s.ctx, spec, utility.ZeroTime) s.Require().NotNil(next) s.Equal(fmt.Sprintf("%d", 5*i+1), next.Id) - s.Require().NoError(setTaskStatus(next.Id, evergreen.TaskSucceeded)) + s.Require().NoError(setTaskStatus(s.ctx, next.Id, evergreen.TaskSucceeded)) } // Dispatch the first 5 tasks for taskGroupTasks "group_2_variant_1_project_1_version_1", which represents a task group that initially contains 20 tasks. @@ -1303,7 +1304,7 @@ func (s *taskDAGDispatchServiceSuite) TestFindNextTask() { } next = service.FindNextTask(s.ctx, spec, utility.ZeroTime) s.Equal(fmt.Sprintf("%d", 5*i+2), next.Id) - s.Require().NoError(setTaskStatus(next.Id, evergreen.TaskSucceeded)) + s.Require().NoError(setTaskStatus(s.ctx, next.Id, evergreen.TaskSucceeded)) } // Dispatch the first 5 tasks for taskGroupTasks "group_1_variant_2_project_1_version_1", which represents a task group that initially contains 20 tasks. 
@@ -1317,7 +1318,7 @@ func (s *taskDAGDispatchServiceSuite) TestFindNextTask() { } next = service.FindNextTask(s.ctx, spec, utility.ZeroTime) s.Equal(fmt.Sprintf("%d", 5*i+3), next.Id) - s.Require().NoError(setTaskStatus(next.Id, evergreen.TaskSucceeded)) + s.Require().NoError(setTaskStatus(s.ctx, next.Id, evergreen.TaskSucceeded)) } // Dispatch the first 5 tasks for taskGroupTasks "group_1_variant_1_project_1_version_2", which represents a task group that initially contains 20 tasks. @@ -1331,7 +1332,7 @@ func (s *taskDAGDispatchServiceSuite) TestFindNextTask() { } next = service.FindNextTask(s.ctx, spec, utility.ZeroTime) s.Equal(fmt.Sprintf("%d", 5*i+4), next.Id) - s.Require().NoError(setTaskStatus(next.Id, evergreen.TaskSucceeded)) + s.Require().NoError(setTaskStatus(s.ctx, next.Id, evergreen.TaskSucceeded)) } // The taskGroupTasks "group_1_variant_1_project_1_version_1" now contains 15 tasks; dispatch another 5 of them. @@ -1345,7 +1346,7 @@ func (s *taskDAGDispatchServiceSuite) TestFindNextTask() { } next = service.FindNextTask(s.ctx, spec, utility.ZeroTime) s.Equal(fmt.Sprintf("%d", 5*i+26), next.Id) - s.Require().NoError(setTaskStatus(next.Id, evergreen.TaskSucceeded)) + s.Require().NoError(setTaskStatus(s.ctx, next.Id, evergreen.TaskSucceeded)) } ////////////////////////////////////////////////////////////////////////////// @@ -1363,7 +1364,7 @@ func (s *taskDAGDispatchServiceSuite) TestFindNextTask() { next = service.FindNextTask(s.ctx, spec, utility.ZeroTime) s.Equal("0", next.Id) s.Equal("", next.Group) - s.Require().NoError(setTaskStatus(next.Id, evergreen.TaskSucceeded)) + s.Require().NoError(setTaskStatus(s.ctx, next.Id, evergreen.TaskSucceeded)) currentID := 0 var nextInt int @@ -1383,7 +1384,7 @@ func (s *taskDAGDispatchServiceSuite) TestFindNextTask() { s.Equal("project_1", next.Project) s.Equal("version_1", next.Version) s.Equal("project_1", next.Project) - s.Require().NoError(setTaskStatus(next.Id, evergreen.TaskSucceeded)) + 
s.Require().NoError(setTaskStatus(s.ctx, next.Id, evergreen.TaskSucceeded)) } // Make another 15 requests for a task, passing an "empty" TaskSpec{} - all 15 dispatched tasks should come from the "group_2_variant_1_project_1_version_1" taskGroupTasks. @@ -1401,7 +1402,7 @@ func (s *taskDAGDispatchServiceSuite) TestFindNextTask() { s.Equal("project_1", next.Project) s.Equal("version_1", next.Version) s.Equal("project_1", next.Project) - s.Require().NoError(setTaskStatus(next.Id, evergreen.TaskSucceeded)) + s.Require().NoError(setTaskStatus(s.ctx, next.Id, evergreen.TaskSucceeded)) } // Make another 15 requests for a task, passing an "empty" TaskSpec{} - all 15 dispatched tasks should come from the "group_1_variant_2_project_1_version_1" taskGroupTasks. @@ -1419,7 +1420,7 @@ func (s *taskDAGDispatchServiceSuite) TestFindNextTask() { s.Equal("project_1", next.Project) s.Equal("version_1", next.Version) s.Equal("project_1", next.Project) - s.Require().NoError(setTaskStatus(next.Id, evergreen.TaskSucceeded)) + s.Require().NoError(setTaskStatus(s.ctx, next.Id, evergreen.TaskSucceeded)) } // Make another 15 requests for a task, passing an "empty" TaskSpec{} - all 15 dispatched tasks should come from the "group_1_variant_1_project_1_version_2" taskGroupTasks. @@ -1437,7 +1438,7 @@ func (s *taskDAGDispatchServiceSuite) TestFindNextTask() { s.Equal("project_1", next.Project) s.Equal("version_2", next.Version) s.Equal("project_1", next.Project) - s.Require().NoError(setTaskStatus(next.Id, evergreen.TaskSucceeded)) + s.Require().NoError(setTaskStatus(s.ctx, next.Id, evergreen.TaskSucceeded)) } // Make another 19 requests for a task, passing an "empty" TaskSpec{} - all 19 dispatched tasks should be standalone tasks. 
@@ -1447,7 +1448,7 @@ func (s *taskDAGDispatchServiceSuite) TestFindNextTask() { next = service.FindNextTask(s.ctx, spec, utility.ZeroTime) s.Equal(expectedStandaloneTaskOrder[i], next.Id) s.Equal("", next.Group) - s.Require().NoError(setTaskStatus(next.Id, evergreen.TaskSucceeded)) + s.Require().NoError(setTaskStatus(s.ctx, next.Id, evergreen.TaskSucceeded)) s.taskQueue.Queue = s.refreshTaskQueue(service) } } @@ -1554,6 +1555,7 @@ func (s *taskDAGDispatchServiceSuite) TestTaskGroupTasksRunningHostsVersusMaxHos func (s *taskDAGDispatchServiceSuite) TestTaskGroupWithExternalDependency() { dependsOn := []task.Dependency{{TaskId: "95"}} err := task.UpdateOne( + s.ctx, bson.M{ task.IdKey: "1", }, @@ -1610,6 +1612,7 @@ func (s *taskDAGDispatchServiceSuite) TestTaskGroupWithExternalDependency() { // Set task "95"'s status to evergreen.TaskSucceeded. err = task.UpdateOne( + s.ctx, bson.M{ task.IdKey: "95", }, @@ -1817,6 +1820,9 @@ func (s *taskDAGDispatchServiceSuite) TestNewSingleHostTaskGroupLimits() { } func (s *taskDAGDispatchServiceSuite) TestGenerateTaskLimits() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + defer evergreen.SetEnvironment(evergreen.GetEnvironment()) s.Require().NoError(db.ClearCollections(task.Collection)) @@ -1911,7 +1917,7 @@ func (s *taskDAGDispatchServiceSuite) TestGenerateTaskLimits() { s.Equal(t2.Id, next.Id) // Mark running task as complete so that t1 can be dispatched. - s.Require().NoError(running.MarkEnd(time.Now(), nil)) + s.Require().NoError(running.MarkEnd(ctx, time.Now(), nil)) // Fake a refresh of the in-memory queue. 
s.Require().NoError(db.ClearCollections(task.Collection)) diff --git a/rest/data/scheduler.go b/rest/data/scheduler.go index af3d6bbbf65..e8b77e42d2e 100644 --- a/rest/data/scheduler.go +++ b/rest/data/scheduler.go @@ -35,7 +35,7 @@ func CompareTasks(ctx context.Context, taskIds []string, useLegacy bool) ([]stri cmp := scheduler.CmpBasedTaskPrioritizer{} logic := map[string]map[string]string{} if useLegacy { - tasks, logic, err = cmp.PrioritizeTasks(distroId, tasks, versions) + tasks, logic, err = cmp.PrioritizeTasks(ctx, distroId, tasks, versions) if err != nil { return nil, nil, errors.Wrap(err, "prioritizing tasks") } @@ -51,7 +51,7 @@ func CompareTasks(ctx context.Context, taskIds []string, useLegacy bool) ([]stri if d == nil { return nil, nil, errors.Errorf("distro '%s' not found", distroId) } - taskPlan := scheduler.PrepareTasksForPlanning(d, tasks) + taskPlan := scheduler.PrepareTasksForPlanning(ctx, d, tasks) tasks = taskPlan.Export(ctx) } prioritizedIds := []string{} diff --git a/rest/route/agent.go b/rest/route/agent.go index 6eaca42e29e..af4d87bf641 100644 --- a/rest/route/agent.go +++ b/rest/route/agent.go @@ -297,7 +297,7 @@ func (h *markTaskForRestartHandler) Run(ctx context.Context) gimlet.Responder { if err = projectRef.CheckAndUpdateAutoRestartLimit(maxDailyAutoRestarts); err != nil { return gimlet.MakeJSONInternalErrorResponder(errors.Wrapf(err, "checking auto restart limit for '%s'", projectRef.Id)) } - if err = taskToRestart.SetResetWhenFinishedWithInc(); err != nil { + if err = taskToRestart.SetResetWhenFinishedWithInc(ctx); err != nil { return gimlet.MakeJSONInternalErrorResponder(errors.Wrapf(err, "setting reset when finished for task '%s'", h.taskID)) } return gimlet.NewJSONResponse(struct{}{}) @@ -660,7 +660,7 @@ func (h *setTaskResultsInfoHandler) Run(ctx context.Context) gimlet.Responder { }) } - if err = t.SetResultsInfo(h.info.Service, h.info.Failed); err != nil { + if err = t.SetResultsInfo(ctx, h.info.Service, h.info.Failed); err != 
nil { return gimlet.MakeJSONInternalErrorResponder(errors.Wrapf(err, "setting results info for task '%s'", h.taskID)) } @@ -781,7 +781,7 @@ func (h *heartbeatHandler) Run(ctx context.Context) gimlet.Responder { heartbeatResponse.Abort = true } - if err := t.UpdateHeartbeat(); err != nil { + if err := t.UpdateHeartbeat(ctx); err != nil { grip.Warningf("updating heartbeat for task %s: %+v", t.Id, err) } return gimlet.NewJSONResponse(heartbeatResponse) @@ -1506,7 +1506,7 @@ func (h *checkRunHandler) Run(ctx context.Context) gimlet.Responder { } checkRunInt := utility.FromInt64Ptr(checkRun.ID) - if err = t.SetCheckRunId(checkRunInt); err != nil { + if err = t.SetCheckRunId(ctx, checkRunInt); err != nil { err = errors.Wrap(err, "setting check run ID on task") grip.Error(message.WrapError(err, message.Fields{ diff --git a/rest/route/agent_test.go b/rest/route/agent_test.go index c3ce1255084..e114df72bd6 100644 --- a/rest/route/agent_test.go +++ b/rest/route/agent_test.go @@ -216,7 +216,7 @@ func TestMarkTaskForReset(t *testing.T) { assert.Equal(t, 1, foundTask.NumAutomaticRestarts) // Should fail if the task resets and tries to auto restart again - require.NoError(t, foundTask.MarkEnd(time.Now(), &apimodels.TaskEndDetail{ + require.NoError(t, foundTask.MarkEnd(ctx, time.Now(), &apimodels.TaskEndDetail{ Status: evergreen.TaskFailed, })) require.NoError(t, foundTask.Archive(ctx)) diff --git a/rest/route/host_agent.go b/rest/route/host_agent.go index cbe1d6447a4..ca38aae37b5 100644 --- a/rest/route/host_agent.go +++ b/rest/route/host_agent.go @@ -547,7 +547,7 @@ func assignNextAvailableTask(ctx context.Context, env evergreen.Environment, tas continue } - grip.Error(message.WrapError(nextTask.IncNumNextTaskDispatches(), message.Fields{ + grip.Error(message.WrapError(nextTask.IncNumNextTaskDispatches(ctx), message.Fields{ "message": "problem updating the number of times the task has been dispatched", "task_id": nextTask.Id, "task_execution": nextTask.Execution, @@ -1039,7 
+1039,7 @@ func sendBackRunningTask(ctx context.Context, env evergreen.Environment, h *host } if t.Activated { - grip.Error(message.WrapError(t.IncNumNextTaskDispatches(), message.Fields{ + grip.Error(message.WrapError(t.IncNumNextTaskDispatches(ctx), message.Fields{ "message": "problem updating the number of times the task has been dispatched", "task_id": t.Id, "task_execution": t.Execution, diff --git a/rest/route/host_agent_test.go b/rest/route/host_agent_test.go index 44c5ce24ae3..7db31101d1c 100644 --- a/rest/route/host_agent_test.go +++ b/rest/route/host_agent_test.go @@ -1330,7 +1330,7 @@ func TestAssignNextAvailableTask(t *testing.T) { assert.Equal(t, "", h.RunningTask) }, "a dispatched task should not be updated in the host": func(ctx context.Context, t *testing.T, env *mock.Environment, d data) { - require.NoError(t, task.UpdateOne(bson.M{"_id": d.Task3.Id}, + require.NoError(t, task.UpdateOne(ctx, bson.M{"_id": d.Task3.Id}, bson.M{"$set": bson.M{"status": evergreen.TaskStarted}})) nextTaskId := d.Tq3.Queue[1].Id details := &apimodels.GetNextTaskDetails{} @@ -1501,9 +1501,9 @@ func TestAssignNextAvailableTask(t *testing.T) { TaskGroupMaxHosts: 2, } require.NoError(t, tg1Task3.Insert()) - require.NoError(t, task.UpdateOne(bson.M{"_id": d.Tg1Task1.Id}, + require.NoError(t, task.UpdateOne(ctx, bson.M{"_id": d.Tg1Task1.Id}, bson.M{"$set": bson.M{"task_group_max_hosts": 2}})) - require.NoError(t, task.UpdateOne(bson.M{"_id": d.Tg1Task2.Id}, + require.NoError(t, task.UpdateOne(ctx, bson.M{"_id": d.Tg1Task2.Id}, bson.M{"$set": bson.M{"task_group_max_hosts": 2}})) details := &apimodels.GetNextTaskDetails{} // The first host should get the top of the task group. 
diff --git a/rest/route/middleware_test.go b/rest/route/middleware_test.go index 62617e5311a..75c280008f5 100644 --- a/rest/route/middleware_test.go +++ b/rest/route/middleware_test.go @@ -231,13 +231,13 @@ func TestTaskAuthMiddleware(t *testing.T) { m.ServeHTTP(rw, r, func(rw http.ResponseWriter, r *http.Request) {}) assert.NotEqual(http.StatusOK, rw.Code) - assert.NoError(task.UpdateOne(bson.M{task.IdKey: "completedTask"}, bson.M{"$set": bson.M{task.FinishTimeKey: time.Now().Add(-30 * time.Minute)}})) + assert.NoError(task.UpdateOne(ctx, bson.M{task.IdKey: "completedTask"}, bson.M{"$set": bson.M{task.FinishTimeKey: time.Now().Add(-30 * time.Minute)}})) r.Header.Set(evergreen.TaskHeader, "completedTask") rw = httptest.NewRecorder() m.ServeHTTP(rw, r, func(rw http.ResponseWriter, r *http.Request) {}) assert.Equal(http.StatusOK, rw.Code) - assert.NoError(task.UpdateOne(bson.M{task.IdKey: "completedTask"}, bson.M{"$set": bson.M{task.FinishTimeKey: time.Now().Add(-90 * time.Minute)}})) + assert.NoError(task.UpdateOne(ctx, bson.M{task.IdKey: "completedTask"}, bson.M{"$set": bson.M{task.FinishTimeKey: time.Now().Add(-90 * time.Minute)}})) r.Header.Set(evergreen.TaskHeader, "completedTask") rw = httptest.NewRecorder() m.ServeHTTP(rw, r, func(rw http.ResponseWriter, r *http.Request) {}) diff --git a/rest/route/patch_test.go b/rest/route/patch_test.go index fc7348833a4..12a8057f3c9 100644 --- a/rest/route/patch_test.go +++ b/rest/route/patch_test.go @@ -1398,7 +1398,7 @@ tasks: assert.NoError(t, err) assert.Len(t, tasks, 1) // manually set the task as running and its generated JSON for simplicity - err = task.UpdateOne(task.ById(tasks[0].Id), bson.M{ + err = task.UpdateOne(ctx, task.ById(tasks[0].Id), bson.M{ "$set": bson.M{ task.StatusKey: evergreen.TaskStarted, task.GeneratedJSONAsStringKey: generatedProject, diff --git a/scheduler/planner.go b/scheduler/planner.go index e6b0e25f21e..58ccca5c82f 100644 --- a/scheduler/planner.go +++ b/scheduler/planner.go @@ -70,9 +70,9 
@@ func (cache UnitCache) Create(id string, t task.Task) *Unit { } // Export returns an unordered sequence of unique Units. -func (cache UnitCache) Export() TaskPlan { +func (cache UnitCache) Export(ctx context.Context) TaskPlan { seen := StringSet{} - tpl := TaskPlan{} + tpl := TaskPlan{ctx: ctx} for id := range cache { if seen.Visit(cache[id].ID()) { continue @@ -82,7 +82,7 @@ func (cache UnitCache) Export() TaskPlan { continue } - tpl = append(tpl, cache[id]) + tpl.units = append(tpl.units, cache[id]) } return tpl @@ -117,11 +117,11 @@ func NewUnit(t task.Task) *Unit { // Export returns an unordered sequence of tasks from unit. All tasks // are unique. -func (unit *Unit) Export() TaskList { - out := make(TaskList, 0, len(unit.tasks)) +func (unit *Unit) Export(ctx context.Context) TaskList { + out := TaskList{ctx: ctx} for _, t := range unit.tasks { - out = append(out, t) + out.tasks = append(out.tasks, t) } return out @@ -291,7 +291,7 @@ func (u *unitInfo) computePriority(breakdown *task.SortingValueBreakdown) int64 return initialPriority } -func (unit *Unit) info() unitInfo { +func (unit *Unit) info(ctx context.Context) unitInfo { info := unitInfo{ Settings: unit.distro.PlannerSettings, } @@ -314,7 +314,7 @@ func (unit *Unit) info() unitInfo { } info.TotalPriority += t.Priority - info.ExpectedRuntime += t.FetchExpectedDuration().Average + info.ExpectedRuntime += t.FetchExpectedDuration(ctx).Average info.NumDependents += int64(t.NumDependents) info.TaskIDs = append(info.TaskIDs, t.Id) } @@ -328,12 +328,12 @@ func (unit *Unit) info() unitInfo { // Generally, higher point values are given to larger units and for // units that have been in the queue for longer, with longer expected // runtimes. The tasks' priority acts as a multiplying factor. 
-func (unit *Unit) sortingValueBreakdown() task.SortingValueBreakdown { +func (unit *Unit) sortingValueBreakdown(ctx context.Context) task.SortingValueBreakdown { if unit.cachedValue.TotalValue > 0 { return unit.cachedValue } - info := unit.info() + info := unit.info(ctx) unit.cachedValue = info.value() return unit.cachedValue } @@ -363,13 +363,16 @@ func (s StringSet) Visit(id string) bool { // prioritizes tasks by the number of dependencies, priority, and // expected duration. This sorting is used for ordering tasks within a // unit. -type TaskList []task.Task +type TaskList struct { + ctx context.Context + tasks []task.Task +} -func (tl TaskList) Len() int { return len(tl) } -func (tl TaskList) Swap(i, j int) { tl[i], tl[j] = tl[j], tl[i] } +func (tl TaskList) Len() int { return len(tl.tasks) } +func (tl TaskList) Swap(i, j int) { tl.tasks[i], tl.tasks[j] = tl.tasks[j], tl.tasks[i] } func (tl TaskList) Less(i, j int) bool { - t1 := tl[i] - t2 := tl[j] + t1 := tl.tasks[i] + t2 := tl.tasks[j] // TODO note about impact of this with versions. if t1.TaskGroupOrder != t2.TaskGroupOrder { @@ -384,23 +387,26 @@ func (tl TaskList) Less(i, j int) bool { return t1.Priority > t2.Priority } - return t1.FetchExpectedDuration().Average > t2.FetchExpectedDuration().Average + return t1.FetchExpectedDuration(tl.ctx).Average > t2.FetchExpectedDuration(tl.ctx).Average } // TaskPlan provides a sortable interface on top of a slice of // schedulable units, with ordering of units provided by the // implementation of SortingValueBreakdown. 
-type TaskPlan []*Unit +type TaskPlan struct { + ctx context.Context + units []*Unit +} -func (tpl TaskPlan) Len() int { return len(tpl) } +func (tpl TaskPlan) Len() int { return len(tpl.units) } func (tpl TaskPlan) Less(i, j int) bool { - return tpl[i].sortingValueBreakdown().TotalValue > tpl[j].sortingValueBreakdown().TotalValue + return tpl.units[i].sortingValueBreakdown(tpl.ctx).TotalValue > tpl.units[j].sortingValueBreakdown(tpl.ctx).TotalValue } -func (tpl TaskPlan) Swap(i, j int) { tpl[i], tpl[j] = tpl[j], tpl[i] } +func (tpl TaskPlan) Swap(i, j int) { tpl.units[i], tpl.units[j] = tpl.units[j], tpl.units[i] } func (tpl TaskPlan) Keys() []string { out := []string{} - for _, unit := range tpl { + for _, unit := range tpl.units { out = append(out, unit.Keys()...) } return out @@ -408,7 +414,7 @@ func (tpl TaskPlan) Keys() []string { // PrepareTasksForPlanning takes a list of tasks for a distro and // returns a TaskPlan, grouping tasks into the appropriate units. -func PrepareTasksForPlanning(distro *distro.Distro, tasks []task.Task) TaskPlan { +func PrepareTasksForPlanning(ctx context.Context, distro *distro.Distro, tasks []task.Task) TaskPlan { cache := UnitCache{} for _, t := range tasks { @@ -435,7 +441,7 @@ func PrepareTasksForPlanning(distro *distro.Distro, tasks []task.Task) TaskPlan } } - return cache.Export() + return cache.Export(ctx) } // Export sorts the TaskPlan returning a unique list of tasks. 
@@ -444,16 +450,16 @@ func (tpl TaskPlan) Export(ctx context.Context) []task.Task { output := []task.Task{} seen := StringSet{} - for _, unit := range tpl { - sortingValueBreakdown := unit.sortingValueBreakdown() - tasks := unit.Export() + for _, unit := range tpl.units { + sortingValueBreakdown := unit.sortingValueBreakdown(ctx) + tasks := unit.Export(ctx) sort.Sort(tasks) - for i := range tasks { - if seen.Visit(tasks[i].Id) { + for i := range tasks.tasks { + if seen.Visit(tasks.tasks[i].Id) { continue } - tasks[i].SetSortingValueBreakdownAttributes(ctx, sortingValueBreakdown) - output = append(output, tasks[i]) + tasks.tasks[i].SetSortingValueBreakdownAttributes(ctx, sortingValueBreakdown) + output = append(output, tasks.tasks[i]) } } diff --git a/scheduler/planner_test.go b/scheduler/planner_test.go index abfc74675f8..b8196a28a4c 100644 --- a/scheduler/planner_test.go +++ b/scheduler/planner_test.go @@ -16,7 +16,10 @@ import ( ) func TestPlanner(t *testing.T) { - _, err := evergreen.GetEnvironment().DB().Collection(task.Collection).Indexes().CreateOne(context.Background(), mongo.IndexModel{Keys: task.DurationIndex}) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + _, err := evergreen.GetEnvironment().DB().Collection(task.Collection).Indexes().CreateOne(ctx, mongo.IndexModel{Keys: task.DurationIndex}) assert.NoError(t, err) t.Run("Caches", func(t *testing.T) { @@ -116,7 +119,7 @@ func TestPlanner(t *testing.T) { cache := UnitCache{} one := task.Task{Id: "one"} cache.Create("one", one) - assert.Empty(t, cache.Export()) + assert.Empty(t, cache.Export(ctx)) }) t.Run("ExportPropogatesTasks", func(t *testing.T) { cache := UnitCache{} @@ -124,13 +127,13 @@ func TestPlanner(t *testing.T) { two := task.Task{Id: "two"} cache.Create("one", one).SetDistro(&distro.Distro{}) cache.Create("two", two).SetDistro(&distro.Distro{}) - plan := cache.Export() + plan := cache.Export(ctx) assert.Len(t, plan, 2) - for _, ts := range plan { + for _, ts := 
range plan.units { ts.SetDistro(&distro.Distro{}) require.Len(t, ts.tasks, 1) } - for _, ts := range plan.Export(context.Background()) { + for _, ts := range plan.Export(ctx) { require.True(t, ts.Id == "one" || ts.Id == "two") } }) @@ -139,7 +142,7 @@ func TestPlanner(t *testing.T) { one := task.Task{Id: "one"} cache.Create("one", one).SetDistro(&distro.Distro{}) cache.Create("two", one).SetDistro(&distro.Distro{}) - plan := cache.Export() + plan := cache.Export(ctx) assert.Len(t, plan, 1) }) }) @@ -169,9 +172,9 @@ func TestPlanner(t *testing.T) { t.Run("HashCaches", func(t *testing.T) { unit := NewUnit(task.Task{Id: "foo"}) hash := unit.ID() - assert.Len(t, unit.Export(), 1) + assert.Len(t, unit.Export(ctx), 1) unit.Add(task.Task{Id: "bar"}) - assert.Len(t, unit.Export(), 2) + assert.Len(t, unit.Export(ctx), 2) newHash := unit.ID() assert.Equal(t, hash, newHash) }) @@ -197,119 +200,119 @@ func TestPlanner(t *testing.T) { t.Run("SingleTask", func(t *testing.T) { unit := NewUnit(task.Task{Id: "foo"}) unit.SetDistro(&distro.Distro{}) - assert.EqualValues(t, 180, unit.sortingValueBreakdown().TotalValue) - verifyRankBreakdown(t, unit.sortingValueBreakdown()) + assert.EqualValues(t, 180, unit.sortingValueBreakdown(ctx).TotalValue) + verifyRankBreakdown(t, unit.sortingValueBreakdown(ctx)) }) t.Run("MultipleTasks", func(t *testing.T) { unit := NewUnit(task.Task{Id: "foo"}) unit.SetDistro(&distro.Distro{}) unit.Add(task.Task{Id: "bar"}) - assert.EqualValues(t, 181, unit.sortingValueBreakdown().TotalValue) - verifyRankBreakdown(t, unit.sortingValueBreakdown()) + assert.EqualValues(t, 181, unit.sortingValueBreakdown(ctx).TotalValue) + verifyRankBreakdown(t, unit.sortingValueBreakdown(ctx)) }) t.Run("MergeQueue", func(t *testing.T) { unit := NewUnit(task.Task{Id: "foo", Requester: evergreen.GithubMergeRequester}) unit.SetDistro(&distro.Distro{}) - assert.EqualValues(t, 2413, unit.sortingValueBreakdown().TotalValue) - verifyRankBreakdown(t, unit.sortingValueBreakdown()) + 
assert.EqualValues(t, 2413, unit.sortingValueBreakdown(ctx).TotalValue) + verifyRankBreakdown(t, unit.sortingValueBreakdown(ctx)) }) t.Run("Patches", func(t *testing.T) { t.Run("CLI", func(t *testing.T) { unit := NewUnit(task.Task{Id: "foo", Requester: evergreen.PatchVersionRequester}) unit.SetDistro(&distro.Distro{}) unit.distro.PlannerSettings.PatchFactor = 10 - assert.EqualValues(t, 22, unit.sortingValueBreakdown().TotalValue) - verifyRankBreakdown(t, unit.sortingValueBreakdown()) + assert.EqualValues(t, 22, unit.sortingValueBreakdown(ctx).TotalValue) + verifyRankBreakdown(t, unit.sortingValueBreakdown(ctx)) }) t.Run("Github", func(t *testing.T) { unit := NewUnit(task.Task{Id: "foo", Requester: evergreen.GithubPRRequester}) unit.SetDistro(&distro.Distro{}) unit.distro.PlannerSettings.PatchFactor = 10 - assert.EqualValues(t, 22, unit.sortingValueBreakdown().TotalValue) - verifyRankBreakdown(t, unit.sortingValueBreakdown()) + assert.EqualValues(t, 22, unit.sortingValueBreakdown(ctx).TotalValue) + verifyRankBreakdown(t, unit.sortingValueBreakdown(ctx)) }) }) t.Run("Priority", func(t *testing.T) { unit := NewUnit(task.Task{Id: "foo", Priority: 10}) unit.SetDistro(&distro.Distro{}) - assert.EqualValues(t, 1970, unit.sortingValueBreakdown().TotalValue) - verifyRankBreakdown(t, unit.sortingValueBreakdown()) + assert.EqualValues(t, 1970, unit.sortingValueBreakdown(ctx).TotalValue) + verifyRankBreakdown(t, unit.sortingValueBreakdown(ctx)) }) t.Run("TimeInQueuePatch", func(t *testing.T) { unit := NewUnit(task.Task{Id: "foo", Requester: evergreen.PatchVersionRequester, ActivatedTime: time.Now().Add(-time.Hour)}) unit.SetDistro(&distro.Distro{}) - assert.EqualValues(t, 73, unit.sortingValueBreakdown().TotalValue) - verifyRankBreakdown(t, unit.sortingValueBreakdown()) + assert.EqualValues(t, 73, unit.sortingValueBreakdown(ctx).TotalValue) + verifyRankBreakdown(t, unit.sortingValueBreakdown(ctx)) }) t.Run("TimeInQueueMainline", func(t *testing.T) { unit := 
NewUnit(task.Task{Id: "foo", Requester: evergreen.RepotrackerVersionRequester, ActivatedTime: time.Now().Add(-time.Hour)}) unit.SetDistro(&distro.Distro{}) - assert.EqualValues(t, 178, unit.sortingValueBreakdown().TotalValue) - verifyRankBreakdown(t, unit.sortingValueBreakdown()) + assert.EqualValues(t, 178, unit.sortingValueBreakdown(ctx).TotalValue) + verifyRankBreakdown(t, unit.sortingValueBreakdown(ctx)) }) t.Run("LifeTimePatch", func(t *testing.T) { unit := NewUnit(task.Task{Id: "foo", Requester: evergreen.PatchVersionRequester, IngestTime: time.Now().Add(-10 * time.Hour)}) unit.SetDistro(&distro.Distro{}) - assert.EqualValues(t, 613, unit.sortingValueBreakdown().TotalValue) - verifyRankBreakdown(t, unit.sortingValueBreakdown()) + assert.EqualValues(t, 613, unit.sortingValueBreakdown(ctx).TotalValue) + verifyRankBreakdown(t, unit.sortingValueBreakdown(ctx)) }) t.Run("LifeTimeMainlineNew", func(t *testing.T) { unit := NewUnit(task.Task{Id: "foo", Requester: evergreen.RepotrackerVersionRequester, IngestTime: time.Now().Add(-10 * time.Minute)}) unit.SetDistro(&distro.Distro{}) - assert.EqualValues(t, 179, unit.sortingValueBreakdown().TotalValue) - verifyRankBreakdown(t, unit.sortingValueBreakdown()) + assert.EqualValues(t, 179, unit.sortingValueBreakdown(ctx).TotalValue) + verifyRankBreakdown(t, unit.sortingValueBreakdown(ctx)) }) t.Run("LifeTimeMainlineOld", func(t *testing.T) { unit := NewUnit(task.Task{Id: "foo", Requester: evergreen.RepotrackerVersionRequester, IngestTime: time.Now().Add(-7 * 24 * time.Hour)}) unit.SetDistro(&distro.Distro{}) - assert.EqualValues(t, 12, unit.sortingValueBreakdown().TotalValue) - verifyRankBreakdown(t, unit.sortingValueBreakdown()) + assert.EqualValues(t, 12, unit.sortingValueBreakdown(ctx).TotalValue) + verifyRankBreakdown(t, unit.sortingValueBreakdown(ctx)) }) t.Run("NumDependents", func(t *testing.T) { unit := NewUnit(task.Task{Id: "foo", NumDependents: 2}) unit.SetDistro(&distro.Distro{}) - assert.EqualValues(t, 182, 
unit.sortingValueBreakdown().TotalValue) - verifyRankBreakdown(t, unit.sortingValueBreakdown()) + assert.EqualValues(t, 182, unit.sortingValueBreakdown(ctx).TotalValue) + verifyRankBreakdown(t, unit.sortingValueBreakdown(ctx)) }) t.Run("NumDependentsWithFactor", func(t *testing.T) { unit := NewUnit(task.Task{Id: "foo", NumDependents: 2}) unit.SetDistro(&distro.Distro{}) unit.distro.PlannerSettings.NumDependentsFactor = 10 - assert.EqualValues(t, 200, unit.sortingValueBreakdown().TotalValue) - verifyRankBreakdown(t, unit.sortingValueBreakdown()) + assert.EqualValues(t, 200, unit.sortingValueBreakdown(ctx).TotalValue) + verifyRankBreakdown(t, unit.sortingValueBreakdown(ctx)) }) t.Run("NumDependentsWithFractionFactor", func(t *testing.T) { unit := NewUnit(task.Task{Id: "foo", NumDependents: 2}) unit.SetDistro(&distro.Distro{}) unit.distro.PlannerSettings.NumDependentsFactor = 0.5 - assert.EqualValues(t, 181, unit.sortingValueBreakdown().TotalValue) - verifyRankBreakdown(t, unit.sortingValueBreakdown()) + assert.EqualValues(t, 181, unit.sortingValueBreakdown(ctx).TotalValue) + verifyRankBreakdown(t, unit.sortingValueBreakdown(ctx)) }) t.Run("GenerateTask", func(t *testing.T) { unit := NewUnit(task.Task{Id: "foo", GenerateTask: true}) unit.SetDistro(&distro.Distro{}) unit.distro.PlannerSettings.GenerateTaskFactor = 10 - assert.EqualValues(t, 1791, unit.sortingValueBreakdown().TotalValue) - verifyRankBreakdown(t, unit.sortingValueBreakdown()) + assert.EqualValues(t, 1791, unit.sortingValueBreakdown(ctx).TotalValue) + verifyRankBreakdown(t, unit.sortingValueBreakdown(ctx)) }) t.Run("TaskGroup", func(t *testing.T) { unit := NewUnit(task.Task{Id: "foo", TaskGroup: "tg1"}) unit.Add(task.Task{Id: "bar", TaskGroup: "tg1"}) unit.Add(task.Task{Id: "baz", TaskGroup: "tg1"}) unit.SetDistro(&distro.Distro{}) - assert.EqualValues(t, 719, unit.sortingValueBreakdown().TotalValue) - verifyRankBreakdown(t, unit.sortingValueBreakdown()) + assert.EqualValues(t, 719, 
unit.sortingValueBreakdown(ctx).TotalValue) + verifyRankBreakdown(t, unit.sortingValueBreakdown(ctx)) }) }) t.Run("RankCachesValue", func(t *testing.T) { unit := NewUnit(task.Task{Id: "foo", Priority: 100}) unit.SetDistro(&distro.Distro{}) - assert.EqualValues(t, 18080, unit.sortingValueBreakdown().TotalValue) - verifyRankBreakdown(t, unit.sortingValueBreakdown()) + assert.EqualValues(t, 18080, unit.sortingValueBreakdown(ctx).TotalValue) + verifyRankBreakdown(t, unit.sortingValueBreakdown(ctx)) unit.Add(task.Task{Id: "bar"}) - assert.EqualValues(t, 18080, unit.sortingValueBreakdown().TotalValue) - verifyRankBreakdown(t, unit.sortingValueBreakdown()) + assert.EqualValues(t, 18080, unit.sortingValueBreakdown(ctx).TotalValue) + verifyRankBreakdown(t, unit.sortingValueBreakdown(ctx)) }) }) t.Run("TaskPlan", func(t *testing.T) { @@ -318,91 +321,91 @@ func TestPlanner(t *testing.T) { for _, u := range units { u.SetDistro(d) } - return TaskPlan(units) + return TaskPlan{ctx: ctx, units: units} } t.Run("NoChange", func(t *testing.T) { plan := buildPlan(NewUnit(task.Task{Id: "foo"}), NewUnit(task.Task{Id: "bar"})) sort.Stable(plan) - out := plan.Export(context.Background()) + out := plan.Export(ctx) assert.Equal(t, "foo", out[0].Id) assert.Equal(t, "bar", out[1].Id) }) t.Run("ChangeOrder", func(t *testing.T) { plan := buildPlan(NewUnit(task.Task{Id: "foo"}), NewUnit(task.Task{Id: "bar", Priority: 10})) sort.Stable(plan) - out := plan.Export(context.Background()) + out := plan.Export(ctx) assert.Equal(t, "bar", out[0].Id) assert.Equal(t, "foo", out[1].Id) }) t.Run("Deduplicates", func(t *testing.T) { plan := buildPlan(NewUnit(task.Task{Id: "foo"}), NewUnit(task.Task{Id: "foo"})) - assert.Len(t, plan.Export(context.Background()), 1) + assert.Len(t, plan.Export(ctx), 1) }) }) t.Run("TaskList", func(t *testing.T) { t.Run("NoChange", func(t *testing.T) { - plan := TaskList{{Id: "second"}, {Id: "first"}} - assert.Equal(t, "second", plan[0].Id) - assert.Equal(t, "first", 
plan[1].Id) + plan := TaskList{ctx: ctx, tasks: []task.Task{{Id: "second"}, {Id: "first"}}} + assert.Equal(t, "second", plan.tasks[0].Id) + assert.Equal(t, "first", plan.tasks[1].Id) sort.Sort(plan) - assert.Equal(t, "second", plan[0].Id) - assert.Equal(t, "first", plan[1].Id) + assert.Equal(t, "second", plan.tasks[0].Id) + assert.Equal(t, "first", plan.tasks[1].Id) }) t.Run("TaskGroupOrder", func(t *testing.T) { - plan := TaskList{{Id: "second", TaskGroupOrder: 2}, {Id: "first", TaskGroupOrder: 1}} + plan := TaskList{ctx: ctx, tasks: []task.Task{{Id: "second", TaskGroupOrder: 2}, {Id: "first", TaskGroupOrder: 1}}} sort.Sort(plan) - assert.Equal(t, 1, plan[0].TaskGroupOrder) - assert.Equal(t, 2, plan[1].TaskGroupOrder) + assert.Equal(t, 1, plan.tasks[0].TaskGroupOrder) + assert.Equal(t, 2, plan.tasks[1].TaskGroupOrder) - assert.Equal(t, "first", plan[0].Id) - assert.Equal(t, "second", plan[1].Id) + assert.Equal(t, "first", plan.tasks[0].Id) + assert.Equal(t, "second", plan.tasks[1].Id) }) t.Run("NumDependents", func(t *testing.T) { - plan := TaskList{{Id: "second"}, {Id: "first", NumDependents: 2}} + plan := TaskList{ctx: ctx, tasks: []task.Task{{Id: "second"}, {Id: "first", NumDependents: 2}}} sort.Sort(plan) - assert.Equal(t, "first", plan[0].Id) - assert.Equal(t, "second", plan[1].Id) + assert.Equal(t, "first", plan.tasks[0].Id) + assert.Equal(t, "second", plan.tasks[1].Id) }) t.Run("Priority", func(t *testing.T) { - plan := TaskList{{Id: "second"}, {Id: "first", Priority: 100}} + plan := TaskList{ctx: ctx, tasks: []task.Task{{Id: "second"}, {Id: "first", Priority: 100}}} sort.Sort(plan) - assert.Equal(t, "first", plan[0].Id) - assert.Equal(t, "second", plan[1].Id) + assert.Equal(t, "first", plan.tasks[0].Id) + assert.Equal(t, "second", plan.tasks[1].Id) }) t.Run("ExpectedDuration", func(t *testing.T) { - plan := TaskList{{Id: "second"}, {Id: "first"}} - plan[1].DurationPrediction.Value = time.Hour - plan[1].DurationPrediction.TTL = time.Hour * 24 - 
plan[1].DurationPrediction.CollectedAt = time.Now() + plan := TaskList{ctx: ctx, tasks: []task.Task{{Id: "second"}, {Id: "first"}}} + plan.tasks[1].DurationPrediction.Value = time.Hour + plan.tasks[1].DurationPrediction.TTL = time.Hour * 24 + plan.tasks[1].DurationPrediction.CollectedAt = time.Now() - plan[0].DurationPrediction.Value = time.Minute - plan[0].DurationPrediction.TTL = time.Hour * 24 - plan[0].DurationPrediction.CollectedAt = time.Now() + plan.tasks[0].DurationPrediction.Value = time.Minute + plan.tasks[0].DurationPrediction.TTL = time.Hour * 24 + plan.tasks[0].DurationPrediction.CollectedAt = time.Now() sort.Sort(plan) - assert.Equal(t, "first", plan[0].Id) - assert.Equal(t, "second", plan[1].Id) + assert.Equal(t, "first", plan.tasks[0].Id) + assert.Equal(t, "second", plan.tasks[1].Id) }) }) }) t.Run("PrepareTaskPlan", func(t *testing.T) { t.Run("Noop", func(t *testing.T) { - assert.Empty(t, PrepareTasksForPlanning(&distro.Distro{}, []task.Task{})) + assert.Empty(t, PrepareTasksForPlanning(ctx, &distro.Distro{}, []task.Task{})) }) t.Run("TaskGroupsGrouped", func(t *testing.T) { - plan := PrepareTasksForPlanning(&distro.Distro{}, []task.Task{ + plan := PrepareTasksForPlanning(ctx, &distro.Distro{}, []task.Task{ {Id: "one", TaskGroup: "first"}, {Id: "two", TaskGroup: "first"}, {Id: "three"}, }) assert.Len(t, plan, 2) - assert.Len(t, plan.Export(context.Background()), 3) + assert.Len(t, plan.Export(ctx), 3) }) t.Run("VersionsGrouped", func(t *testing.T) { - plan := PrepareTasksForPlanning(&distro.Distro{ + plan := PrepareTasksForPlanning(ctx, &distro.Distro{ PlannerSettings: distro.PlannerSettings{ GroupVersions: func() *bool { b := true; return &b }(), }, @@ -413,10 +416,10 @@ func TestPlanner(t *testing.T) { }) assert.Len(t, plan, 2) - assert.Len(t, plan.Export(context.Background()), 3) + assert.Len(t, plan.Export(ctx), 3) }) t.Run("VersionsAndTaskGroupsGrouped", func(t *testing.T) { - plan := PrepareTasksForPlanning(&distro.Distro{ + plan := 
PrepareTasksForPlanning(ctx, &distro.Distro{ PlannerSettings: distro.PlannerSettings{ GroupVersions: func() *bool { b := true; return &b }(), }, @@ -430,13 +433,13 @@ func TestPlanner(t *testing.T) { }) assert.Len(t, plan, 3) - tasks := plan.Export(context.Background()) + tasks := plan.Export(ctx) assert.Len(t, tasks, 6) assert.Equal(t, "one", tasks[0].TaskGroup) assert.Equal(t, "one", tasks[1].TaskGroup) }) t.Run("DependenciesGrouped", func(t *testing.T) { - plan := PrepareTasksForPlanning(&distro.Distro{}, []task.Task{ + plan := PrepareTasksForPlanning(ctx, &distro.Distro{}, []task.Task{ {Id: "one", DependsOn: []task.Dependency{{TaskId: "two"}}}, {Id: "three"}, {Id: "two"}, @@ -444,7 +447,7 @@ func TestPlanner(t *testing.T) { }) require.Len(t, plan, 4, "keys:%s", plan.Keys()) - tasks := plan.Export(context.Background()) + tasks := plan.Export(ctx) require.Len(t, tasks, 4) assert.Equal(t, "three", tasks[3].Id) @@ -455,14 +458,14 @@ func TestPlanner(t *testing.T) { }) t.Run("ExternalDependenciesIgnored", func(t *testing.T) { - plan := PrepareTasksForPlanning(&distro.Distro{}, []task.Task{ + plan := PrepareTasksForPlanning(ctx, &distro.Distro{}, []task.Task{ {Id: "one", DependsOn: []task.Dependency{{TaskId: "missing"}}}, {Id: "three"}, {Id: "two", DependsOn: []task.Dependency{{TaskId: "missing"}}}, }) assert.Len(t, plan, 3) - assert.Len(t, plan.Export(context.Background()), 3) + assert.Len(t, plan.Export(ctx), 3) }) }) } diff --git a/scheduler/scheduler.go b/scheduler/scheduler.go index a3d10b4be54..bfbf10b2b2b 100644 --- a/scheduler/scheduler.go +++ b/scheduler/scheduler.go @@ -38,12 +38,12 @@ func PrioritizeTasks(ctx context.Context, d *distro.Distro, tasks []task.Task, o func runTunablePlanner(ctx context.Context, d *distro.Distro, tasks []task.Task, opts TaskPlannerOptions) ([]task.Task, error) { var err error - tasks, err = PopulateCaches(opts.ID, tasks) + tasks, err = PopulateCaches(ctx, opts.ID, tasks) if err != nil { return nil, errors.WithStack(err) } - 
plan := PrepareTasksForPlanning(d, tasks).Export(ctx) + plan := PrepareTasksForPlanning(ctx, d, tasks).Export(ctx) info := GetDistroQueueInfo(ctx, d.Id, plan, d.GetTargetTime(), opts) info.SecondaryQueue = opts.IsSecondaryQueue info.PlanCreatedAt = opts.StartedAt @@ -101,7 +101,7 @@ type distroScheduler struct { } func (s *distroScheduler) scheduleDistro(ctx context.Context, distroID string, runnableTasks []task.Task, versions map[string]model.Version, maxThreshold time.Duration, isSecondaryQueue bool) ([]task.Task, error) { - prioritizedTasks, _, err := s.PrioritizeTasks(distroID, runnableTasks, versions) + prioritizedTasks, _, err := s.PrioritizeTasks(ctx, distroID, runnableTasks, versions) if err != nil { return nil, errors.Wrapf(err, "prioritizing tasks for distro '%s'", distroID) @@ -138,7 +138,7 @@ func GetDistroQueueInfo(ctx context.Context, distroID string, tasks []task.Task, name = task.GetTaskGroupString() } - duration := task.FetchExpectedDuration().Average + duration := task.FetchExpectedDuration(ctx).Average if task.DistroId != distroID { isSecondaryQueue = true diff --git a/scheduler/setup_funcs.go b/scheduler/setup_funcs.go index 2a158f5b98f..aa8c4d5f454 100644 --- a/scheduler/setup_funcs.go +++ b/scheduler/setup_funcs.go @@ -1,6 +1,7 @@ package scheduler import ( + "context" "fmt" "runtime" "sort" @@ -12,11 +13,11 @@ import ( // Function run before sorting all the tasks. Used to fetch and store // information needed for prioritizing the tasks. -type sortSetupFunc func(comparator *CmpBasedTaskComparator) error +type sortSetupFunc func(ctx context.Context, comparator *CmpBasedTaskComparator) error // PopulateCaches runs setup functions and is used by the new/tunable // scheduler to reprocess tasks before running the new planner. 
-func PopulateCaches(id string, tasks []task.Task) ([]task.Task, error) { +func PopulateCaches(ctx context.Context, id string, tasks []task.Task) ([]task.Task, error) { cmp := &CmpBasedTaskComparator{ tasks: tasks, runtimeID: id, @@ -24,14 +25,14 @@ func PopulateCaches(id string, tasks []task.Task) ([]task.Task, error) { cacheExpectedDurations, }, } - if err := cmp.setupForSortingTasks(); err != nil { + if err := cmp.setupForSortingTasks(ctx); err != nil { return nil, errors.WithStack(err) } return cmp.tasks, nil } -func cacheExpectedDurations(comparator *CmpBasedTaskComparator) error { +func cacheExpectedDurations(ctx context.Context, comparator *CmpBasedTaskComparator) error { work := make(chan task.Task, len(comparator.tasks)) output := make(chan task.Task, len(comparator.tasks)) @@ -46,7 +47,7 @@ func cacheExpectedDurations(comparator *CmpBasedTaskComparator) error { go func() { defer wg.Done() for t := range work { - _ = t.FetchExpectedDuration() + _ = t.FetchExpectedDuration(ctx) output <- t } }() @@ -68,7 +69,7 @@ func cacheExpectedDurations(comparator *CmpBasedTaskComparator) error { // groupTaskGroups puts tasks that have the same build and task group next to // each other in the queue. This ensures that, in a stable sort, // byTaskGroupOrder sorts task group members relative to each other. -func groupTaskGroups(comparator *CmpBasedTaskComparator) error { +func groupTaskGroups(ctx context.Context, comparator *CmpBasedTaskComparator) error { taskMap := make(map[string]task.Task) taskKeys := []string{} for _, t := range comparator.tasks { diff --git a/scheduler/task_prioritizer.go b/scheduler/task_prioritizer.go index 00411e3343f..1c32c164b9f 100644 --- a/scheduler/task_prioritizer.go +++ b/scheduler/task_prioritizer.go @@ -1,6 +1,7 @@ package scheduler import ( + "context" "fmt" "sort" "time" @@ -20,12 +21,13 @@ type TaskPrioritizer interface { // Takes in a slice of tasks and the current MCI settings. 
// Returns the slice of tasks, sorted in the order in which they should // be run, as well as an error if appropriate. - PrioritizeTasks(distroId string, tasks []task.Task, versions map[string]model.Version) ([]task.Task, map[string]map[string]string, error) + PrioritizeTasks(ctx context.Context, distroId string, tasks []task.Task, versions map[string]model.Version) ([]task.Task, map[string]map[string]string, error) } // CmpBasedTaskComparator runs the tasks through a slice of comparator functions // determining which is more important. type CmpBasedTaskComparator struct { + ctx context.Context runtimeID string tasks []task.Task versions map[string]model.Version @@ -46,8 +48,9 @@ type CmpBasedTaskQueues struct { // NewCmpBasedTaskComparator returns a new task prioritizer, using the default set of comparators // as well as the setup functions necessary for those comparators. -func NewCmpBasedTaskComparator(id string) *CmpBasedTaskComparator { +func NewCmpBasedTaskComparator(ctx context.Context, id string) *CmpBasedTaskComparator { return &CmpBasedTaskComparator{ + ctx: ctx, runtimeID: id, setupFuncs: []sortSetupFunc{ cacheExpectedDurations, @@ -74,8 +77,8 @@ type CmpBasedTaskPrioritizer struct { // whether they are part of patch versions or automatically created versions. // Then prioritizes each slice, and merges them. // Returns a full slice of the prioritized tasks, and an error if one occurs. 
-func (prioritizer *CmpBasedTaskPrioritizer) PrioritizeTasks(distroId string, tasks []task.Task, versions map[string]model.Version) ([]task.Task, map[string]map[string]string, error) { - comparator := NewCmpBasedTaskComparator(prioritizer.runtimeID) +func (prioritizer *CmpBasedTaskPrioritizer) PrioritizeTasks(ctx context.Context, distroId string, tasks []task.Task, versions map[string]model.Version) ([]task.Task, map[string]map[string]string, error) { + comparator := NewCmpBasedTaskComparator(ctx, prioritizer.runtimeID) comparator.versions = versions // split the tasks into repotracker tasks and patch tasks, then prioritize // individually and merge @@ -93,7 +96,7 @@ func (prioritizer *CmpBasedTaskPrioritizer) PrioritizeTasks(distroId string, tas comparator.tasks = taskList startAt = time.Now() - err := comparator.setupForSortingTasks() + err := comparator.setupForSortingTasks(ctx) if err != nil { return nil, nil, errors.Wrap(err, "Error running setup for sorting tasks") } @@ -140,9 +143,9 @@ func (prioritizer *CmpBasedTaskPrioritizer) PrioritizeTasks(distroId string, tas // Run all of the setup functions necessary for prioritizing the tasks. // Returns an error if any of the setup funcs return an error. 
-func (cbtc *CmpBasedTaskComparator) setupForSortingTasks() error { +func (cbtc *CmpBasedTaskComparator) setupForSortingTasks(ctx context.Context) error { for _, setupFunc := range cbtc.setupFuncs { - if err := setupFunc(cbtc); err != nil { + if err := setupFunc(ctx, cbtc); err != nil { return errors.Wrap(err, "Error running setup for sorting") } } diff --git a/scheduler/task_prioritizer_test.go b/scheduler/task_prioritizer_test.go index a921aa81214..f2912d2295a 100644 --- a/scheduler/task_prioritizer_test.go +++ b/scheduler/task_prioritizer_test.go @@ -1,6 +1,7 @@ package scheduler import ( + "context" "testing" "github.com/evergreen-ci/evergreen" @@ -43,13 +44,16 @@ func (c *idComparator) compare(t1, t2 task.Task, p *CmpBasedTaskComparator) (int return 0, "", nil } func TestCmpBasedTaskComparator(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + var taskComparator *CmpBasedTaskComparator var taskIds []string var tasks []task.Task Convey("With a CmpBasedTaskComparator", t, func() { - taskComparator = NewCmpBasedTaskComparator("test-id") + taskComparator = NewCmpBasedTaskComparator(ctx, "test-id") taskIds = []string{"t1", "t2"} @@ -147,7 +151,7 @@ func TestCmpBasedTaskComparator(t *testing.T) { }) Convey("Splitting tasks by requester should separate tasks based on the Requester field", t, func() { - taskComparator = NewCmpBasedTaskComparator("test-id") + taskComparator = NewCmpBasedTaskComparator(ctx, "test-id") taskIds = []string{"t1", "t2", "t3", "t4", "t5"} tasks = []task.Task{ {Id: taskIds[0], Requester: evergreen.RepotrackerVersionRequester}, @@ -170,7 +174,7 @@ func TestCmpBasedTaskComparator(t *testing.T) { }) Convey("Splitting tasks with priority greater than 100 should always put those tasks in the high priority queue", t, func() { - taskComparator = NewCmpBasedTaskComparator("test-id") + taskComparator = NewCmpBasedTaskComparator(ctx, "test-id") taskIds = []string{"t1", "t2", "t3", "t4", "t5"} tasks = 
[]task.Task{ {Id: taskIds[0], Requester: evergreen.RepotrackerVersionRequester, Priority: 101}, diff --git a/scheduler/task_priority_cmp.go b/scheduler/task_priority_cmp.go index 25a0535b741..22479dd5cb8 100644 --- a/scheduler/task_priority_cmp.go +++ b/scheduler/task_priority_cmp.go @@ -99,9 +99,9 @@ func (c *byAge) compare(t1, t2 task.Task, _ *CmpBasedTaskComparator) (int, strin type byRuntime struct{} func (c *byRuntime) name() string { return "expected runtime" } -func (c *byRuntime) compare(t1, t2 task.Task, _ *CmpBasedTaskComparator) (int, string, error) { - oneExpected := t1.FetchExpectedDuration().Average - twoExpected := t2.FetchExpectedDuration().Average +func (c *byRuntime) compare(t1, t2 task.Task, cmp *CmpBasedTaskComparator) (int, string, error) { + oneExpected := t1.FetchExpectedDuration(cmp.ctx).Average + twoExpected := t2.FetchExpectedDuration(cmp.ctx).Average reason := fmt.Sprintf("%s is %s; %s is %s", t1.Id, oneExpected.String(), t2.Id, twoExpected.String()) if oneExpected == 0 || twoExpected == 0 { diff --git a/scheduler/task_priority_cmp_test.go b/scheduler/task_priority_cmp_test.go index 58f9eb3d477..def357305ba 100644 --- a/scheduler/task_priority_cmp_test.go +++ b/scheduler/task_priority_cmp_test.go @@ -1,6 +1,7 @@ package scheduler import ( + "context" "testing" "time" @@ -24,13 +25,16 @@ var ( ) func TestTaskImportanceComparators(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + var taskComparator *CmpBasedTaskComparator var taskIds []string var tasks []task.Task Convey("When using the task importance comparators", t, func() { - taskComparator = &CmpBasedTaskComparator{} + taskComparator = NewCmpBasedTaskComparator(ctx, "runtime-id") taskIds = []string{"t1", "t2"} @@ -270,6 +274,9 @@ func TestByTaskGroupOrder(t *testing.T) { } func TestPrioritizeTasksWithSameTaskGroupsAndDifferentBuilds(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + assert := 
assert.New(t) require := require.New(t) require.NoError(db.ClearCollections(model.VersionCollection, task.Collection)) @@ -328,7 +335,7 @@ func TestPrioritizeTasksWithSameTaskGroupsAndDifferentBuilds(t *testing.T) { require.NoError(tasks.Insert()) prioritizer := &CmpBasedTaskPrioritizer{} - sorted, _, err := prioritizer.PrioritizeTasks("distro", tasks.Export(), versions) + sorted, _, err := prioritizer.PrioritizeTasks(ctx, "distro", tasks.Export(), versions) require.NoError(err) assert.Equal("task_4", sorted[0].Id) assert.Equal("task_1", sorted[1].Id) @@ -337,6 +344,9 @@ func TestPrioritizeTasksWithSameTaskGroupsAndDifferentBuilds(t *testing.T) { } func TestTaskGroupsNotOutOfOrderFromOtherComparators(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + assert := assert.New(t) require := require.New(t) require.NoError(db.ClearCollections(model.VersionCollection)) @@ -387,7 +397,7 @@ func TestTaskGroupsNotOutOfOrderFromOtherComparators(t *testing.T) { } prioritizer := &CmpBasedTaskPrioritizer{} - sorted, _, err := prioritizer.PrioritizeTasks("distro", tasks, versions) + sorted, _, err := prioritizer.PrioritizeTasks(ctx, "distro", tasks, versions) assert.NoError(err) list := []string{} for _, t := range sorted { diff --git a/scheduler/utilization_based_host_allocator.go b/scheduler/utilization_based_host_allocator.go index f084e7f29b6..5338b0caa2a 100644 --- a/scheduler/utilization_based_host_allocator.go +++ b/scheduler/utilization_based_host_allocator.go @@ -350,7 +350,7 @@ func getSoonToBeFreeHosts(ctx context.Context, existingHosts []host.Host, future defer recovery.LogStackTraceAndContinue("panic during future free host calculation") defer wg.Done() for t := range source { - durationStats := t.FetchExpectedDuration() + durationStats := t.FetchExpectedDuration(ctx) expectedDuration := durationStats.Average durationStdDev := durationStats.StdDev elapsedTime := time.Since(t.StartTime) diff --git a/service/task.go 
b/service/task.go index 6be6617df29..b5c23e87776 100644 --- a/service/task.go +++ b/service/task.go @@ -837,7 +837,7 @@ func (uis *UIServer) taskModify(w http.ResponseWriter, r *http.Request) { http.Error(w, "not authorized to override dependencies", http.StatusUnauthorized) return } - err = projCtx.Task.SetOverrideDependencies(authUser.Username()) + err = projCtx.Task.SetOverrideDependencies(ctx, authUser.Username()) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return diff --git a/trigger/task_test.go b/trigger/task_test.go index 7584340f5c0..2ba061e8914 100644 --- a/trigger/task_test.go +++ b/trigger/task_test.go @@ -824,7 +824,7 @@ func (s *taskSuite) makeTest(ctx context.Context, testName, testStatus string) { Execution: s.task.Execution, Status: testStatus, })) - s.Require().NoError(s.task.SetResultsInfo(testresult.TestResultsServiceLocal, testStatus == evergreen.TestFailedStatus)) + s.Require().NoError(s.task.SetResultsInfo(ctx, testresult.TestResultsServiceLocal, testStatus == evergreen.TestFailedStatus)) } func (s *taskSuite) tryDoubleTrigger(shouldGenerate bool) { @@ -1432,7 +1432,7 @@ func TestTaskRegressionByTestDisplayTask(t *testing.T) { TestName: "f1", Status: evergreen.TestFailedStatus, })) - require.NoError(t, tasks[4].SetResultsInfo(testresult.TestResultsServiceLocal, true)) + require.NoError(t, tasks[4].SetResultsInfo(ctx, testresult.TestResultsServiceLocal, true)) notification, err = tr.taskRegressionByTest(ctx, &event.Subscription{ID: "s1", Subscriber: subscriber, Trigger: "t1"}) assert.NoError(t, err) require.NotNil(t, notification) diff --git a/units/generate_tasks.go b/units/generate_tasks.go index b2ffa56c125..80ae3320b6e 100644 --- a/units/generate_tasks.go +++ b/units/generate_tasks.go @@ -271,11 +271,11 @@ func (j *generateTasksJob) Run(ctx context.Context) { if err != nil && !shouldNoop { j.AddError(err) - j.AddError(task.MarkGeneratedTasksErr(j.TaskID, err)) + j.AddError(task.MarkGeneratedTasksErr(ctx, 
j.TaskID, err)) return } if !shouldNoop { - j.AddError(task.MarkGeneratedTasks(j.TaskID)) + j.AddError(task.MarkGeneratedTasks(ctx, j.TaskID)) if t.IsPatchRequest() { activatedTasks, err := task.CountActivatedTasksForVersion(t.Version) if err != nil { diff --git a/units/host_monitoring_check.go b/units/host_monitoring_check.go index 85d71466d98..d54b90f8966 100644 --- a/units/host_monitoring_check.go +++ b/units/host_monitoring_check.go @@ -247,7 +247,7 @@ func handleTerminatedHostSpawnedByTask(ctx context.Context, h *host.Host) error catcher := grip.NewBasicCatcher() catcher.Wrap(err, "inserting new host for task") - catcher.Wrap(task.AddHostCreateDetails(h.SpawnOptions.TaskID, h.Id, h.SpawnOptions.TaskExecutionNumber, errors.New("host was externally terminated")), "adding host create details") + catcher.Wrap(task.AddHostCreateDetails(ctx, h.SpawnOptions.TaskID, h.Id, h.SpawnOptions.TaskExecutionNumber, errors.New("host was externally terminated")), "adding host create details") return catcher.Resolve() } diff --git a/units/provisioning_create_host.go b/units/provisioning_create_host.go index 9b89355b52c..cebad7e8267 100644 --- a/units/provisioning_create_host.go +++ b/units/provisioning_create_host.go @@ -222,7 +222,7 @@ func (j *createHostJob) Run(ctx context.Context) { })) if j.host.SpawnOptions.SpawnedByTask { - if err := task.AddHostCreateDetails(j.host.StartedBy, j.host.Id, j.host.SpawnOptions.TaskExecutionNumber, j.Error()); err != nil { + if err := task.AddHostCreateDetails(ctx, j.host.StartedBy, j.host.Id, j.host.SpawnOptions.TaskExecutionNumber, j.Error()); err != nil { j.AddError(errors.Wrapf(err, "adding host create error details")) } } diff --git a/units/task_monitor_execution_timeout.go b/units/task_monitor_execution_timeout.go index ea167f269c0..8e2f75d6806 100644 --- a/units/task_monitor_execution_timeout.go +++ b/units/task_monitor_execution_timeout.go @@ -182,7 +182,7 @@ func (j *taskExecutionTimeoutJob) cleanUpTimedOutTask(ctx context.Context) 
error "operation": "cleanup timed out task", "job": j.ID(), }) - return errors.WithStack(j.task.MarkUnscheduled()) + return errors.WithStack(j.task.MarkUnscheduled(ctx)) } if host.RunningTask == j.task.Id { From 6c13aa8aa2d0234189051e51d054a3bb1a780d91 Mon Sep 17 00:00:00 2001 From: Zackary Santana <64446617+ZackarySantana@users.noreply.github.com> Date: Fri, 17 Jan 2025 16:43:13 -0500 Subject: [PATCH 06/11] feat: add context to task.UpdateAll --- db/db_utils.go | 28 +++++++++++++++++ model/generate.go | 2 +- model/lifecycle.go | 20 ++++++------ model/pod/dispatcher/dispatcher.go | 2 +- model/task/db.go | 13 +++++--- model/task/db_test.go | 4 +-- model/task/task.go | 50 +++++++++++++++++------------- model/task/task_test.go | 43 +++++++++++++++---------- model/task_lifecycle.go | 16 +++++----- model/task_lifecycle_test.go | 16 +++++----- units/check_blocked_tasks.go | 2 +- units/crons.go | 2 +- units/task_stranded_cleanup.go | 2 +- 13 files changed, 124 insertions(+), 76 deletions(-) diff --git a/db/db_utils.go b/db/db_utils.go index 491d1337473..c3d811f1783 100644 --- a/db/db_utils.go +++ b/db/db_utils.go @@ -246,6 +246,34 @@ func UpdateContext(ctx context.Context, collection string, query interface{}, up return db.C(collection).Update(query, update) } +func UpdateAllContext(ctx context.Context, collection string, query interface{}, update interface{}) (*db.ChangeInfo, error) { + switch query.(type) { + case *Q, Q: + grip.EmergencyPanic(message.Fields{ + "message": "invalid query passed to update all", + "cause": "programmer error", + "query": query, + "collection": collection, + }) + case nil: + grip.EmergencyPanic(message.Fields{ + "message": "nil query passed to update all", + "query": query, + "collection": collection, + }) + } + + session, db, err := GetGlobalSessionFactory().GetContextSession(ctx) + if err != nil { + grip.Errorf("error establishing db connection: %+v", err) + + return nil, err + } + defer session.Close() + + return 
db.C(collection).UpdateAll(query, update) +} + // UpdateId updates one _id-matching document in the collection. func UpdateId(collection string, id, update interface{}) error { session, db, err := GetGlobalSessionFactory().GetSession() diff --git a/model/generate.go b/model/generate.go index abf3394a0d8..9715d5a2525 100644 --- a/model/generate.go +++ b/model/generate.go @@ -715,7 +715,7 @@ func (g *GeneratedProject) addDependencies(ctx context.Context, newTaskIds []str defer span.End() statuses := []string{evergreen.TaskSucceeded, task.AllStatuses} for _, status := range statuses { - if err := g.Task.UpdateDependsOn(status, newTaskIds); err != nil { + if err := g.Task.UpdateDependsOn(ctx, status, newTaskIds); err != nil { return errors.Wrapf(err, "updating tasks depending on '%s'", g.Task.Id) } } diff --git a/model/lifecycle.go b/model/lifecycle.go index f0ed5983fe3..c48105ea8de 100644 --- a/model/lifecycle.go +++ b/model/lifecycle.go @@ -70,7 +70,7 @@ func SetVersionActivation(ctx context.Context, versionId string, active bool, ca return errors.Wrap(err, "getting tasks to activate") } if len(tasksToModify) > 0 { - if err = task.ActivateTasks(tasksToModify, time.Now(), false, caller); err != nil { + if err = task.ActivateTasks(ctx, tasksToModify, time.Now(), false, caller); err != nil { return errors.Wrap(err, "updating tasks for activation") } } @@ -85,7 +85,7 @@ func SetVersionActivation(ctx context.Context, versionId string, active bool, ca return errors.Wrap(err, "getting tasks to deactivate") } if len(tasksToModify) > 0 { - if err = task.DeactivateTasks(tasksToModify, false, caller); err != nil { + if err = task.DeactivateTasks(ctx, tasksToModify, false, caller); err != nil { return errors.Wrap(err, "deactivating tasks") } } @@ -153,7 +153,7 @@ func setTaskActivationForBuilds(ctx context.Context, buildIds []string, active, } } } - if err = task.ActivateTasks(tasksToActivate, time.Now(), withDependencies, caller); err != nil { + if err = task.ActivateTasks(ctx, 
tasksToActivate, time.Now(), withDependencies, caller); err != nil { return errors.Wrap(err, "updating tasks for activation") } @@ -171,7 +171,7 @@ func setTaskActivationForBuilds(ctx context.Context, buildIds []string, active, if err != nil { return errors.Wrap(err, "getting tasks to deactivate") } - if err = task.DeactivateTasks(tasks, withDependencies, caller); err != nil { + if err = task.DeactivateTasks(ctx, tasks, withDependencies, caller); err != nil { return errors.Wrap(err, "deactivating tasks") } } @@ -242,7 +242,7 @@ func SetTaskPriority(ctx context.Context, t task.Task, priority int64, caller st for _, taskToUpdate := range tasks { taskIDs = append(taskIDs, taskToUpdate.Id) } - _, err = task.UpdateAll( + _, err = task.UpdateAll(ctx, bson.M{task.IdKey: bson.M{"$in": taskIDs}}, bson.M{"$set": bson.M{task.PriorityKey: priority}}, ) @@ -276,7 +276,7 @@ func SetVersionsPriority(ctx context.Context, versionIds []string, priority int6 } func setTasksPriority(ctx context.Context, query bson.M, priority int64, caller string) error { - _, err := task.UpdateAll(query, + _, err := task.UpdateAll(ctx, query, bson.M{"$set": bson.M{task.PriorityKey: priority}}, ) if err != nil { @@ -814,7 +814,7 @@ func createTasksForBuild(ctx context.Context, creationInfo TaskCreationInfo) (ta } // update existing exec tasks - grip.Error(message.WrapError(task.AddDisplayTaskIdToExecTasks(id, execTasksThatNeedParentId), message.Fields{ + grip.Error(message.WrapError(task.AddDisplayTaskIdToExecTasks(ctx, id, execTasksThatNeedParentId), message.Fields{ "message": "problem adding display task ID to exec tasks", "exec_tasks_to_update": execTasksThatNeedParentId, "display_task_id": id, @@ -1029,7 +1029,7 @@ func RecomputeNumDependents(ctx context.Context, t task.Task) error { taskPtrs = append(taskPtrs, &depTasks[i]) } query := task.ByVersion(t.Version) - _, err = task.UpdateAll(query, bson.M{"$set": bson.M{task.NumDependentsKey: 0}}) + _, err = task.UpdateAll(ctx, query, bson.M{"$set": 
bson.M{task.NumDependentsKey: 0}}) if err != nil { return errors.Wrap(err, "resetting num dependents") } @@ -1664,7 +1664,7 @@ func addNewBuilds(ctx context.Context, creationInfo TaskCreationInfo, existingBu return nil, errors.Wrap(err, "getting dependencies for activated tasks") } - if err = task.ActivateTasks(activatedTaskDependencies, time.Now(), true, evergreen.User); err != nil { + if err = task.ActivateTasks(ctx, activatedTaskDependencies, time.Now(), true, evergreen.User); err != nil { return nil, errors.Wrap(err, "activating dependencies for new tasks") } @@ -1792,7 +1792,7 @@ func addNewTasksToExistingBuilds(ctx context.Context, creationInfo TaskCreationI if err != nil { return nil, errors.Wrap(err, "getting dependencies for activated tasks") } - if err = task.ActivateTasks(activatedTaskDependencies, time.Now(), true, evergreen.User); err != nil { + if err = task.ActivateTasks(ctx, activatedTaskDependencies, time.Now(), true, evergreen.User); err != nil { return nil, errors.Wrap(err, "activating existing dependencies for new tasks") } diff --git a/model/pod/dispatcher/dispatcher.go b/model/pod/dispatcher/dispatcher.go index 1ba62f80ccb..795beed0ba2 100644 --- a/model/pod/dispatcher/dispatcher.go +++ b/model/pod/dispatcher/dispatcher.go @@ -321,7 +321,7 @@ func (pd *PodDispatcher) RemovePod(ctx context.Context, env evergreen.Environmen return errors.Wrap(err, "marking unallocatable container tasks as system-failed") } - if err := task.MarkTasksAsContainerDeallocated(pd.TaskIDs); err != nil { + if err := task.MarkTasksAsContainerDeallocated(ctx, pd.TaskIDs); err != nil { return errors.Wrap(err, "marking all tasks in dispatcher as container deallocated") } diff --git a/model/task/db.go b/model/task/db.go index 5b8a3bb916b..f1364e94127 100644 --- a/model/task/db.go +++ b/model/task/db.go @@ -1672,8 +1672,9 @@ func UpdateOne(ctx context.Context, query interface{}, update interface{}) error ) } -func UpdateAll(query interface{}, update interface{}) 
(*adb.ChangeInfo, error) { - return db.UpdateAll( +func UpdateAll(ctx context.Context, query interface{}, update interface{}) (*adb.ChangeInfo, error) { + return db.UpdateAllContext( + ctx, Collection, query, update, @@ -2648,8 +2649,9 @@ func FindAllDependencyTasksToModify(tasks []Task, isBlocking, ignoreDependencySt return allTasks, nil } -func activateTasks(taskIDs []string, caller string, activationTime time.Time) error { +func activateTasks(ctx context.Context, taskIDs []string, caller string, activationTime time.Time) error { _, err := UpdateAll( + ctx, bson.M{ IdKey: bson.M{"$in": taskIDs}, ActivatedKey: false, @@ -2667,14 +2669,15 @@ func activateTasks(taskIDs []string, caller string, activationTime time.Time) er if err != nil { return errors.Wrap(err, "setting tasks to active") } - if err = enableDisabledTasks(taskIDs); err != nil { + if err = enableDisabledTasks(ctx, taskIDs); err != nil { return errors.Wrap(err, "enabling disabled tasks") } return nil } -func enableDisabledTasks(taskIDs []string) error { +func enableDisabledTasks(ctx context.Context, taskIDs []string) error { _, err := UpdateAll( + ctx, bson.M{ IdKey: bson.M{"$in": taskIDs}, PriorityKey: evergreen.DisabledTaskPriority, diff --git a/model/task/db_test.go b/model/task/db_test.go index 943e52a22b3..24cbfd7259e 100644 --- a/model/task/db_test.go +++ b/model/task/db_test.go @@ -2082,7 +2082,7 @@ func TestActivateTasksUpdate(t *testing.T) { } require.NoError(t, t0.Insert()) - assert.NoError(t, activateTasks([]string{t0.Id}, caller, activationTime)) + assert.NoError(t, activateTasks(ctx, []string{t0.Id}, caller, activationTime)) dbTask, err := FindOneId(ctx, t0.Id) assert.NoError(t, err) assert.True(t, dbTask.Activated) @@ -2108,7 +2108,7 @@ func TestActivateTasksUpdate(t *testing.T) { require.NoError(t, d.Insert(ctx)) require.NoError(t, t0.Insert()) - assert.NoError(t, activateTasks([]string{t0.Id}, caller, activationTime)) + assert.NoError(t, activateTasks(ctx, []string{t0.Id}, caller, 
activationTime)) tasks, err := FindHostSchedulable(ctx, "d") require.NoError(t, err) diff --git a/model/task/task.go b/model/task/task.go index f552708c9ba..9a599cd1a94 100644 --- a/model/task/task.go +++ b/model/task/task.go @@ -1279,12 +1279,12 @@ func (t *Task) MarkAsContainerDeallocated(ctx context.Context, env evergreen.Env // MarkTasksAsContainerDeallocated marks multiple container tasks as no longer // allocated containers. -func MarkTasksAsContainerDeallocated(taskIDs []string) error { +func MarkTasksAsContainerDeallocated(ctx context.Context, taskIDs []string) error { if len(taskIDs) == 0 { return nil } - if _, err := UpdateAll(bson.M{ + if _, err := UpdateAll(ctx, bson.M{ IdKey: bson.M{"$in": taskIDs}, ExecutionPlatformKey: ExecutionPlatformContainer, }, containerDeallocatedUpdate()); err != nil { @@ -1447,20 +1447,21 @@ func SetTasksScheduledAndDepsMetTime(ctx context.Context, tasks []Task, schedule uniqueIDsToSchedule := utility.UniqueStrings(idsToSchedule) uniqueIDsToSetDependenciesMet := utility.UniqueStrings(idsToSetDependenciesMet) - if err := setScheduledTimeForTasks(uniqueIDsToSchedule, scheduledTime); err != nil { + if err := setScheduledTimeForTasks(ctx, uniqueIDsToSchedule, scheduledTime); err != nil { return errors.Wrap(err, "setting scheduled time for tasks") } - if err := setDependenciesMetTimeForTasks(uniqueIDsToSetDependenciesMet, scheduledTime); err != nil { + if err := setDependenciesMetTimeForTasks(ctx, uniqueIDsToSetDependenciesMet, scheduledTime); err != nil { return errors.Wrap(err, "setting dependencies met time for tasks") } return nil } -func setScheduledTimeForTasks(uniqueIDsToSchedule []string, scheduledTime time.Time) error { +func setScheduledTimeForTasks(ctx context.Context, uniqueIDsToSchedule []string, scheduledTime time.Time) error { if len(uniqueIDsToSchedule) == 0 { return nil } _, err := UpdateAll( + ctx, bson.M{ IdKey: bson.M{ "$in": uniqueIDsToSchedule, @@ -1481,11 +1482,12 @@ func 
setScheduledTimeForTasks(uniqueIDsToSchedule []string, scheduledTime time.T return nil } -func setDependenciesMetTimeForTasks(uniqueIDsToSetDependenciesMet []string, dependenciesMetTime time.Time) error { +func setDependenciesMetTimeForTasks(ctx context.Context, uniqueIDsToSetDependenciesMet []string, dependenciesMetTime time.Time) error { if len(uniqueIDsToSetDependenciesMet) == 0 { return nil } _, err := UpdateAll( + ctx, bson.M{ IdKey: bson.M{ "$in": uniqueIDsToSetDependenciesMet, @@ -1625,7 +1627,7 @@ func DeactivateStepbackTask(ctx context.Context, projectId, buildVariantName, ta return errors.Errorf("no stepback task '%s' for variant '%s' found", taskName, buildVariantName) } - if err = DeactivateTasks([]Task{*t}, true, caller); err != nil { + if err = DeactivateTasks(ctx, []Task{*t}, true, caller); err != nil { return errors.Wrap(err, "deactivating stepback task") } if t.IsAbortable() { @@ -1965,7 +1967,7 @@ func (t *Task) HasResults(ctx context.Context) bool { } // ActivateTasks sets all given tasks to active, logs them as activated, and proceeds to activate any dependencies that were deactivated. 
-func ActivateTasks(tasks []Task, activationTime time.Time, updateDependencies bool, caller string) error { +func ActivateTasks(ctx context.Context, tasks []Task, activationTime time.Time, updateDependencies bool, caller string) error { if len(tasks) == 0 { return nil } @@ -1994,7 +1996,7 @@ func ActivateTasks(tasks []Task, activationTime time.Time, updateDependencies bo if err = UpdateSchedulingLimit(caller, tasks[0].Requester, numTasksModified, true); err != nil { return err } - err = activateTasks(taskIDs, caller, activationTime) + err = activateTasks(ctx, taskIDs, caller, activationTime) if err != nil { return errors.Wrap(err, "activating tasks") } @@ -2009,7 +2011,7 @@ func ActivateTasks(tasks []Task, activationTime time.Time, updateDependencies bo })) if len(depTaskIDsToUpdate) > 0 { - return activateDeactivatedDependencies(depTasksToUpdate, depTaskIDsToUpdate, caller) + return activateDeactivatedDependencies(ctx, depTasksToUpdate, depTaskIDsToUpdate, caller) } return nil } @@ -2051,7 +2053,7 @@ func ActivateTasksByIdsWithDependencies(ctx context.Context, ids []string, calle return errors.Wrap(err, "getting recursive dependencies") } - if err = ActivateTasks(append(tasks, dependOn...), time.Now(), true, caller); err != nil { + if err = ActivateTasks(ctx, append(tasks, dependOn...), time.Now(), true, caller); err != nil { return errors.Wrap(err, "updating tasks for activation") } return nil @@ -2141,8 +2143,9 @@ func getDependencyTaskIdsToActivate(tasks []string, updateDependencies bool) (ma // activateDeactivatedDependencies activates tasks that depend on these tasks which were deactivated because a task // they depended on was deactivated. 
Only activate when all their dependencies are activated or are being activated -func activateDeactivatedDependencies(tasksToActivate map[string]Task, taskIDsToActivate []string, caller string) error { +func activateDeactivatedDependencies(ctx context.Context, tasksToActivate map[string]Task, taskIDsToActivate []string, caller string) error { _, err := UpdateAll( + ctx, bson.M{IdKey: bson.M{"$in": taskIDsToActivate}}, []bson.M{ { @@ -2225,7 +2228,7 @@ func topologicalSort(tasks []Task) ([]Task, error) { return sortedTasks, nil } -func DeactivateTasks(tasks []Task, updateDependencies bool, caller string) error { +func DeactivateTasks(ctx context.Context, tasks []Task, updateDependencies bool, caller string) error { if len(tasks) == 0 { return nil } @@ -2259,6 +2262,7 @@ func DeactivateTasks(tasks []Task, updateDependencies bool, caller string) error } _, err = UpdateAll( + ctx, bson.M{ IdKey: bson.M{"$in": taskIDs}, }, @@ -2285,7 +2289,7 @@ func DeactivateTasks(tasks []Task, updateDependencies bool, caller string) error })) if len(depTaskIDsToUpdate) > 0 { - return deactivateDependencies(depTasksToUpdate, depTaskIDsToUpdate, caller) + return deactivateDependencies(ctx, depTasksToUpdate, depTaskIDsToUpdate, caller) } return nil } @@ -2310,11 +2314,12 @@ func getDependencyTasksToUpdate(tasks []string, updateDependencies bool) ([]Task return tasksToUpdate, taskIDsToUpdate, nil } -func deactivateDependencies(tasksToUpdate []Task, taskIDsToUpdate []string, caller string) error { +func deactivateDependencies(ctx context.Context, tasksToUpdate []Task, taskIDsToUpdate []string, caller string) error { if len(tasksToUpdate) == 0 { return nil } _, err := UpdateAll( + ctx, bson.M{ IdKey: bson.M{"$in": taskIDsToUpdate}, }, @@ -2343,12 +2348,12 @@ func deactivateDependencies(tasksToUpdate []Task, taskIDsToUpdate []string, call // DeactivateDependencies gets all tasks that are blocked by the given tasks (this could be 1st level // or recursive) and deactivates them. 
Then it sends out the event logs for the deactivation. -func DeactivateDependencies(tasks []string, caller string) error { +func DeactivateDependencies(ctx context.Context, tasks []string, caller string) error { tasksToUpdate, taskIDsToUpdate, err := getDependencyTasksToUpdate(tasks, true) if err != nil { return errors.Wrap(err, "retrieving dependency tasks to deactivate") } - return errors.Wrap(deactivateDependencies(tasksToUpdate, taskIDsToUpdate, caller), "marking dependencies deactivated") + return errors.Wrap(deactivateDependencies(ctx, tasksToUpdate, taskIDsToUpdate, caller), "marking dependencies deactivated") } // MarkEnd handles the Task updates associated with ending a task. If the task's start time is zero @@ -2514,7 +2519,7 @@ func (t *Task) Reset(ctx context.Context, caller string) error { // ResetTasks performs the same DB updates as (*Task).Reset, but resets many // tasks instead of a single one. -func ResetTasks(tasks []Task, caller string) error { +func ResetTasks(ctx context.Context, tasks []Task, caller string) error { if len(tasks) == 0 { return nil } @@ -2524,6 +2529,7 @@ func ResetTasks(tasks []Task, caller string) error { } if _, err := UpdateAll( + ctx, bson.M{ IdKey: bson.M{"$in": taskIDs}, StatusKey: bson.M{"$in": evergreen.TaskCompletedStatuses}, @@ -3003,6 +3009,7 @@ func abortTasksByQuery(ctx context.Context, q bson.M, reason AbortInfo) error { return nil } _, err = UpdateAll( + ctx, ByIds(ids), []bson.M{ bson.M{"$set": taskAbortUpdate(reason)}, @@ -4019,7 +4026,7 @@ func AddParentDisplayTasks(tasks []Task) ([]Task, error) { // UpdateDependsOn appends new dependencies to tasks that already depend on this task // if the task does not explicitly omit having generated tasks as dependencies -func (t *Task) UpdateDependsOn(status string, newDependencyIDs []string) error { +func (t *Task) UpdateDependsOn(ctx context.Context, status string, newDependencyIDs []string) error { newDependencies := make([]Dependency, 0, len(newDependencyIDs)) for 
_, depID := range newDependencyIDs { if depID == t.Id { @@ -4038,6 +4045,7 @@ func (t *Task) UpdateDependsOn(status string, newDependencyIDs []string) error { } _, err := UpdateAll( + ctx, bson.M{ DependsOnKey: bson.M{"$elemMatch": bson.M{ DependencyTaskIdKey: t.Id, @@ -4101,11 +4109,11 @@ func (t *Task) SetNumDependents(ctx context.Context) error { }, update) } -func AddDisplayTaskIdToExecTasks(displayTaskId string, execTasksToUpdate []string) error { +func AddDisplayTaskIdToExecTasks(ctx context.Context, displayTaskId string, execTasksToUpdate []string) error { if len(execTasksToUpdate) == 0 { return nil } - _, err := UpdateAll(bson.M{ + _, err := UpdateAll(ctx, bson.M{ IdKey: bson.M{"$in": execTasksToUpdate}, }, bson.M{"$set": bson.M{ diff --git a/model/task/task_test.go b/model/task/task_test.go index 377b9774fbf..787f8e26b00 100644 --- a/model/task/task_test.go +++ b/model/task/task_test.go @@ -2178,18 +2178,18 @@ func TestUpdateDependsOn(t *testing.T) { assert.NoError(t, t2.Insert()) var err error - assert.NoError(t, t1.UpdateDependsOn(evergreen.TaskFailed, []string{"t3", "t4"})) + assert.NoError(t, t1.UpdateDependsOn(ctx, evergreen.TaskFailed, []string{"t3", "t4"})) t2, err = FindOneId(ctx, "t2") assert.NoError(t, err) assert.Len(t, t2.DependsOn, 2) - assert.NoError(t, t1.UpdateDependsOn(evergreen.TaskSucceeded, []string{"t3", "t4"})) + assert.NoError(t, t1.UpdateDependsOn(ctx, evergreen.TaskSucceeded, []string{"t3", "t4"})) t2, err = FindOneId(ctx, "t2") assert.NoError(t, err) assert.Len(t, t2.DependsOn, 4) t.Run("AddingSelfDependencyShouldNoop", func(t *testing.T) { - assert.NoError(t, t1.UpdateDependsOn(evergreen.TaskSucceeded, []string{t1.Id})) + assert.NoError(t, t1.UpdateDependsOn(ctx, evergreen.TaskSucceeded, []string{t1.Id})) dbTask1, err := FindOneId(ctx, t1.Id) assert.NoError(t, err) require.NotZero(t, dbTask1) @@ -2353,6 +2353,9 @@ func TestGetRecursiveDependenciesDown(t *testing.T) { } func TestDeactivateDependencies(t *testing.T) { + ctx, cancel 
:= context.WithCancel(context.Background()) + defer cancel() + require.NoError(t, db.ClearCollections(Collection, event.EventCollection)) tasks := []Task{ @@ -2368,7 +2371,7 @@ func TestDeactivateDependencies(t *testing.T) { } updatedIDs := []string{"t4", "t5"} - err := DeactivateDependencies([]string{"t0"}, "") + err := DeactivateDependencies(ctx, []string{"t0"}, "") assert.NoError(t, err) dbTasks, err := FindAll(All) @@ -2390,6 +2393,9 @@ func TestDeactivateDependencies(t *testing.T) { } func TestActivateDeactivatedDependencies(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + require.NoError(t, db.ClearCollections(Collection, event.EventCollection)) tasks := []Task{ @@ -2406,7 +2412,7 @@ func TestActivateDeactivatedDependencies(t *testing.T) { updatedIDs := []string{"t3", "t4"} depTasksToUpdate, depTaskIDsToUpdate, err := getDependencyTaskIdsToActivate([]string{"t0"}, true) require.NoError(t, err) - err = activateDeactivatedDependencies(depTasksToUpdate, depTaskIDsToUpdate, "") + err = activateDeactivatedDependencies(ctx, depTasksToUpdate, depTaskIDsToUpdate, "") assert.NoError(t, err) dbTasks, err := FindAll(All) @@ -2478,7 +2484,7 @@ func TestActivateTasks(t *testing.T) { } updatedIDs := []string{"t0", "t3", "t4"} - err := ActivateTasks([]Task{tasks[0]}, time.Time{}, true, u.Id) + err := ActivateTasks(ctx, []Task{tasks[0]}, time.Time{}, true, u.Id) assert.NoError(t, err) u, err = user.FindOne(user.ById(u.Id)) @@ -2509,7 +2515,7 @@ func TestActivateTasks(t *testing.T) { } } - err = ActivateTasks([]Task{tasks[1]}, time.Time{}, true, u.Id) + err = ActivateTasks(ctx, []Task{tasks[1]}, time.Time{}, true, u.Id) require.Error(t, err) assert.Contains(t, err.Error(), fmt.Sprintf("cannot schedule %d tasks, maximum hourly per-user limit is %d", 102, 100)) }) @@ -2524,7 +2530,7 @@ func TestActivateTasks(t *testing.T) { } require.NoError(t, task.Insert()) - err := ActivateTasks([]Task{task}, time.Now(), true, "abyssinian") + err 
:= ActivateTasks(ctx, []Task{task}, time.Now(), true, "abyssinian") assert.NoError(t, err) events, err := event.FindAllByResourceID(task.Id) @@ -2541,6 +2547,9 @@ func TestActivateTasks(t *testing.T) { } func TestDeactivateTasks(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + require.NoError(t, db.ClearCollections(Collection, event.EventCollection)) tasks := []Task{ @@ -2558,7 +2567,7 @@ func TestDeactivateTasks(t *testing.T) { } updatedIDs := []string{"t0", "t4", "t5", "t6", "t7"} - err := DeactivateTasks([]Task{tasks[0]}, true, "") + err := DeactivateTasks(ctx, []Task{tasks[0]}, true, "") assert.NoError(t, err) dbTasks, err := FindAll(All) @@ -2895,7 +2904,7 @@ func TestMarkTasksAsContainerDeallocated(t *testing.T) { taskIDs = append(taskIDs, tsk.Id) } - require.NoError(t, MarkTasksAsContainerDeallocated(taskIDs)) + require.NoError(t, MarkTasksAsContainerDeallocated(ctx, taskIDs)) checkTasksUnallocated(t, taskIDs) }, "NoopsWithHostTask": func(t *testing.T, tasks []Task) { @@ -2906,7 +2915,7 @@ func TestMarkTasksAsContainerDeallocated(t *testing.T) { taskIDs = append(taskIDs, tsk.Id) } - require.NoError(t, MarkTasksAsContainerDeallocated(taskIDs)) + require.NoError(t, MarkTasksAsContainerDeallocated(ctx, taskIDs)) checkTasksUnallocated(t, taskIDs[1:]) dbHostTask, err := FindOneId(ctx, tasks[0].Id) require.NoError(t, err) @@ -2922,7 +2931,7 @@ func TestMarkTasksAsContainerDeallocated(t *testing.T) { taskIDs = append(taskIDs, tsk.Id) } - require.NoError(t, MarkTasksAsContainerDeallocated(taskIDs)) + require.NoError(t, MarkTasksAsContainerDeallocated(ctx, taskIDs)) checkTasksUnallocated(t, taskIDs) }, "DoesNotUpdateNonexistentTask": func(t *testing.T, tasks []Task) { @@ -2932,7 +2941,7 @@ func TestMarkTasksAsContainerDeallocated(t *testing.T) { taskIDs = append(taskIDs, tsk.Id) } - require.NoError(t, MarkTasksAsContainerDeallocated(taskIDs)) + require.NoError(t, MarkTasksAsContainerDeallocated(ctx, taskIDs)) 
checkTasksUnallocated(t, taskIDs[1:]) dbTask, err := FindOneId(ctx, tasks[0].Id) @@ -3947,7 +3956,7 @@ func TestAddDisplayTaskIdToExecTasks(t *testing.T) { assert.NoError(t, t2.Insert()) assert.NoError(t, t3.Insert()) - assert.NoError(t, AddDisplayTaskIdToExecTasks("dt", []string{t1.Id, t2.Id})) + assert.NoError(t, AddDisplayTaskIdToExecTasks(ctx, "dt", []string{t1.Id, t2.Id})) var err error t1, err = FindOneId(ctx, t1.Id) @@ -5325,7 +5334,7 @@ func TestResetTasks(t *testing.T) { } assert.NoError(t, t0.Insert()) - assert.NoError(t, ResetTasks([]Task{t0}, "user")) + assert.NoError(t, ResetTasks(ctx, []Task{t0}, "user")) dbTask, err := FindOneId(ctx, t0.Id) assert.NoError(t, err) assert.False(t, dbTask.UnattainableDependency) @@ -5347,7 +5356,7 @@ func TestResetTasks(t *testing.T) { } assert.NoError(t, t0.Insert()) - assert.NoError(t, ResetTasks([]Task{t0}, "")) + assert.NoError(t, ResetTasks(ctx, []Task{t0}, "")) dbTask, err := FindOneId(ctx, t0.Id) assert.NoError(t, err) assert.True(t, dbTask.UnattainableDependency) @@ -5367,7 +5376,7 @@ func TestResetTasks(t *testing.T) { } assert.NoError(t, t0.Insert()) - assert.NoError(t, ResetTasks([]Task{t0}, "")) + assert.NoError(t, ResetTasks(ctx, []Task{t0}, "")) dbTask, err := FindOneId(ctx, t0.Id) assert.NoError(t, err) assert.False(t, dbTask.UnattainableDependency) diff --git a/model/task_lifecycle.go b/model/task_lifecycle.go index 2449a59bb34..f17b2cdc95e 100644 --- a/model/task_lifecycle.go +++ b/model/task_lifecycle.go @@ -84,7 +84,7 @@ func SetActiveState(ctx context.Context, caller string, active bool, tasks ...ta } if active { - if err := task.ActivateTasks(tasksToActivate, time.Now(), true, caller); err != nil { + if err := task.ActivateTasks(ctx, tasksToActivate, time.Now(), true, caller); err != nil { return errors.Wrap(err, "activating tasks") } versionIdsToActivate := []string{} @@ -102,7 +102,7 @@ func SetActiveState(ctx context.Context, caller string, active bool, tasks ...ta return errors.Wrap(err, 
"marking builds as activated") } } else { - if err := task.DeactivateTasks(tasksToActivate, true, caller); err != nil { + if err := task.DeactivateTasks(ctx, tasksToActivate, true, caller); err != nil { return errors.Wrap(err, "deactivating task") } } @@ -133,7 +133,7 @@ func SetActiveStateById(ctx context.Context, id, user string, active bool) error return SetActiveState(ctx, user, active, *t) } -func DisableTasks(caller string, tasks ...task.Task) error { +func DisableTasks(ctx context.Context, caller string, tasks ...task.Task) error { if len(tasks) == 0 { return nil } @@ -147,7 +147,7 @@ func DisableTasks(caller string, tasks ...task.Task) error { execTaskIDs = append(execTaskIDs, t.ExecutionTasks...) } - _, err := task.UpdateAll( + _, err := task.UpdateAll(ctx, task.ByIds(append(taskIDs, execTaskIDs...)), bson.M{"$set": bson.M{task.PriorityKey: evergreen.DisabledTaskPriority}}, ) @@ -166,7 +166,7 @@ func DisableTasks(caller string, tasks ...task.Task) error { event.LogTaskPriority(t.Id, t.Execution, caller, evergreen.DisabledTaskPriority) } - if err := task.DeactivateTasks(tasks, true, caller); err != nil { + if err := task.DeactivateTasks(ctx, tasks, true, caller); err != nil { return errors.Wrap(err, "deactivating dependencies") } @@ -196,7 +196,7 @@ func findMissingTasks(taskIDs []string, tasksPresent map[string]struct{}) ([]tas // DisableStaleContainerTasks disables all container tasks that have been // scheduled to run for a long time without actually dispatching the task. 
-func DisableStaleContainerTasks(caller string) error { +func DisableStaleContainerTasks(ctx context.Context, caller string) error { query := task.ScheduledContainerTasksQuery() query[task.ActivatedTimeKey] = bson.M{"$lte": time.Now().Add(-task.UnschedulableThreshold)} @@ -211,7 +211,7 @@ func DisableStaleContainerTasks(caller string) error { "caller": caller, }) - if err := DisableTasks(caller, tasks...); err != nil { + if err := DisableTasks(ctx, caller, tasks...); err != nil { return errors.Wrap(err, "disabled stale container tasks") } @@ -1844,7 +1844,7 @@ func MarkTasksReset(ctx context.Context, taskIds []string, caller string) error return errors.WithStack(err) } - if err = task.ResetTasks(tasks, caller); err != nil { + if err = task.ResetTasks(ctx, tasks, caller); err != nil { return errors.Wrap(err, "resetting tasks in database") } diff --git a/model/task_lifecycle_test.go b/model/task_lifecycle_test.go index 5e3e8985d43..c749ce6f0a8 100644 --- a/model/task_lifecycle_test.go +++ b/model/task_lifecycle_test.go @@ -82,7 +82,7 @@ func TestDisableStaleContainerTasks(t *testing.T) { tsk.ActivatedTime = time.Now().Add(-9000 * 24 * time.Hour) require.NoError(t, tsk.Insert()) - require.NoError(t, DisableStaleContainerTasks(t.Name())) + require.NoError(t, DisableStaleContainerTasks(ctx, t.Name())) dbTask, err := task.FindOneId(ctx, tsk.Id) require.NoError(t, err) @@ -95,7 +95,7 @@ func TestDisableStaleContainerTasks(t *testing.T) { tsk.ContainerAllocatedTime = time.Now().Add(-5000 * 24 * time.Hour) require.NoError(t, tsk.Insert()) - require.NoError(t, DisableStaleContainerTasks(t.Name())) + require.NoError(t, DisableStaleContainerTasks(ctx, t.Name())) dbTask, err := task.FindOneId(ctx, tsk.Id) require.NoError(t, err) @@ -106,7 +106,7 @@ func TestDisableStaleContainerTasks(t *testing.T) { tsk.ActivatedTime = time.Now() require.NoError(t, tsk.Insert()) - require.NoError(t, DisableStaleContainerTasks(t.Name())) + require.NoError(t, DisableStaleContainerTasks(ctx, 
t.Name())) dbTask, err := task.FindOneId(ctx, tsk.Id) require.NoError(t, err) @@ -119,7 +119,7 @@ func TestDisableStaleContainerTasks(t *testing.T) { tsk.Status = evergreen.TaskSucceeded require.NoError(t, tsk.Insert()) - require.NoError(t, DisableStaleContainerTasks(t.Name())) + require.NoError(t, DisableStaleContainerTasks(ctx, t.Name())) dbTask, err := task.FindOneId(ctx, tsk.Id) require.NoError(t, err) @@ -132,7 +132,7 @@ func TestDisableStaleContainerTasks(t *testing.T) { tsk.ExecutionPlatform = task.ExecutionPlatformHost require.NoError(t, tsk.Insert()) - require.NoError(t, DisableStaleContainerTasks(t.Name())) + require.NoError(t, DisableStaleContainerTasks(ctx, t.Name())) dbTask, err := task.FindOneId(ctx, tsk.Id) require.NoError(t, err) @@ -178,7 +178,7 @@ func TestDisableOneTask(t *testing.T) { for funcName, disable := range map[string]disableFunc{ "DisableTasks": func(t *testing.T, tsk task.Task) error { - return DisableTasks(t.Name(), tsk) + return DisableTasks(ctx, t.Name(), tsk) }, } { t.Run(funcName, func(t *testing.T) { @@ -314,7 +314,7 @@ func TestDisableManyTasks(t *testing.T) { require.NoError(t, et2.Insert()) require.NoError(t, et3.Insert()) - require.NoError(t, DisableTasks(t.Name(), et1, et2)) + require.NoError(t, DisableTasks(ctx, t.Name(), et1, et2)) dbDisplayTask, err := task.FindOneId(ctx, dt.Id) require.NoError(t, err) @@ -391,7 +391,7 @@ func TestDisableManyTasks(t *testing.T) { require.NoError(t, et3.Insert()) require.NoError(t, et4.Insert()) - require.NoError(t, DisableTasks(t.Name(), et1, et3, dt2)) + require.NoError(t, DisableTasks(ctx, t.Name(), et1, et3, dt2)) dbDisplayTask1, err := task.FindOneId(ctx, dt1.Id) require.NoError(t, err) diff --git a/units/check_blocked_tasks.go b/units/check_blocked_tasks.go index 798e1be7835..2fa097aeecb 100644 --- a/units/check_blocked_tasks.go +++ b/units/check_blocked_tasks.go @@ -157,7 +157,7 @@ func checkUnmarkedBlockingTasks(ctx context.Context, t *task.Task, dependencyCac 
deactivatedBlockingTasks, err := t.GetDeactivatedBlockingDependencies(ctx, dependencyCaches) catcher.Wrap(err, "getting blocked status") if err == nil && len(deactivatedBlockingTasks) > 0 { - err = task.DeactivateDependencies(deactivatedBlockingTasks, evergreen.CheckBlockedTasksActivator) + err = task.DeactivateDependencies(ctx, deactivatedBlockingTasks, evergreen.CheckBlockedTasksActivator) catcher.Add(err) } diff --git a/units/crons.go b/units/crons.go index 879da7fa689..d3cd70f62c8 100644 --- a/units/crons.go +++ b/units/crons.go @@ -1101,7 +1101,7 @@ func podAllocatorJobs(ctx context.Context, _ evergreen.Environment, ts time.Time return nil, nil } - if err := model.DisableStaleContainerTasks(evergreen.StaleContainerTaskMonitor); err != nil { + if err := model.DisableStaleContainerTasks(ctx, evergreen.StaleContainerTaskMonitor); err != nil { grip.Error(message.WrapError(err, message.Fields{ "message": "could not disable stale container tasks", "context": "pod allocation", diff --git a/units/task_stranded_cleanup.go b/units/task_stranded_cleanup.go index f9bb5aeff43..272acbb2e23 100644 --- a/units/task_stranded_cleanup.go +++ b/units/task_stranded_cleanup.go @@ -113,7 +113,7 @@ func (j *taskStrandedCleanupJob) fixTasksStuckDispatching(ctx context.Context) e } } if len(tasksToDeactivate) > 0 { - err = task.DeactivateTasks(tasksToDeactivate, true, j.ID()) + err = task.DeactivateTasks(ctx, tasksToDeactivate, true, j.ID()) catcher.Wrapf(err, "deactivating tasks exceeding the unschedulable threshold") grip.Info(message.Fields{ From a8cf22447d5b1403bff22c1d9f67e24db58eb025 Mon Sep 17 00:00:00 2001 From: Zackary Santana <64446617+ZackarySantana@users.noreply.github.com> Date: Thu, 23 Jan 2025 09:46:39 -0500 Subject: [PATCH 07/11] fix: merge main breakage --- scheduler/utilization_based_host_allocator.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scheduler/utilization_based_host_allocator.go b/scheduler/utilization_based_host_allocator.go index 
40a5fdf2f2b..202ae7b3d98 100644 --- a/scheduler/utilization_based_host_allocator.go +++ b/scheduler/utilization_based_host_allocator.go @@ -165,7 +165,7 @@ func evalHostUtilization(ctx context.Context, d distro.Distro, taskGroupData Tas // summing their estimated time left to completion, and dividing that number by maxDurationThreshold. // That estimate is then multiplied by the futureHostFraction coefficient, which is a fraction that allows us // to tune the final estimate up or down. - numFreeHosts, err := calcExistingFreeHosts(ctx, existingHosts, futureHostFraction, maxDurationThreshold) + expectedNumFreeHosts, err := calcExistingFreeHosts(ctx, existingHosts, futureHostFraction, maxDurationThreshold) if err != nil { return numNewHosts, expectedNumFreeHosts, err } From 8aaf92f86374098c883a19e2bd37863e63aa98d3 Mon Sep 17 00:00:00 2001 From: Zackary Santana <64446617+ZackarySantana@users.noreply.github.com> Date: Thu, 23 Jan 2025 09:54:10 -0500 Subject: [PATCH 08/11] feat: fix context usage for update functions --- db/db_utils.go | 30 ++++++++++++++++++------------ 1 file changed, 18 insertions(+), 12 deletions(-) diff --git a/db/db_utils.go b/db/db_utils.go index c3d811f1783..8e1ef24833a 100644 --- a/db/db_utils.go +++ b/db/db_utils.go @@ -235,15 +235,18 @@ func Update(collection string, query interface{}, update interface{}) error { // Update updates one matching document in the collection. 
func UpdateContext(ctx context.Context, collection string, query interface{}, update interface{}) error { - session, db, err := GetGlobalSessionFactory().GetContextSession(ctx) + res, err := evergreen.GetEnvironment().DB().Collection(collection).UpdateOne(ctx, + query, + update, + ) if err != nil { - grip.Errorf("error establishing db connection: %+v", err) - - return err + return errors.Wrapf(err, "updating task") + } + if res.MatchedCount == 0 { + return db.ErrNotFound } - defer session.Close() - return db.C(collection).Update(query, update) + return nil } func UpdateAllContext(ctx context.Context, collection string, query interface{}, update interface{}) (*db.ChangeInfo, error) { @@ -263,15 +266,18 @@ func UpdateAllContext(ctx context.Context, collection string, query interface{}, }) } - session, db, err := GetGlobalSessionFactory().GetContextSession(ctx) + res, err := evergreen.GetEnvironment().DB().Collection(collection).UpdateMany(ctx, + query, + update, + ) if err != nil { - grip.Errorf("error establishing db connection: %+v", err) - - return nil, err + return nil, errors.Wrapf(err, "updating task") + } + if res.MatchedCount == 0 { + return nil, db.ErrNotFound } - defer session.Close() - return db.C(collection).UpdateAll(query, update) + return &db.ChangeInfo{Updated: int(res.ModifiedCount)}, nil } // UpdateId updates one _id-matching document in the collection. 
From 2500578c4ec4727cfa3a4c80091cc566e2598a2c Mon Sep 17 00:00:00 2001 From: Zackary Santana <64446617+ZackarySantana@users.noreply.github.com> Date: Thu, 23 Jan 2025 14:20:08 -0500 Subject: [PATCH 09/11] fix: remove extra no documents matched --- db/db_utils.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/db/db_utils.go b/db/db_utils.go index 8e1ef24833a..ff8ff49b8e4 100644 --- a/db/db_utils.go +++ b/db/db_utils.go @@ -273,9 +273,6 @@ func UpdateAllContext(ctx context.Context, collection string, query interface{}, if err != nil { return nil, errors.Wrapf(err, "updating task") } - if res.MatchedCount == 0 { - return nil, db.ErrNotFound - } return &db.ChangeInfo{Updated: int(res.ModifiedCount)}, nil } From 781912cb14550f609f6a134125da586fa501b5b1 Mon Sep 17 00:00:00 2001 From: Zackary Santana <64446617+ZackarySantana@users.noreply.github.com> Date: Thu, 23 Jan 2025 14:41:16 -0500 Subject: [PATCH 10/11] fix: add set keyword for query --- model/task/task_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/model/task/task_test.go b/model/task/task_test.go index 787f8e26b00..be859a638fd 100644 --- a/model/task/task_test.go +++ b/model/task/task_test.go @@ -4555,7 +4555,7 @@ func (s *TaskConnectorFetchByIdSuite) TestFindByIdAndExecution() { err := UpdateOne( ctx, bson.M{IdKey: "task_1"}, - bson.M{CanResetKey: false}, + bson.M{"$set": bson.M{CanResetKey: false}}, ) s.NoError(err) testTask1.Execution += 1 From 2ff65335b545c05257e5576fa372bd61b3cb3ca9 Mon Sep 17 00:00:00 2001 From: Zackary Santana <64446617+ZackarySantana@users.noreply.github.com> Date: Thu, 23 Jan 2025 14:47:39 -0500 Subject: [PATCH 11/11] fix: test usage with len --- scheduler/planner_test.go | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/scheduler/planner_test.go b/scheduler/planner_test.go index b8196a28a4c..703e75a5f0c 100644 --- a/scheduler/planner_test.go +++ b/scheduler/planner_test.go @@ -119,7 +119,7 @@ func TestPlanner(t 
*testing.T) { cache := UnitCache{} one := task.Task{Id: "one"} cache.Create("one", one) - assert.Empty(t, cache.Export(ctx)) + assert.Zero(t, cache.Export(ctx).Len()) }) t.Run("ExportPropogatesTasks", func(t *testing.T) { cache := UnitCache{} @@ -128,7 +128,7 @@ func TestPlanner(t *testing.T) { cache.Create("one", one).SetDistro(&distro.Distro{}) cache.Create("two", two).SetDistro(&distro.Distro{}) plan := cache.Export(ctx) - assert.Len(t, plan, 2) + assert.Equal(t, plan.Len(), 2) for _, ts := range plan.units { ts.SetDistro(&distro.Distro{}) require.Len(t, ts.tasks, 1) @@ -143,7 +143,7 @@ func TestPlanner(t *testing.T) { cache.Create("one", one).SetDistro(&distro.Distro{}) cache.Create("two", one).SetDistro(&distro.Distro{}) plan := cache.Export(ctx) - assert.Len(t, plan, 1) + assert.Equal(t, plan.Len(), 1) }) }) t.Run("Unit", func(t *testing.T) { @@ -172,9 +172,9 @@ func TestPlanner(t *testing.T) { t.Run("HashCaches", func(t *testing.T) { unit := NewUnit(task.Task{Id: "foo"}) hash := unit.ID() - assert.Len(t, unit.Export(ctx), 1) + assert.Equal(t, unit.Export(ctx).Len(), 1) unit.Add(task.Task{Id: "bar"}) - assert.Len(t, unit.Export(ctx), 2) + assert.Equal(t, unit.Export(ctx).Len(), 2) newHash := unit.ID() assert.Equal(t, hash, newHash) }) @@ -392,7 +392,7 @@ func TestPlanner(t *testing.T) { }) t.Run("PrepareTaskPlan", func(t *testing.T) { t.Run("Noop", func(t *testing.T) { - assert.Empty(t, PrepareTasksForPlanning(ctx, &distro.Distro{}, []task.Task{})) + assert.Zero(t, PrepareTasksForPlanning(ctx, &distro.Distro{}, []task.Task{}).Len()) }) t.Run("TaskGroupsGrouped", func(t *testing.T) { plan := PrepareTasksForPlanning(ctx, &distro.Distro{}, []task.Task{ @@ -401,7 +401,7 @@ func TestPlanner(t *testing.T) { {Id: "three"}, }) - assert.Len(t, plan, 2) + assert.Equal(t, plan.Len(), 2) assert.Len(t, plan.Export(ctx), 3) }) t.Run("VersionsGrouped", func(t *testing.T) { @@ -415,7 +415,7 @@ func TestPlanner(t *testing.T) { {Id: "three", Version: "second"}, }) - 
assert.Len(t, plan, 2) + assert.Equal(t, plan.Len(), 2) assert.Len(t, plan.Export(ctx), 3) }) t.Run("VersionsAndTaskGroupsGrouped", func(t *testing.T) { @@ -432,7 +432,7 @@ func TestPlanner(t *testing.T) { {Id: "extra", Version: "first", Priority: 1}, }) - assert.Len(t, plan, 3) + assert.Equal(t, plan.Len(), 3) tasks := plan.Export(ctx) assert.Len(t, tasks, 6) assert.Equal(t, "one", tasks[0].TaskGroup) @@ -446,7 +446,7 @@ func TestPlanner(t *testing.T) { {Id: "other", DependsOn: []task.Dependency{{TaskId: "two"}}}, }) - require.Len(t, plan, 4, "keys:%s", plan.Keys()) + require.Equal(t, plan.Len(), 4, "keys:%s", plan.Keys()) tasks := plan.Export(ctx) require.Len(t, tasks, 4) assert.Equal(t, "three", tasks[3].Id) @@ -464,7 +464,7 @@ func TestPlanner(t *testing.T) { {Id: "two", DependsOn: []task.Dependency{{TaskId: "missing"}}}, }) - assert.Len(t, plan, 3) + assert.Equal(t, plan.Len(), 3) assert.Len(t, plan.Export(ctx), 3) }) })