diff --git a/internal/app/module_test.go b/internal/app/module_test.go index 0c6773b6..d5f832fe 100644 --- a/internal/app/module_test.go +++ b/internal/app/module_test.go @@ -72,7 +72,7 @@ func TestModule_MultipleFormat(t *testing.T) { tm.Send(tea.KeyMsg{Type: tea.KeyCtrlA}) tm.Type("f") waitFor(t, tm, func(s string) bool { - return matchPattern(t, "TaskGroup.*format", s) && + return matchPattern(t, "TaskGroup.*fmt", s) && matchPattern(t, `modules/a.*exited`, s) && matchPattern(t, `modules/b.*exited`, s) && matchPattern(t, `modules/c.*exited`, s) @@ -139,7 +139,7 @@ func TestModuleList_ReloadWorkspacesMultipleModules(t *testing.T) { tm.Send(tea.KeyMsg{Type: tea.KeyCtrlW}) waitFor(t, tm, func(s string) bool { - return matchPattern(t, "TaskGroup.*reload-workspace", s) && + return matchPattern(t, "TaskGroup.*workspace list", s) && matchPattern(t, "modules/a.*exited", s) && matchPattern(t, "modules/b.*exited", s) && matchPattern(t, "modules/c.*exited", s) @@ -320,7 +320,6 @@ func TestModule_MultipleDestroy(t *testing.T) { tm.Send(tea.KeyMsg{Type: tea.KeyCtrlA}) tm.Type("i") waitFor(t, tm, func(s string) bool { - t.Log(s) return matchPattern(t, "TaskGroup.*init", s) && matchPattern(t, `modules/a.*exited`, s) && matchPattern(t, `modules/b.*exited`, s) && @@ -332,7 +331,6 @@ func TestModule_MultipleDestroy(t *testing.T) { // Expect three modules to be listed, along with their default workspace. waitFor(t, tm, func(s string) bool { - t.Log(s) return matchPattern(t, "modules/a.*default", s) && matchPattern(t, "modules/b.*default", s) && matchPattern(t, "modules/c.*default", s) diff --git a/internal/app/terragrunt_test.go b/internal/app/terragrunt_test.go index 92064250..222cb56e 100644 --- a/internal/app/terragrunt_test.go +++ b/internal/app/terragrunt_test.go @@ -106,7 +106,6 @@ func TestTerragrunt_Dependencies(t *testing.T) { // Expect 6 apply tasks. waitFor(t, tm, func(s string) bool { - t.Log(s) return matchPattern(t, "TaskGroup.*apply.*6/6", s) && matchPattern(t, `modules/vpc.*default.*\+0~0-0`, s) && matchPattern(t, `modules/redis.*default.*\+0~0-0`, s) && diff --git a/internal/app/workspace_test.go b/internal/app/workspace_test.go index 5a2a954a..1cdbdcb4 100644 --- a/internal/app/workspace_test.go +++ b/internal/app/workspace_test.go @@ -210,7 +210,6 @@ func TestWorkspace_MultipleDestroy(t *testing.T) { tm.Send(tea.KeyMsg{Type: tea.KeyCtrlA}) tm.Type("i") waitFor(t, tm, func(s string) bool { - t.Log(s) return matchPattern(t, "TaskGroup.*init", s) && matchPattern(t, `modules/a.*exited`, s) && matchPattern(t, `modules/b.*exited`, s) && @@ -286,8 +285,8 @@ func TestWorkspace_Delete(t *testing.T) { tm := setupAndInitModuleWithTwoWorkspaces(t) - // Filter workspaces to only show dev workspace. This is the only to ensure - // the dev workspace is currently highlighted. + // Filter workspaces to only show dev workspace. This is the only way to + // ensure the dev workspace is currently highlighted. 
// Focus filter widget tm.Type("/") @@ -320,7 +319,7 @@ func TestWorkspace_Delete(t *testing.T) { tm.Type("y") waitFor(t, tm, func(s string) bool { - return matchPattern(t, `Task.*workspace delete.*dev.*modules/a.*exited`, s) && + return matchPattern(t, `Task.*workspace delete.*modules/a.*exited`, s) && strings.Contains(s, `Deleted workspace "dev"!`) }) } diff --git a/internal/module/service.go b/internal/module/service.go index 22d1d4a2..660ebffd 100644 --- a/internal/module/service.go +++ b/internal/module/service.go @@ -33,7 +33,7 @@ type ServiceOptions struct { } type taskCreator interface { - Create(spec task.CreateOptions) (*task.Task, error) + Create(spec task.Spec) (*task.Task, error) } type moduleTable interface { @@ -111,7 +111,7 @@ func (s *Service) Reload() (added []string, removed []string, err error) { } func (s *Service) loadTerragruntDependencies() error { - task, err := s.tasks.Create(task.CreateOptions{ + task, err := s.tasks.Create(task.Spec{ Parent: resource.GlobalResource, Command: []string{"graph-dependencies"}, Wait: true, @@ -194,32 +194,35 @@ func (s *Service) stripWorkdirFromPath(path string) (string, error) { } // Init invokes terraform init on the module. -func (s *Service) Init(moduleID resource.ID) (*task.Task, error) { - mod, err := s.Get(moduleID) - if err != nil { - return nil, fmt.Errorf("initializing module: %w", err) - } - - // create asynchronous task that runs terraform init - tsk, err := s.CreateTask(mod, task.CreateOptions{ +func (s *Service) Init(moduleID resource.ID) (task.Spec, error) { + return s.updateSpec(moduleID, task.Spec{ Command: []string{"init"}, Args: []string{"-input=false"}, Blocking: true, // The terraform plugin cache is not concurrency-safe, so only allow one // init task to run at any given time. Exclusive: s.pluginCache, - AfterCreate: func(*task.Task) { + AfterCreate: func(task *task.Task) { // Trigger a workspace reload if the module doesn't yet have a // current workspace + mod := task.Module().(*Module) if mod.CurrentWorkspaceID == nil { s.Publish(resource.UpdatedEvent, mod) } }, }) - if err != nil { - return nil, err - } - return tsk, nil +} + +func (s *Service) Format(moduleID resource.ID) (task.Spec, error) { + return s.updateSpec(moduleID, task.Spec{ + Command: []string{"fmt"}, + }) +} + +func (s *Service) Validate(moduleID resource.ID) (task.Spec, error) { + return s.updateSpec(moduleID, task.Spec{ + Command: []string{"validate"}, + }) } func (s *Service) List() []*Module { @@ -252,31 +255,13 @@ func (s *Service) SetCurrent(moduleID, workspaceID resource.ID) error { return nil } -func (s *Service) Format(moduleID resource.ID) (*task.Task, error) { +// updateSpec updates the task spec with common module settings. 
+func (s *Service) updateSpec(moduleID resource.ID, spec task.Spec) (task.Spec, error) { mod, err := s.table.Get(moduleID) if err != nil { - return nil, fmt.Errorf("formatting module: %w", err) + return task.Spec{}, err } - - return s.CreateTask(mod, task.CreateOptions{ - Command: []string{"fmt"}, - }) -} - -func (s *Service) Validate(moduleID resource.ID) (*task.Task, error) { - mod, err := s.table.Get(moduleID) - if err != nil { - return nil, fmt.Errorf("validating module: %w", err) - } - - return s.CreateTask(mod, task.CreateOptions{ - Command: []string{"validate"}, - }) -} - -// TODO: move this logic into task.Create -func (s *Service) CreateTask(mod *Module, opts task.CreateOptions) (*task.Task, error) { - opts.Parent = mod - opts.Path = mod.Path - return s.tasks.Create(opts) + spec.Parent = mod + spec.Path = mod.Path + return spec, nil } diff --git a/internal/run/service.go b/internal/run/service.go index 4289b39b..e5f32c64 100644 --- a/internal/run/service.go +++ b/internal/run/service.go @@ -1,7 +1,6 @@ package run import ( - "errors" "fmt" "slices" @@ -70,21 +69,26 @@ func NewService(opts ServiceOptions) *Service { } // Plan creates a plan task. -func (s *Service) Plan(workspaceID resource.ID, opts CreateOptions) (*task.Task, error) { - task, err := s.plan(workspaceID, opts) +func (s *Service) Plan(workspaceID resource.ID, opts CreateOptions) (task.Spec, error) { + spec, err := s.plan(workspaceID, opts) if err != nil { - s.logger.Error("creating plan task", "error", err) - return nil, err + s.logger.Error("creating plan spec", "error", err) + return task.Spec{}, err } - return task, nil + return spec, nil } -func (s *Service) plan(workspaceID resource.ID, opts CreateOptions) (*task.Task, error) { +func (s *Service) plan(workspaceID resource.ID, opts CreateOptions) (task.Spec, error) { run, err := s.newRun(workspaceID, opts) if err != nil { - return nil, err + return task.Spec{}, err } - task, err := s.createTask(run, task.CreateOptions{ + s.table.Add(run.ID, run) + + return task.Spec{ + Parent: run, + Path: run.ModulePath(), + Env: []string{workspace.TerraformEnv(run.WorkspaceName())}, Command: []string{"plan"}, Args: run.planArgs(), // TODO: explain why plan is blocking (?) @@ -107,12 +111,7 @@ func (s *Service) plan(workspaceID resource.ID, opts CreateOptions) (*task.Task, s.logger.Error("finishing plan", "error", err, "run", run) } }, - }) - if err != nil { - return nil, err - } - s.table.Add(run.ID, run) - return task, nil + }, nil } // Apply creates a task for a terraform apply. @@ -122,45 +121,7 @@ func (s *Service) plan(workspaceID resource.ID, opts CreateOptions) (*task.Task, // // If opts is nil, then it will apply an existing plan. The ID must specify an // existing run that has successfully created a plan. -func (s *Service) Apply(id resource.ID, opts *CreateOptions) (*task.Task, error) { - spec, _, err := s.createApplySpec(id, opts) - if err != nil { - return nil, err - } - return s.tasks.Create(spec) -} - -// MultiApply creates a task group of one or more apply tasks. See Apply() for -// info on parameters. -// -// You cannot apply a combination of destory and non-destroy plans, because that -// is incompatible with the dependency graph that is created to order the tasks. 
-func (s *Service) MultiApply(opts *CreateOptions, ids ...resource.ID) (*task.Group, error) { - if len(ids) == 0 { - return nil, errors.New("no IDs specified") - } - var destroy *bool - specs := make([]task.CreateOptions, 0, len(ids)) - for _, id := range ids { - spec, run, err := s.createApplySpec(id, opts) - if err != nil { - return nil, err - } - if destroy == nil { - destroy = &run.Destroy - } else if *destroy != run.Destroy { - return nil, errors.New("cannot apply a combination of destroy and non-destroy plans") - } - - specs = append(specs, spec) - } - // All tasks should have the same description, so use the first one. - desc := specs[0].Description - return s.tasks.CreateDependencyGroup(desc, *destroy, specs...) -} - -func (s *Service) createApplySpec(id resource.ID, opts *CreateOptions) (task.CreateOptions, *Run, error) { - // Create or retrieve existing run. +func (s *Service) Apply(id resource.ID, opts *CreateOptions) (task.Spec, error) { var ( run *Run err error @@ -177,15 +138,22 @@ func (s *Service) createApplySpec(id resource.ID, opts *CreateOptions) (task.Cre } } if err != nil { - return task.CreateOptions{}, nil, err + return task.Spec{}, err } s.table.Add(run.ID, run) - spec := task.CreateOptions{ + spec := task.Spec{ + Parent: run, + Path: run.ModulePath(), Command: []string{"apply"}, Args: run.applyArgs(), + Env: []string{workspace.TerraformEnv(run.WorkspaceName())}, Blocking: true, Description: ApplyTaskDescription(run.Destroy), + // If terragrunt is in use then respect module dependencies. + RespectModuleDependencies: s.terragrunt, + // Module dependencies are reversed for a destroy. + InverseDependencyOrder: run.Destroy, AfterQueued: func(*task.Task) { run.updateStatus(ApplyQueued) }, @@ -205,14 +173,19 @@ func (s *Service) createApplySpec(id resource.ID, opts *CreateOptions) (task.Cre } if !s.disableReloadAfterApply { - s.states.Reload(run.WorkspaceID()) + spec, err := s.states.Reload(run.WorkspaceID()) + if err != nil { + s.logger.Error("reloading state following apply", "error", err, "run", run) + return + } + if _, err := s.tasks.Create(spec); err != nil { + s.logger.Error("reloading state following apply", "error", err, "run", run) + return + } } }, } - if err := s.addWorkspaceAndPathToTaskSpec(run, &spec); err != nil { - return task.CreateOptions{}, nil, err - } - return spec, run, nil + return spec, nil } func (s *Service) Get(runID resource.ID) (*Run, error) { @@ -281,28 +254,3 @@ func (s *Service) delete(id resource.ID) error { s.table.Delete(id) return nil } - -func (s *Service) createTask(run *Run, opts task.CreateOptions) (*task.Task, error) { - if err := s.addWorkspaceAndPathToTaskSpec(run, &opts); err != nil { - return nil, err - } - return s.tasks.Create(opts) -} - -func (s *Service) addWorkspaceAndPathToTaskSpec(run *Run, opts *task.CreateOptions) error { - opts.Parent = run - - ws, err := s.workspaces.Get(run.WorkspaceID()) - if err != nil { - return err - } - opts.Env = []string{ws.TerraformEnv()} - - mod, err := s.modules.Get(ws.ModuleID()) - if err != nil { - return err - } - opts.Path = mod.Path - - return nil -} diff --git a/internal/state/service.go b/internal/state/service.go index 785946a3..3fb1fcec 100644 --- a/internal/state/service.go +++ b/internal/state/service.go @@ -43,7 +43,7 @@ func NewService(opts ServiceOptions) *Service { go func() { for event := range opts.Workspaces.Subscribe() { if event.Type == resource.CreatedEvent { - _, _ = svc.Reload(event.Payload.ID) + _, _ = svc.createReloadTask(event.Payload.ID) } } }() @@ -72,8 +72,8 
@@ func (s *Service) GetResource(resourceID resource.ID) (*Resource, error) { // Reload creates a task to repopulate the local cache of the state of the given // workspace. -func (s *Service) Reload(workspaceID resource.ID) (*task.Task, error) { - return s.createTask(workspaceID, task.CreateOptions{ +func (s *Service) Reload(workspaceID resource.ID) (task.Spec, error) { + return s.createTaskSpec(workspaceID, task.Spec{ Command: []string{"state", "pull"}, JSON: true, AfterExited: func(t *task.Task) { @@ -102,12 +102,20 @@ func (s *Service) Reload(workspaceID resource.ID) (*task.Task, error) { }) } -func (s *Service) Delete(workspaceID resource.ID, addrs ...ResourceAddress) (*task.Task, error) { +func (s *Service) createReloadTask(workspaceID resource.ID) (*task.Task, error) { + spec, err := s.Reload(workspaceID) + if err != nil { + return nil, err + } + return s.tasks.Create(spec) +} + +func (s *Service) Delete(workspaceID resource.ID, addrs ...ResourceAddress) (task.Spec, error) { addrStrings := make([]string, len(addrs)) for i, addr := range addrs { addrStrings[i] = string(addr) } - return s.createTask(workspaceID, task.CreateOptions{ + return s.createTaskSpec(workspaceID, task.Spec{ Blocking: true, Command: []string{"state", "rm"}, Args: addrStrings, @@ -115,13 +123,13 @@ func (s *Service) Delete(workspaceID resource.ID, addrs ...ResourceAddress) (*ta s.logger.Error("deleting resources", "error", t.Err, "resources", addrs) }, AfterExited: func(t *task.Task) { - s.Reload(workspaceID) + s.createReloadTask(workspaceID) }, }) } -func (s *Service) Taint(workspaceID resource.ID, addr ResourceAddress) (*task.Task, error) { - return s.createTask(workspaceID, task.CreateOptions{ +func (s *Service) Taint(workspaceID resource.ID, addr ResourceAddress) (task.Spec, error) { + return s.createTaskSpec(workspaceID, task.Spec{ Blocking: true, Command: []string{"taint"}, Args: []string{string(addr)}, @@ -129,13 +137,13 @@ func (s *Service) Taint(workspaceID resource.ID, addr ResourceAddress) (*task.Ta s.logger.Error("tainting resource", "error", t.Err, "resource", addr) }, AfterExited: func(t *task.Task) { - s.Reload(workspaceID) + s.createReloadTask(workspaceID) }, }) } -func (s *Service) Untaint(workspaceID resource.ID, addr ResourceAddress) (*task.Task, error) { - return s.createTask(workspaceID, task.CreateOptions{ +func (s *Service) Untaint(workspaceID resource.ID, addr ResourceAddress) (task.Spec, error) { + return s.createTaskSpec(workspaceID, task.Spec{ Blocking: true, Command: []string{"untaint"}, Args: []string{string(addr)}, @@ -143,13 +151,13 @@ func (s *Service) Untaint(workspaceID resource.ID, addr ResourceAddress) (*task. 
s.logger.Error("untainting resource", "error", t.Err, "resource", addr) }, AfterExited: func(t *task.Task) { - s.Reload(workspaceID) + s.createReloadTask(workspaceID) }, }) } -func (s *Service) Move(workspaceID resource.ID, src, dest ResourceAddress) (*task.Task, error) { - return s.createTask(workspaceID, task.CreateOptions{ +func (s *Service) Move(workspaceID resource.ID, src, dest ResourceAddress) (task.Spec, error) { + return s.createTaskSpec(workspaceID, task.Spec{ Blocking: true, Command: []string{"state", "mv"}, Args: []string{string(src), string(dest)}, @@ -157,20 +165,20 @@ func (s *Service) Move(workspaceID resource.ID, src, dest ResourceAddress) (*tas s.logger.Error("moving resource", "error", t.Err, "resources", src) }, AfterExited: func(t *task.Task) { - s.Reload(workspaceID) + s.createReloadTask(workspaceID) }, }) } // TODO: move this logic into task.Create -func (s *Service) createTask(workspaceID resource.ID, opts task.CreateOptions) (*task.Task, error) { +func (s *Service) createTaskSpec(workspaceID resource.ID, opts task.Spec) (task.Spec, error) { ws, err := s.workspaces.Get(workspaceID) if err != nil { - return nil, err + return task.Spec{}, err } opts.Parent = ws opts.Env = []string{ws.TerraformEnv()} opts.Path = ws.ModulePath() - return s.tasks.Create(opts) + return opts, nil } diff --git a/internal/task/enqueuer_test.go b/internal/task/enqueuer_test.go index 460c1161..902da7e2 100644 --- a/internal/task/enqueuer_test.go +++ b/internal/task/enqueuer_test.go @@ -19,24 +19,24 @@ func TestEnqueuer(t *testing.T) { run1 := resource.New(resource.Run, ws1) run2 := resource.New(resource.Run, ws1) - mod1Task1 := f.newTask(CreateOptions{Parent: mod1}) - mod1TaskBlocking1 := f.newTask(CreateOptions{Parent: mod1, Blocking: true}) + mod1Task1 := f.newTask(Spec{Parent: mod1}) + mod1TaskBlocking1 := f.newTask(Spec{Parent: mod1, Blocking: true}) - ws1Task1 := f.newTask(CreateOptions{Parent: ws1}) - ws1Task2 := f.newTask(CreateOptions{Parent: ws1}) - ws1TaskBlocking1 := f.newTask(CreateOptions{Parent: ws1, Blocking: true}) - ws1TaskBlocking2 := f.newTask(CreateOptions{Parent: ws1, Blocking: true}) - ws1TaskBlocking3 := f.newTask(CreateOptions{Parent: ws1, Blocking: true}) - ws1TaskImmediate := f.newTask(CreateOptions{Parent: ws1, Immediate: true}) - ws1TaskDependOnTask1 := f.newTask(CreateOptions{Parent: ws1, DependsOn: []resource.ID{ws1Task1.ID}}) + ws1Task1 := f.newTask(Spec{Parent: ws1}) + ws1Task2 := f.newTask(Spec{Parent: ws1}) + ws1TaskBlocking1 := f.newTask(Spec{Parent: ws1, Blocking: true}) + ws1TaskBlocking2 := f.newTask(Spec{Parent: ws1, Blocking: true}) + ws1TaskBlocking3 := f.newTask(Spec{Parent: ws1, Blocking: true}) + ws1TaskImmediate := f.newTask(Spec{Parent: ws1, Immediate: true}) + ws1TaskDependOnTask1 := f.newTask(Spec{Parent: ws1, DependsOn: []resource.ID{ws1Task1.ID}}) - ws1TaskCompleted := f.newTask(CreateOptions{Parent: ws1}) + ws1TaskCompleted := f.newTask(Spec{Parent: ws1}) ws1TaskCompleted.updateState(Exited) - ws1TaskDependOnCompletedTask := f.newTask(CreateOptions{Parent: ws1, DependsOn: []resource.ID{ws1TaskCompleted.ID}}) + ws1TaskDependOnCompletedTask := f.newTask(Spec{Parent: ws1, DependsOn: []resource.ID{ws1TaskCompleted.ID}}) - run1TaskBlocking1 := f.newTask(CreateOptions{Parent: run1, Blocking: true}) - run2Task1 := f.newTask(CreateOptions{Parent: run2}) + run1TaskBlocking1 := f.newTask(Spec{Parent: run1, Blocking: true}) + run2Task1 := f.newTask(Spec{Parent: run2}) tests := []struct { name string diff --git a/internal/task/group.go 
b/internal/task/group.go index 9ba1a72b..5aeaac72 100644 --- a/internal/task/group.go +++ b/internal/task/group.go @@ -1,7 +1,6 @@ package task import ( - "errors" "slices" "time" @@ -17,37 +16,6 @@ type Group struct { CreateErrors []error } -// Func is a function that creates a task. -type Func func(resource.ID) (*Task, error) - -// newGroup creates a task group, invoking the provided function on each id to -// each task. If the task is successfully created it is added to the group; -// otherwise the error is added to the group. -func newGroup(cmd string, fn Func, ids ...resource.ID) (*Group, error) { - g := NewEmptyGroup(cmd) - for _, id := range ids { - task, err := fn(id) - if err != nil { - g.CreateErrors = append(g.CreateErrors, err) - } else { - g.Tasks = append(g.Tasks, task) - } - } - // If no tasks were created, then return error. - if len(g.Tasks) == 0 { - return nil, errors.New("all tasks failed to be created") - } - return g, nil -} - -func NewEmptyGroup(cmd string) *Group { - return &Group{ - Common: resource.New(resource.TaskGroup, resource.GlobalResource), - Created: time.Now(), - Command: cmd, - } -} - func (g *Group) String() string { return g.Command } func (g *Group) IncludesTask(taskID resource.ID) bool { diff --git a/internal/task/group_builder.go b/internal/task/group_builder.go index 2ed0b687..ec5266ee 100644 --- a/internal/task/group_builder.go +++ b/internal/task/group_builder.go @@ -7,19 +7,19 @@ import ( ) type taskCreator interface { - Create(spec CreateOptions) (*Task, error) + Create(spec Spec) (*Task, error) } -// newGroupWithDependencies constructs a graph from the given task specs. -func newGroupWithDependencies(svc taskCreator, cmd string, reverse bool, specs ...CreateOptions) (*Group, error) { - b := groupBuilder{ - g: NewEmptyGroup(cmd), - nodes: make(map[resource.ID]*groupBuilderNode), - tasks: svc, +// createDependentTasks creates tasks whilst respecting their modules' +// dependencies. +func createDependentTasks(svc taskCreator, reverse bool, specs ...Spec) ([]*Task, error) { + b := dependencyGraphBuilder{ + nodes: make(map[resource.ID]*dependencyGraphNode), + taskCreator: svc, } - // Build dependency graph. Each node in the graph is a module and the specs - // that belong to that module. Once the graph is built and dependencies are - // established, only then are tasks created from the specs. + // Build dependency graph. Each node in the graph is a module together with + // the specs that belong to that module. Once the graph is built and + // dependencies are established, only then are tasks created from the specs. // // Specs that don't belong to a module don't have any dependencies so tasks // are built from these specs immediately. @@ -28,7 +28,7 @@ func newGroupWithDependencies(svc taskCreator, cmd string, reverse bool, specs . modID := mod.GetID() node, ok := b.nodes[modID] if !ok { - node = &groupBuilderNode{module: mod} + node = &dependencyGraphNode{module: mod} } node.specs = append(node.specs, spec) b.nodes[modID] = node @@ -55,32 +55,33 @@ func newGroupWithDependencies(svc taskCreator, cmd string, reverse bool, specs . } } - if len(b.g.Tasks) == 0 { - return b.g, fmt.Errorf("failed to create all %d tasks; see logs", len(b.g.CreateErrors)) + if len(b.tasks) == 0 { + return nil, fmt.Errorf("failed to create all %d tasks; see logs", len(b.createErrors)) } - return b.g, nil + return b.tasks, nil } -// groupBuilder builds a task group. 
-type groupBuilder struct { - g *Group - tasks taskCreator - nodes map[resource.ID]*groupBuilderNode +// dependencyGraphBuilder builds a graph of dependencies +type dependencyGraphBuilder struct { + tasks []*Task + createErrors []error + nodes map[resource.ID]*dependencyGraphNode + + taskCreator } -// groupBuilderNode represents a group of task specs that belong to the same -// module. -type groupBuilderNode struct { +// dependencyGraphNode represents a module in a dependency graph +type dependencyGraphNode struct { module resource.Resource - specs []CreateOptions + specs []Spec created []resource.ID - in, out []*groupBuilderNode + in, out []*dependencyGraphNode visited bool tasksCreated bool } // visit nodes recursively, populating the in and out degrees. -func (b *groupBuilder) visit(id resource.ID, n *groupBuilderNode) { +func (b *dependencyGraphBuilder) visit(id resource.ID, n *dependencyGraphNode) { n.visited = true for _, id := range n.module.Dependencies() { @@ -94,7 +95,7 @@ func (b *groupBuilder) visit(id resource.ID, n *groupBuilderNode) { } } -func (b *groupBuilder) visitAndCreateTasks(n *groupBuilderNode) { +func (b *dependencyGraphBuilder) visitAndCreateTasks(n *dependencyGraphNode) { n.tasksCreated = true var dependsOn []resource.ID @@ -114,7 +115,7 @@ func (b *groupBuilder) visitAndCreateTasks(n *groupBuilderNode) { } } -func (b *groupBuilder) visitAndCreateTasksInReverse(n *groupBuilderNode) { +func (b *dependencyGraphBuilder) visitAndCreateTasksInReverse(n *dependencyGraphNode) { n.tasksCreated = true var dependsOn []resource.ID @@ -134,12 +135,12 @@ func (b *groupBuilder) visitAndCreateTasksInReverse(n *groupBuilderNode) { } } -func (b *groupBuilder) createTask(spec CreateOptions) *Task { - task, err := b.tasks.Create(spec) +func (b *dependencyGraphBuilder) createTask(spec Spec) *Task { + task, err := b.Create(spec) if err != nil { - b.g.CreateErrors = append(b.g.CreateErrors, err) + b.createErrors = append(b.createErrors, err) } else { - b.g.Tasks = append(b.g.Tasks, task) + b.tasks = append(b.tasks, task) } return task } diff --git a/internal/task/group_builder_test.go b/internal/task/group_builder_test.go index 3988ea29..5ce170ba 100644 --- a/internal/task/group_builder_test.go +++ b/internal/task/group_builder_test.go @@ -10,7 +10,7 @@ import ( type fakeTaskCreator struct{} -func (f *fakeTaskCreator) Create(spec CreateOptions) (*Task, error) { +func (f *fakeTaskCreator) Create(spec Spec) (*Task, error) { return (&factory{}).newTask(spec), nil } @@ -23,15 +23,15 @@ func TestNewGroupWithDependencies(t *testing.T) { frontend := resource.New(resource.Module, resource.GlobalResource).WithDependencies(backend.ID, vpc.ID) mq := resource.New(resource.Module, resource.GlobalResource) - vpcSpec := CreateOptions{Parent: vpc, Path: "vpc"} - mysqlSpec := CreateOptions{Parent: mysql, Path: "mysql"} - redisSpec := CreateOptions{Parent: redis, Path: "redis"} - backendSpec := CreateOptions{Parent: backend, Path: "backend"} - frontendSpec := CreateOptions{Parent: frontend, Path: "frontend"} - mqSpec := CreateOptions{Parent: mq, Path: "mq"} + vpcSpec := Spec{Parent: vpc, Path: "vpc"} + mysqlSpec := Spec{Parent: mysql, Path: "mysql"} + redisSpec := Spec{Parent: redis, Path: "redis"} + backendSpec := Spec{Parent: backend, Path: "backend"} + frontendSpec := Spec{Parent: frontend, Path: "frontend"} + mqSpec := Spec{Parent: mq, Path: "mq"} t.Run("normal order", func(t *testing.T) { - got, err := newGroupWithDependencies(&fakeTaskCreator{}, "apply", false, + got, err := 
createDependentTasks(&fakeTaskCreator{}, false, vpcSpec, mysqlSpec, redisSpec, @@ -41,18 +41,18 @@ func TestNewGroupWithDependencies(t *testing.T) { ) require.NoError(t, err) - if assert.Len(t, got.Tasks, 6) { - vpcTask := hasDependencies(t, got.Tasks, "vpc") // 0 dependencies - mysqlTask := hasDependencies(t, got.Tasks, "mysql", vpcTask) - redisTask := hasDependencies(t, got.Tasks, "redis", vpcTask) - backendTask := hasDependencies(t, got.Tasks, "backend", vpcTask, mysqlTask, redisTask) - _ = hasDependencies(t, got.Tasks, "frontend", vpcTask, backendTask) - _ = hasDependencies(t, got.Tasks, "mq") + if assert.Len(t, got, 6) { + vpcTask := hasDependencies(t, got, "vpc") // 0 dependencies + mysqlTask := hasDependencies(t, got, "mysql", vpcTask) + redisTask := hasDependencies(t, got, "redis", vpcTask) + backendTask := hasDependencies(t, got, "backend", vpcTask, mysqlTask, redisTask) + _ = hasDependencies(t, got, "frontend", vpcTask, backendTask) + _ = hasDependencies(t, got, "mq") } }) t.Run("reverse order", func(t *testing.T) { - got, err := newGroupWithDependencies(&fakeTaskCreator{}, "apply", true, + got, err := createDependentTasks(&fakeTaskCreator{}, true, vpcSpec, mysqlSpec, redisSpec, @@ -62,13 +62,13 @@ func TestNewGroupWithDependencies(t *testing.T) { ) require.NoError(t, err) - if assert.Len(t, got.Tasks, 6) { - frontendTask := hasDependencies(t, got.Tasks, "frontend") // 0 dependencies - backendTask := hasDependencies(t, got.Tasks, "backend", frontendTask) - mysqlTask := hasDependencies(t, got.Tasks, "mysql", backendTask) - redisTask := hasDependencies(t, got.Tasks, "redis", backendTask) - _ = hasDependencies(t, got.Tasks, "vpc", mysqlTask, redisTask, backendTask, frontendTask) - _ = hasDependencies(t, got.Tasks, "mq") + if assert.Len(t, got, 6) { + frontendTask := hasDependencies(t, got, "frontend") // 0 dependencies + backendTask := hasDependencies(t, got, "backend", frontendTask) + mysqlTask := hasDependencies(t, got, "mysql", backendTask) + redisTask := hasDependencies(t, got, "redis", backendTask) + _ = hasDependencies(t, got, "vpc", mysqlTask, redisTask, backendTask, frontendTask) + _ = hasDependencies(t, got, "mq") } }) } diff --git a/internal/task/service.go b/internal/task/service.go index 892d48a3..e038d2e2 100644 --- a/internal/task/service.go +++ b/internal/task/service.go @@ -2,7 +2,9 @@ package task import ( "errors" + "fmt" "slices" + "time" "github.com/leg100/pug/internal" "github.com/leg100/pug/internal/logging" @@ -59,8 +61,8 @@ func NewService(opts ServiceOptions) *Service { // Create a task. The task is placed into a pending state and requires enqueuing // before it'll be processed. -func (s *Service) Create(opts CreateOptions) (*Task, error) { - task := s.newTask(opts) +func (s *Service) Create(spec Spec) (*Task, error) { + task := s.newTask(spec) s.logger.Debug("created task", "task", task) @@ -69,8 +71,8 @@ func (s *Service) Create(opts CreateOptions) (*Task, error) { // Increment counter of number of live tasks *s.counter++ - if opts.AfterCreate != nil { - opts.AfterCreate(task) + if spec.AfterCreate != nil { + spec.AfterCreate(task) } wait := make(chan error, 1) @@ -83,43 +85,80 @@ func (s *Service) Create(opts CreateOptions) (*Task, error) { } s.logger.Debug("completed task", "task", task) }() - if opts.Wait { + if spec.Wait { return task, <-wait } return task, nil } -// CreateGroup creates a task group, creating tasks by invoking the provided -// func with each of the provided IDs. 
-func (s *Service) CreateGroup(cmd string, fn Func, ids ...resource.ID) (*Group, error) {
-	group, err := newGroup(cmd, fn, ids...)
-	if err != nil {
-		return nil, err
-	}
-
-	s.logger.Debug("created task group", "group", group)
-
-	// Add to db
-	s.AddGroup(group)
-
-	return group, nil
-}
-
-func (s *Service) CreateDependencyGroup(cmd string, reverse bool, opts ...CreateOptions) (*Group, error) {
-	if len(opts) == 0 {
+// Create a task group from one or more task specs. An error is returned if zero
+// specs are provided, or if it fails to create at least one task.
+func (s *Service) CreateGroup(specs ...Spec) (*Group, error) {
+	if len(specs) == 0 {
 		return nil, errors.New("no specs provided")
 	}
+	g := &Group{
+		Common:  resource.New(resource.TaskGroup, resource.GlobalResource),
+		Created: time.Now(),
+	}
+	// Validate specifications. There are some settings that are incompatible
+	// with one another within a task group.
+	var (
+		respectModuleDependencies *bool
+		inverseDependencyOrder    *bool
+	)
+	for _, spec := range specs {
+		// All RespectModuleDependencies settings must be the same.
+		if respectModuleDependencies == nil {
+			respectModuleDependencies = &spec.RespectModuleDependencies
+		} else if *respectModuleDependencies != spec.RespectModuleDependencies {
+			return nil, fmt.Errorf("not all specs share the same respect-module-dependencies setting")
+		}
+		// All InverseDependencyOrder settings must be the same.
+		if inverseDependencyOrder == nil {
+			inverseDependencyOrder = &spec.InverseDependencyOrder
+		} else if *inverseDependencyOrder != spec.InverseDependencyOrder {
+			return nil, fmt.Errorf("not all specs share the same inverse-dependency-order setting")
+		}
+	}
+	if *respectModuleDependencies {
+		tasks, err := createDependentTasks(s, *inverseDependencyOrder, specs...)
+		if err != nil {
+			return nil, err
+		}
+		g.Tasks = tasks
+	} else {
+		for _, spec := range specs {
+			task, err := s.Create(spec)
+			if err != nil {
+				g.CreateErrors = append(g.CreateErrors, err)
+				continue
+			}
+			g.Tasks = append(g.Tasks, task)
+		}
+	}
+	if len(g.Tasks) == 0 {
+		return g, errors.New("all tasks failed to be created")
+	}
 
-	group, err := newGroupWithDependencies(s, cmd, reverse, opts...)
-	if err != nil {
-		return nil, err
+	for _, task := range g.Tasks {
+		if g.Command == "" {
+			g.Command = task.String()
+		} else if g.Command != task.String() {
+			// Detected that not all tasks have the same command, so name the
+			// task group to reflect that multiple commands comprise the group.
+			//
+			// TODO: make a constant
+			g.Command = "multi"
+		}
 	}
-	s.logger.Debug("created task group", "group", group)
+
+	s.logger.Debug("created task group", "group", g)
 
 	// Add to db
-	s.AddGroup(group)
+	s.AddGroup(g)
 
-	return group, nil
+	return g, nil
 }
 
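A quick sketch of how callers are expected to drive the new `CreateGroup` now that the `cmd`/`fn` signature is gone. This assumes a `*task.Service` named `svc` wired up elsewhere; the specs themselves are illustrative only:

```go
// Sketch only: all specs must agree on their dependency settings, or
// CreateGroup returns an error before creating any tasks.
specs := []task.Spec{
	{Command: []string{"apply"}, Path: "modules/a", RespectModuleDependencies: true},
	{Command: []string{"apply"}, Path: "modules/b", RespectModuleDependencies: true},
}
group, err := svc.CreateGroup(specs...)
if err != nil {
	return err // zero specs, mixed settings, or every task failed
}
// Every task shares the same command, so the group is named "apply";
// mixed commands would name it "multi".
fmt.Println(group)
```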
 // AddGroup adds a task group to the DB.
@@ -127,16 +166,6 @@ func (s *Service) AddGroup(group *Group) {
 	s.groups.Add(group.ID, group)
 }
 
-// Retry creates a new task that has all the properties of the task with the
-// given ID.
-func (s *Service) Retry(taskID resource.ID) (*Task, error) {
-	task, err := s.Get(taskID)
-	if err != nil {
-		return nil, err
-	}
-	return s.Create(task.createOptions)
-}
-
 // Enqueue moves the task onto the global queue for processing.
 func (s *Service) Enqueue(taskID resource.ID) (*Task, error) {
 	task, err := s.tasks.Update(taskID, func(existing *Task) error {
diff --git a/internal/task/spec.go b/internal/task/spec.go
new file mode 100644
index 00000000..4b6d40a5
--- /dev/null
+++ b/internal/task/spec.go
@@ -0,0 +1,68 @@
+package task
+
+import "github.com/leg100/pug/internal/resource"
+
+// Spec is a specification for creating a task.
+type Spec struct {
+	// Resource that the task belongs to.
+	Parent resource.Resource
+	// Program command and any sub commands, e.g. plan, state rm, etc.
+	Command []string
+	// Args to pass to program.
+	Args []string
+	// Path relative to the pug working directory in which to run the command.
+	Path string
+	// Environment variables.
+	Env []string
+	// A blocking task blocks other tasks from running on the module or
+	// workspace.
+	Blocking bool
+	// A globally exclusive task: at most one such task can be running at a time.
+	Exclusive bool
+	// Set to true to indicate that the task produces JSON output.
+	JSON bool
+	// Skip the queue and start the task immediately.
+	Immediate bool
+	// Wait blocks until the task has finished.
+	Wait bool
+	// DependsOn are other tasks that all must successfully exit before the
+	// task can be enqueued. If any of the other tasks are canceled or error
+	// then the task will be canceled.
+	DependsOn []resource.ID
+	// Description assigns an optional description to the task to display to the
+	// user, overriding the default of displaying the command.
+	Description string
+	// RespectModuleDependencies, when true, ensures the task respects its
+	// module's dependencies: if module A depends on module B, and a task is
+	// specified for both modules, then the task for module A is only started
+	// once the task for module B has completed. This option only makes sense
+	// in the context of a task group, which constructs tasks from multiple
+	// specs. All specs must set RespectModuleDependencies to the same value;
+	// otherwise an error is raised.
+	RespectModuleDependencies bool
+	// InverseDependencyOrder inverts the order of module dependencies, i.e. if
+	// module A depends on module B, then a task specified for module B will
+	// only be started once any tasks specified on module A have completed. This
+	// is useful when carrying out a `terraform destroy`. This option only takes
+	// effect when RespectModuleDependencies is true, and the spec is specified
+	// as part of a task group. All specs in the task group must set
+	// InverseDependencyOrder to the same value; otherwise an error is raised.
+	InverseDependencyOrder bool
+	// Call this function after the task has successfully finished.
+	AfterExited func(*Task)
+	// Call this function after the task is enqueued.
+	AfterQueued func(*Task)
+	// Call this function after the task starts running.
+	AfterRunning func(*Task)
+	// Call this function after the task fails with an error.
+	AfterError func(*Task)
+	// Call this function after the task is successfully canceled.
+	AfterCanceled func(*Task)
+	// Call this function after the task is successfully created.
+	AfterCreate func(*Task)
+	// Call this function after the task terminates for whatever reason.
+	AfterFinish func(*Task)
+}
+
+// SpecFunc is a function that creates a spec.
+type SpecFunc func(resource.ID) (Spec, error)
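To illustrate the contract the new type establishes, a hedged sketch of a hypothetical caller, where `modules` and `tasks` stand in for the module and task services:

```go
// Sketch: services now return a Spec rather than creating the task
// themselves; the caller decides when and how to materialize it.
spec, err := modules.Init(moduleID)
if err != nil {
	return err
}
// Either create a single task from the spec...
initTask, err := tasks.Create(spec)
// ...or combine it with other specs into a task group:
// group, err := tasks.CreateGroup(spec, fmtSpec, validateSpec)
```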
diff --git a/internal/task/task.go b/internal/task/task.go
index fee48cd4..43fa0e5e 100644
--- a/internal/task/task.go
+++ b/internal/task/task.go
@@ -59,9 +59,9 @@ type Task struct {
 	// and out of a status.
 	timestamps map[Status]statusTimestamps
 
-	// Retain a copy of the options used to originally create the task so that
+	// Retain a copy of the Spec used to originally create the task so that
 	// the task can be retried.
-	createOptions CreateOptions
+	Spec Spec
 
 	// Call this function after the task has successfully finished
 	AfterExited func(*Task)
@@ -96,61 +96,16 @@ type factory struct {
 	terragrunt bool
 }
 
-type CreateOptions struct {
-	// Resource that the task belongs to.
-	Parent resource.Resource
-	// Program command and any sub commands, e.g. plan, state rm, etc.
-	Command []string
-	// Args to pass to program.
-	Args []string
-	// Path relative to the pug working directory in which to run the command.
-	Path string
-	// Environment variables.
-	Env []string
-	// A blocking task blocks other tasks from running on the module or
-	// workspace.
-	Blocking bool
-	// Globally exclusive task - at most only one such task can be running
-	Exclusive bool
-	// Set to true to indicate that the task produces JSON output
-	JSON bool
-	// Skip queue and immediately start task
-	Immediate bool
-	// Wait blocks until the task has finished
-	Wait bool
-	// DependsOn are other tasks that all must successfully exit before the
-	// task can be enqueued. If any of the other tasks are canceled or error
-	// then the task will be canceled.
-	DependsOn []resource.ID
-	// Description assigns an optional description to the task to display to the
-	// user, overriding the default of displaying the command.
-	Description string
-	// Call this function after the task has successfully finished
-	AfterExited func(*Task)
-	// Call this function after the task is enqueued.
-	AfterQueued func(*Task)
-	// Call this function after the task starts running.
-	AfterRunning func(*Task)
-	// Call this function after the task fails with an error
-	AfterError func(*Task)
-	// Call this function after the task is successfully canceled
-	AfterCanceled func(*Task)
-	// Call this function after the task is successfully created
-	AfterCreate func(*Task)
-	// Call this function after the task terminates for whatever reason.
-	AfterFinish func(*Task)
-}
-
 // TODO: check presence of mandatory options
-func (f *factory) newTask(opts CreateOptions) *Task {
+func (f *factory) newTask(spec Spec) *Task {
 	// In terragrunt mode add default terragrunt flags
-	args := append(f.userArgs, opts.Args...)
+	args := append(f.userArgs, spec.Args...)
 	if f.terragrunt {
 		args = append(args, "--terragrunt-non-interactive")
 	}
 
 	return &Task{
-		Common:  resource.New(resource.Task, opts.Parent),
+		Common:  resource.New(resource.Task, spec.Parent),
 		State:   Pending,
 		Created: time.Now(),
 		Updated: time.Now(),
@@ -158,23 +113,23 @@
 		stdout:   newBuffer(),
 		combined: newBuffer(),
 		program:  f.program,
-		Command:       opts.Command,
-		Path:          filepath.Join(f.workdir.String(), opts.Path),
+		Command:       spec.Command,
+		Path:          filepath.Join(f.workdir.String(), spec.Path),
 		Args:          args,
-		AdditionalEnv: append(f.userEnvs, opts.Env...),
-		JSON:          opts.JSON,
-		Blocking:      opts.Blocking,
-		DependsOn:     opts.DependsOn,
-		Immediate:     opts.Immediate,
-		exclusive:     opts.Exclusive,
-		description:   opts.Description,
-		createOptions: opts,
-		AfterExited:   opts.AfterExited,
-		AfterError:    opts.AfterError,
-		AfterCanceled: opts.AfterCanceled,
-		AfterRunning:  opts.AfterRunning,
-		AfterQueued:   opts.AfterQueued,
-		AfterFinish:   opts.AfterFinish,
+		AdditionalEnv: append(f.userEnvs, spec.Env...),
+		JSON:          spec.JSON,
+		Blocking:      spec.Blocking,
+		DependsOn:     spec.DependsOn,
+		Immediate:     spec.Immediate,
+		exclusive:     spec.Exclusive,
+		description:   spec.Description,
+		Spec:          spec,
+		AfterExited:   spec.AfterExited,
+		AfterError:    spec.AfterError,
+		AfterCanceled: spec.AfterCanceled,
+		AfterRunning:  spec.AfterRunning,
+		AfterQueued:   spec.AfterQueued,
+		AfterFinish:   spec.AfterFinish,
 		// Publish an event whenever task state is updated
 		afterUpdate: func(t *Task) {
 			if f.publisher != nil {
diff --git a/internal/task/task_test.go b/internal/task/task_test.go
index d78df34f..9939aa8b 100644
--- a/internal/task/task_test.go
+++ b/internal/task/task_test.go
@@ -19,7 +19,7 @@ func TestTask_NewReader(t *testing.T) {
 		program:   "./testdata/task",
 		publisher: &fakePublisher[*Task]{},
 	}
-	task := f.newTask(CreateOptions{})
+	task := f.newTask(Spec{})
 	task.updateState(Queued)
 	waitfn, err := task.start(context.Background())
 	require.NoError(t, err)
@@ -53,7 +53,7 @@ func TestTask_cancel(t *testing.T) {
 		program:   "./testdata/killme",
 		publisher: &fakePublisher[*Task]{},
 	}
-	task := f.newTask(CreateOptions{})
+	task := f.newTask(Spec{})
 	task.updateState(Queued)
 
 	done := make(chan struct{})
diff --git a/internal/testdata/configs/envs/staging/terraform.tfstate b/internal/testdata/configs/envs/staging/terraform.tfstate
index 22ba13c4..8ef6cc09 100644
--- a/internal/testdata/configs/envs/staging/terraform.tfstate
+++ b/internal/testdata/configs/envs/staging/terraform.tfstate
@@ -1,6 +1,6 @@
 {
   "version": 4,
-  "terraform_version": "1.6.0",
+  "terraform_version": "1.9.1",
   "serial": 1,
   "lineage": "a9b1d343-b395-8326-f452-8ea7f01e9454",
   "outputs": {},
diff --git a/internal/tui/helpers.go b/internal/tui/helpers.go
index e39e795d..223adc5f 100644
--- a/internal/tui/helpers.go
+++ b/internal/tui/helpers.go
@@ -213,52 +213,65 @@ func (h *Helpers) GroupReport(group *task.Group, table bool) string {
 	return s
 }
 
-func (h *Helpers) CreateTasks(cmd string, fn task.Func, ids ...resource.ID) tea.Cmd {
+// CreateTasks repeatedly invokes fn with each id in ids, creating a task for
+// each invocation. If there is more than one id then a task group is created
+// and the user is sent to the task group's page; otherwise, if only one id is
+// provided, the user is sent to the task's page.
+func (h *Helpers) CreateTasks(fn task.SpecFunc, ids ...resource.ID) tea.Cmd {
 	return func() tea.Msg {
 		switch len(ids) {
 		case 0:
 			return nil
 		case 1:
-			task, err := fn(ids[0])
+			spec, err := fn(ids[0])
+			if err != nil {
+				return ReportError(fmt.Errorf("creating task: %w", err))
+			}
+			task, err := h.Tasks.Create(spec)
 			if err != nil {
 				return ReportError(fmt.Errorf("creating task: %w", err))
 			}
 			return NewNavigationMsg(TaskKind, WithParent(task))
 		default:
-			group, err := h.Tasks.CreateGroup(cmd, fn, ids...)
-			if err != nil {
-				return ReportError(fmt.Errorf("creating task group: %w", err))
+			specs := make([]task.Spec, 0, len(ids))
+			for _, id := range ids {
+				spec, err := fn(id)
+				if err != nil {
+					h.Logger.Error("creating task spec", "error", err, "id", id)
+					continue
+				}
+				specs = append(specs, spec)
 			}
-			return NewNavigationMsg(TaskGroupKind, WithParent(group))
+			return h.createTaskGroup(specs...)
 		}
 	}
 }
 
-func (h *Helpers) CreateApplyTasks(opts *run.CreateOptions, ids ...resource.ID) tea.Cmd {
+func (h *Helpers) CreateTasksWithSpecs(specs ...task.Spec) tea.Cmd {
 	return func() tea.Msg {
-		switch len(ids) {
+		switch len(specs) {
 		case 0:
 			return nil
 		case 1:
-			// Only one task is to be created. If successful send user directly to task
-			// page. Otherwise report an error.
-			task, err := h.Runs.Apply(ids[0], opts)
+			task, err := h.Tasks.Create(specs[0])
 			if err != nil {
-				return ReportError(fmt.Errorf("creating apply task: %w", err))
+				return ReportError(fmt.Errorf("creating task: %w", err))
 			}
 			return NewNavigationMsg(TaskKind, WithParent(task))
 		default:
-			// More than one task is to be created. If successful send user to
-			// task group page.
-			group, err := h.Runs.MultiApply(opts, ids...)
-			if err != nil {
-				return ReportError(fmt.Errorf("creating apply task group: %w", err))
-			}
-			return NewNavigationMsg(TaskGroupKind, WithParent(group))
+			return h.createTaskGroup(specs...)
 		}
 	}
 }
 
+func (h *Helpers) createTaskGroup(specs ...task.Spec) tea.Msg {
+	group, err := h.Tasks.CreateGroup(specs...)
+	if err != nil {
+		return ReportError(fmt.Errorf("creating task group: %w", err))
+	}
+	return NewNavigationMsg(TaskGroupKind, WithParent(group))
+}
+
 func (h *Helpers) Move(workspaceID resource.ID, from state.ResourceAddress) tea.Cmd {
 	return CmdHandler(PromptMsg{
 		Prompt: "Enter destination address: ",
@@ -267,10 +280,10 @@ func (h *Helpers) Move(workspaceID resource.ID, from state.ResourceAddress) tea.Cmd {
 			if v == "" {
 				return nil
 			}
-			fn := func(workspaceID resource.ID) (*task.Task, error) {
+			fn := func(workspaceID resource.ID) (task.Spec, error) {
 				return h.States.Move(workspaceID, from, state.ResourceAddress(v))
 			}
-			return h.CreateTasks("state-mv", fn, workspaceID)
+			return h.CreateTasks(fn, workspaceID)
 		},
 		Key:    key.NewBinding(key.WithKeys("enter"), key.WithHelp("enter", "confirm")),
 		Cancel: key.NewBinding(key.WithKeys("esc"), key.WithHelp("esc", "cancel")),
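The page models adapt the Spec-returning service methods to `CreateTasks` with small closures, as the Move handler above does. A representative sketch, assuming `h`, `addr` and `workspaceIDs` are in scope:

```go
// Sketch: fn is a task.SpecFunc; CreateTasks fans it out over the ids,
// then navigates to the task page (one id) or the task group page (many).
fn := func(workspaceID resource.ID) (task.Spec, error) {
	return h.States.Taint(workspaceID, addr) // addr: a state.ResourceAddress
}
cmd := h.CreateTasks(fn, workspaceIDs...)
```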
diff --git a/internal/tui/module/list.go b/internal/tui/module/list.go
index 2655e4f6..f7c17fa5 100644
--- a/internal/tui/module/list.go
+++ b/internal/tui/module/list.go
@@ -1,7 +1,6 @@
 package module
 
 import (
-	"errors"
 	"fmt"
 	"strings"
 
@@ -130,16 +129,16 @@ func (m list) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
 			return m, tui.OpenEditor(row.Value.FullPath())
 		}
 	case key.Matches(msg, keys.Common.Init):
-		cmd := m.helpers.CreateTasks("init", m.Modules.Init, m.table.SelectedOrCurrentIDs()...)
+		cmd := m.helpers.CreateTasks(m.Modules.Init, m.table.SelectedOrCurrentIDs()...)
 		return m, cmd
 	case key.Matches(msg, keys.Common.Validate):
-		cmd := m.helpers.CreateTasks("validate", m.Modules.Validate, m.table.SelectedOrCurrentIDs()...)
+		cmd := m.helpers.CreateTasks(m.Modules.Validate, m.table.SelectedOrCurrentIDs()...)
 		return m, cmd
 	case key.Matches(msg, keys.Common.Format):
-		cmd := m.helpers.CreateTasks("format", m.Modules.Format, m.table.SelectedOrCurrentIDs()...)
+		cmd := m.helpers.CreateTasks(m.Modules.Format, m.table.SelectedOrCurrentIDs()...)
 		return m, cmd
 	case key.Matches(msg, localKeys.ReloadWorkspaces):
-		cmd := m.helpers.CreateTasks("reload-workspace", m.Workspaces.Reload, m.table.SelectedOrCurrentIDs()...)
+		cmd := m.helpers.CreateTasks(m.Workspaces.Reload, m.table.SelectedOrCurrentIDs()...)
 		return m, cmd
 	case key.Matches(msg, keys.Common.State):
 		if row, ok := m.table.CurrentRow(); ok {
@@ -151,27 +150,43 @@ func (m list) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
 		createRunOptions.Destroy = true
 		fallthrough
 	case key.Matches(msg, keys.Common.Plan):
-		workspaceIDs, err := m.pruneModulesWithoutCurrentWorkspace()
+		// Create specs here, de-selecting any modules where an error is
+		// returned.
+		specs, err := m.table.Prune(func(mod *module.Module) (task.Spec, error) {
+			if workspaceID := mod.CurrentWorkspaceID; workspaceID == nil {
+				return task.Spec{}, fmt.Errorf("module %s does not have a current workspace", mod)
+			} else {
+				return m.Runs.Plan(*workspaceID, createRunOptions)
+			}
+		})
 		if err != nil {
-			return m, tui.ReportError(fmt.Errorf("deselected items: %w", err))
-		}
-		fn := func(workspaceID resource.ID) (*task.Task, error) {
-			return m.Runs.Plan(workspaceID, createRunOptions)
+			// Modules were de-selected, so report the error and give the user
+			// another opportunity to plan any remaining modules.
+			return m, tui.ReportError(err)
 		}
-		desc := run.PlanTaskDescription(createRunOptions.Destroy)
-		return m, m.helpers.CreateTasks(desc, fn, workspaceIDs...)
+		return m, m.helpers.CreateTasksWithSpecs(specs...)
 	case key.Matches(msg, keys.Common.Destroy):
 		createRunOptions.Destroy = true
 		applyPrompt = "Destroy resources of %d modules?"
 		fallthrough
 	case key.Matches(msg, keys.Common.Apply):
-		workspaceIDs, err := m.pruneModulesWithoutCurrentWorkspace()
+		// Create specs here, de-selecting any modules where an error is
+		// returned.
+		specs, err := m.table.Prune(func(mod *module.Module) (task.Spec, error) {
+			if workspaceID := mod.CurrentWorkspaceID; workspaceID == nil {
+				return task.Spec{}, fmt.Errorf("module %s does not have a current workspace", mod)
+			} else {
+				return m.Runs.Apply(*workspaceID, &createRunOptions)
+			}
+		})
 		if err != nil {
-			return m, tui.ReportError(fmt.Errorf("deselected items: %w", err))
+			// Modules were de-selected, so report the error and give the user
+			// another opportunity to apply any remaining modules.
+			return m, tui.ReportError(err)
 		}
 		return m, tui.YesNoPrompt(
-			fmt.Sprintf(applyPrompt, len(workspaceIDs)),
-			m.helpers.CreateApplyTasks(&createRunOptions, workspaceIDs...),
+			fmt.Sprintf(applyPrompt, len(specs)),
+			m.helpers.CreateTasksWithSpecs(specs...),
 		)
 	}
 	}
@@ -181,19 +196,6 @@ func (m list) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
 	return m, tea.Batch(cmds...)
} -func (m *list) pruneModulesWithoutCurrentWorkspace() ([]resource.ID, error) { - workspaceIDs, err := m.table.Prune(func(mod *module.Module) (resource.ID, bool) { - if workspaceID := mod.CurrentWorkspaceID; workspaceID != nil { - return *workspaceID, false - } - return resource.ID{}, true - }) - if err != nil { - return nil, errors.New("module(s) do not have a current workspace") - } - return workspaceIDs, nil -} - func (m list) Title() string { return tui.Breadcrumbs("Modules", resource.GlobalResource) } diff --git a/internal/tui/services.go b/internal/tui/services.go index b5b91ab4..82a60745 100644 --- a/internal/tui/services.go +++ b/internal/tui/services.go @@ -13,43 +13,40 @@ type ModuleService interface { Get(id resource.ID) (*module.Module, error) List() []*module.Module Reload() ([]string, []string, error) - Init(moduleID resource.ID) (*task.Task, error) - Format(moduleID resource.ID) (*task.Task, error) - Validate(moduleID resource.ID) (*task.Task, error) + Init(moduleID resource.ID) (task.Spec, error) + Format(moduleID resource.ID) (task.Spec, error) + Validate(moduleID resource.ID) (task.Spec, error) SetCurrent(moduleID, workspaceID resource.ID) error } type WorkspaceService interface { - Reload(moduleID resource.ID) (*task.Task, error) + Reload(moduleID resource.ID) (task.Spec, error) Get(id resource.ID) (*workspace.Workspace, error) List(opts workspace.ListOptions) []*workspace.Workspace SelectWorkspace(moduleID, workspaceID resource.ID) error - Delete(id resource.ID) (*task.Task, error) + Delete(id resource.ID) (task.Spec, error) } type StateService interface { - Reload(workspaceID resource.ID) (*task.Task, error) + Reload(workspaceID resource.ID) (task.Spec, error) Get(workspaceID resource.ID) (*state.State, error) GetResource(resourceID resource.ID) (*state.Resource, error) - Delete(workspaceID resource.ID, addrs ...state.ResourceAddress) (*task.Task, error) - Taint(workspaceID resource.ID, addr state.ResourceAddress) (*task.Task, error) - Untaint(workspaceID resource.ID, addr state.ResourceAddress) (*task.Task, error) - Move(workspaceID resource.ID, src, dest state.ResourceAddress) (*task.Task, error) + Delete(workspaceID resource.ID, addrs ...state.ResourceAddress) (task.Spec, error) + Taint(workspaceID resource.ID, addr state.ResourceAddress) (task.Spec, error) + Untaint(workspaceID resource.ID, addr state.ResourceAddress) (task.Spec, error) + Move(workspaceID resource.ID, src, dest state.ResourceAddress) (task.Spec, error) } type RunService interface { Get(id resource.ID) (*run.Run, error) List(opts run.ListOptions) []*run.Run - Plan(workspaceID resource.ID, opts run.CreateOptions) (*task.Task, error) - Apply(id resource.ID, opts *run.CreateOptions) (*task.Task, error) - MultiApply(opts *run.CreateOptions, ids ...resource.ID) (*task.Group, error) + Plan(workspaceID resource.ID, opts run.CreateOptions) (task.Spec, error) + Apply(id resource.ID, opts *run.CreateOptions) (task.Spec, error) } type TaskService interface { - Create(opts task.CreateOptions) (*task.Task, error) - CreateGroup(cmd string, fn task.Func, ids ...resource.ID) (*task.Group, error) - CreateDependencyGroup(cmd string, reverse bool, opts ...task.CreateOptions) (*task.Group, error) - Retry(taskID resource.ID) (*task.Task, error) + Create(opts task.Spec) (*task.Task, error) + CreateGroup(specs ...task.Spec) (*task.Group, error) Counter() int Get(taskID resource.ID) (*task.Task, error) GetGroup(groupID resource.ID) (*task.Group, error) diff --git a/internal/tui/table/table.go b/internal/tui/table/table.go 
index 1d5d8b74..62f3a066 100644 --- a/internal/tui/table/table.go +++ b/internal/tui/table/table.go @@ -16,6 +16,7 @@ import ( "github.com/leg100/go-runewidth" "github.com/leg100/pug/internal" "github.com/leg100/pug/internal/resource" + "github.com/leg100/pug/internal/task" "github.com/leg100/pug/internal/tui" "github.com/leg100/pug/internal/tui/keys" "golang.org/x/exp/maps" @@ -689,36 +690,36 @@ func (m *Model[V]) renderRow(rowIdx int) string { // it returns false then the resulting id is returned. // // If there are no rows in the table then a nil error is returned. -func (m *Model[V]) Prune(fn func(value V) (resource.ID, bool)) ([]resource.ID, error) { +func (m *Model[V]) Prune(fn func(value V) (task.Spec, error)) ([]task.Spec, error) { rows := m.SelectedOrCurrent() switch len(rows) { case 0: return nil, errors.New("no rows in table") case 1: // current row, no selections - id, prune := fn(rows[0].Value) - if prune { + spec, err := fn(rows[0].Value) + if err != nil { // the single current row is to be pruned, so report this as an // error return nil, fmt.Errorf("action is not applicable to the current row") } - return []resource.ID{id}, nil + return []task.Spec{spec}, nil default: // one or more selections: iterate thru and prune accordingly. var ( - ids []resource.ID + ids []task.Spec before = len(m.Selected) pruned int ) for k, v := range m.Selected { - id, prune := fn(v) - if prune { + spec, err := fn(v) + if err != nil { // De-select m.ToggleSelectionByID(k) pruned++ continue } - ids = append(ids, id) + ids = append(ids, spec) } switch { case len(ids) == 0: diff --git a/internal/tui/task/list.go b/internal/tui/task/list.go index 5ed9fbdf..2102c078 100644 --- a/internal/tui/task/list.go +++ b/internal/tui/task/list.go @@ -9,7 +9,7 @@ import ( tea "github.com/charmbracelet/bubbletea" "github.com/leg100/pug/internal/resource" runpkg "github.com/leg100/pug/internal/run" - "github.com/leg100/pug/internal/task" + taskpkg "github.com/leg100/pug/internal/task" "github.com/leg100/pug/internal/tui" "github.com/leg100/pug/internal/tui/keys" "github.com/leg100/pug/internal/tui/split" @@ -30,7 +30,7 @@ var ( statusColumn = table.Column{ Key: "task_status", Title: "STATUS", - Width: task.MaxStatusLen, + Width: taskpkg.MaxStatusLen, } ageColumn = table.Column{ Key: "age", @@ -83,7 +83,7 @@ func (mm *ListMaker) Make(_ resource.ID, width, height int) (tea.Model, error) { ageColumn, } - renderer := func(t *task.Task) table.RenderedRow { + renderer := func(t *taskpkg.Task) table.RenderedRow { row := table.RenderedRow{ taskIDColumn.Key: t.ID.String(), table.ModuleColumn.Key: mm.Helpers.ModulePath(t), @@ -105,10 +105,10 @@ func (mm *ListMaker) Make(_ resource.ID, width, height int) (tea.Model, error) { return row } - splitModel := split.New(split.Options[*task.Task]{ + splitModel := split.New(split.Options[*taskpkg.Task]{ Columns: columns, Renderer: renderer, - TableOptions: []table.Option[*task.Task]{table.WithSortFunc(task.ByState)}, + TableOptions: []table.Option[*taskpkg.Task]{table.WithSortFunc(taskpkg.ByState)}, Width: width, Height: height, Maker: mm.TaskMaker, @@ -123,7 +123,7 @@ func (mm *ListMaker) Make(_ resource.ID, width, height int) (tea.Model, error) { } type List struct { - split.Model[*task.Task] + split.Model[*taskpkg.Task] runs tui.RunService tasks tui.TaskService @@ -132,8 +132,8 @@ type List struct { func (m List) Init() tea.Cmd { return func() tea.Msg { - tasks := m.tasks.List(task.ListOptions{}) - return table.BulkInsertMsg[*task.Task](tasks) + tasks := 
m.tasks.List(taskpkg.ListOptions{}) + return table.BulkInsertMsg[*taskpkg.Task](tasks) } } @@ -149,13 +149,20 @@ func (m List) Update(msg tea.Msg) (tea.Model, tea.Cmd) { return m, tui.NavigateTo(tui.TaskKind, tui.WithParent(row.Value)) } case key.Matches(msg, keys.Common.Apply): - runIDs, err := m.pruneApplyableTasks() + specs, err := m.Table.Prune(func(task *taskpkg.Task) (taskpkg.Spec, error) { + // Task must belong to a run in order to be applied. + res := task.Run() + if res == nil { + return taskpkg.Spec{}, errors.New("task does not belong to a run") + } + return m.runs.Apply(res.GetID(), nil) + }) if err != nil { return m, tui.ReportError(fmt.Errorf("applying tasks: %w", err)) } return m, tui.YesNoPrompt( - fmt.Sprintf("Apply %d plans?", len(runIDs)), - m.helpers.CreateApplyTasks(nil, runIDs...), + fmt.Sprintf("Apply %d plans?", len(specs)), + m.helpers.CreateTasksWithSpecs(specs...), ) case key.Matches(msg, keys.Common.State): if row, ok := m.Table.CurrentRow(); ok { @@ -166,22 +173,14 @@ func (m List) Update(msg tea.Msg) (tea.Model, tea.Cmd) { } } case key.Matches(msg, keys.Common.Retry): - // If all tasks have the same stringified identifier (which reveals - // the underlying command), then use that. Otherwise use the string - // "multi". - var cmd string - for _, task := range m.Table.SelectedOrCurrent() { - if cmd == "" { - cmd = task.Value.String() - } else if cmd != task.Value.String() { - cmd = "multi" - break - } + rows := m.Table.SelectedOrCurrent() + specs := make([]taskpkg.Spec, len(rows)) + for i, row := range rows { + specs[i] = row.Value.Spec } - taskIDs := m.Table.SelectedOrCurrentIDs() return m, tui.YesNoPrompt( - fmt.Sprintf("Retry %d tasks?", len(taskIDs)), - m.helpers.CreateTasks(cmd, m.tasks.Retry, taskIDs...), + fmt.Sprintf("Retry %d tasks?", len(rows)), + m.helpers.CreateTasksWithSpecs(specs...), ) } } @@ -204,20 +203,3 @@ func (m List) HelpBindings() []key.Binding { } return append(bindings, keys.KeyMapToSlice(split.Keys)...) } - -// pruneApplyableTasks removes from the selection any tasks that cannot be -// applied, i.e all tasks other than those that are a plan and are in the -// planned state. The run ID of each task after pruning is returned. 
-func (m *List) pruneApplyableTasks() ([]resource.ID, error) { - return m.Table.Prune(func(task *task.Task) (resource.ID, bool) { - rr := task.Run() - if rr == nil { - return resource.ID{}, true - } - run := rr.(*runpkg.Run) - if run.Status != runpkg.Planned { - return resource.ID{}, true - } - return run.ID, false - }) -} diff --git a/internal/tui/task/model.go b/internal/tui/task/model.go index c1ab0dbb..9d11fff8 100644 --- a/internal/tui/task/model.go +++ b/internal/tui/task/model.go @@ -139,13 +139,13 @@ func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) { return m, cancel(m.tasks, m.task.ID) case key.Matches(msg, keys.Common.Apply): if m.run != nil { - // Only trigger an apply if run is in the planned state - if m.run.Status != run.Planned { - return m, nil + spec, err := m.runs.Apply(m.run.ID, nil) + if err != nil { + return m, tui.ReportError(fmt.Errorf("create apply task: %w", err)) } return m, tui.YesNoPrompt( "Apply plan?", - m.helpers.CreateApplyTasks(nil, m.run.ID), + m.helpers.CreateTasksWithSpecs(spec), ) } case key.Matches(msg, keys.Common.State): @@ -157,7 +157,7 @@ func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) { case key.Matches(msg, keys.Common.Retry): return m, tui.YesNoPrompt( "Retry task?", - m.helpers.CreateTasks("retry", m.tasks.Retry, m.task.ID), + m.helpers.CreateTasksWithSpecs(m.task.Spec), ) } case toggleAutoscrollMsg: diff --git a/internal/tui/workspace/list.go b/internal/tui/workspace/list.go index 26387f08..e8f96627 100644 --- a/internal/tui/workspace/list.go +++ b/internal/tui/workspace/list.go @@ -101,16 +101,16 @@ func (m list) Update(msg tea.Msg) (tea.Model, tea.Cmd) { } return m, tui.YesNoPrompt( fmt.Sprintf("Delete %d workspace(s)?", len(workspaceIDs)), - m.helpers.CreateTasks("delete-workspace", m.Workspaces.Delete, workspaceIDs...), + m.helpers.CreateTasks(m.Workspaces.Delete, workspaceIDs...), ) case key.Matches(msg, keys.Common.Init): - cmd := m.helpers.CreateTasks("init", m.Modules.Init, m.selectedOrCurrentModuleIDs()...) + cmd := m.helpers.CreateTasks(m.Modules.Init, m.selectedOrCurrentModuleIDs()...) return m, cmd case key.Matches(msg, keys.Common.Format): - cmd := m.helpers.CreateTasks("format", m.Modules.Format, m.selectedOrCurrentModuleIDs()...) + cmd := m.helpers.CreateTasks(m.Modules.Format, m.selectedOrCurrentModuleIDs()...) return m, cmd case key.Matches(msg, keys.Common.Validate): - cmd := m.helpers.CreateTasks("validate", m.Modules.Validate, m.selectedOrCurrentModuleIDs()...) + cmd := m.helpers.CreateTasks(m.Modules.Validate, m.selectedOrCurrentModuleIDs()...) return m, cmd case key.Matches(msg, localKeys.SetCurrent): if row, ok := m.table.CurrentRow(); ok { @@ -126,20 +126,22 @@ func (m list) Update(msg tea.Msg) (tea.Model, tea.Cmd) { fallthrough case key.Matches(msg, keys.Common.Plan): workspaceIDs := m.table.SelectedOrCurrentIDs() - fn := func(workspaceID resource.ID) (*task.Task, error) { + fn := func(workspaceID resource.ID) (task.Spec, error) { return m.Runs.Plan(workspaceID, createRunOptions) } - desc := run.PlanTaskDescription(createRunOptions.Destroy) - return m, m.helpers.CreateTasks(desc, fn, workspaceIDs...) + return m, m.helpers.CreateTasks(fn, workspaceIDs...) case key.Matches(msg, keys.Common.Destroy): createRunOptions.Destroy = true applyPrompt = "Destroy resources of %d workspaces?" 
fallthrough case key.Matches(msg, keys.Common.Apply): workspaceIDs := m.table.SelectedOrCurrentIDs() + fn := func(workspaceID resource.ID) (task.Spec, error) { + return m.Runs.Apply(workspaceID, &createRunOptions) + } return m, tui.YesNoPrompt( fmt.Sprintf(applyPrompt, len(workspaceIDs)), - m.helpers.CreateApplyTasks(&createRunOptions, workspaceIDs...), + m.helpers.CreateTasks(fn, workspaceIDs...), ) case key.Matches(msg, keys.Common.State): if row, ok := m.table.CurrentRow(); ok { diff --git a/internal/tui/workspace/resource.go b/internal/tui/workspace/resource.go index 7e7d8ab4..a74569e6 100644 --- a/internal/tui/workspace/resource.go +++ b/internal/tui/workspace/resource.go @@ -74,24 +74,24 @@ func (m resourceModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) { case tea.KeyMsg: switch { case key.Matches(msg, resourcesKeys.Taint): - fn := func(workspaceID resource.ID) (*task.Task, error) { + fn := func(workspaceID resource.ID) (task.Spec, error) { return m.states.Taint(workspaceID, m.resource.Address) } - return m, m.helpers.CreateTasks("taint", fn, m.resource.Workspace().GetID()) + return m, m.helpers.CreateTasks(fn, m.resource.Workspace().GetID()) case key.Matches(msg, resourcesKeys.Untaint): - fn := func(workspaceID resource.ID) (*task.Task, error) { + fn := func(workspaceID resource.ID) (task.Spec, error) { return m.states.Untaint(workspaceID, m.resource.Address) } - return m, m.helpers.CreateTasks("untaint", fn, m.resource.Workspace().GetID()) + return m, m.helpers.CreateTasks(fn, m.resource.Workspace().GetID()) case key.Matches(msg, resourcesKeys.Move): return m, m.helpers.Move(m.resource.Workspace().GetID(), m.resource.Address) case key.Matches(msg, keys.Common.Delete): - fn := func(workspaceID resource.ID) (*task.Task, error) { + fn := func(workspaceID resource.ID) (task.Spec, error) { return m.states.Delete(workspaceID, m.resource.Address) } return m, tui.YesNoPrompt( "Delete resource?", - m.helpers.CreateTasks("state-rm", fn, m.resource.Workspace().GetID()), + m.helpers.CreateTasks(fn, m.resource.Workspace().GetID()), ) case key.Matches(msg, keys.Common.PlanDestroy): // Create a targeted destroy plan. @@ -100,10 +100,10 @@ func (m resourceModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) { case key.Matches(msg, keys.Common.Plan): // Create a targeted plan. 
createRunOptions.TargetAddrs = []state.ResourceAddress{m.resource.Address} - fn := func(workspaceID resource.ID) (*task.Task, error) { + fn := func(workspaceID resource.ID) (task.Spec, error) { return m.runs.Plan(workspaceID, createRunOptions) } - return m, m.helpers.CreateTasks("plan", fn, m.resource.Workspace().GetID()) + return m, m.helpers.CreateTasks(fn, m.resource.Workspace().GetID()) } case tea.WindowSizeMsg: m.viewport.SetDimensions(m.viewportWidth(msg.Width), m.viewportHeight(msg.Height)) diff --git a/internal/tui/workspace/resource_list.go b/internal/tui/workspace/resource_list.go index 0f0cfe05..7569130a 100644 --- a/internal/tui/workspace/resource_list.go +++ b/internal/tui/workspace/resource_list.go @@ -135,10 +135,15 @@ func (m resourceList) Update(msg tea.Msg) (tea.Model, tea.Cmd) { m.reloading = true return m, func() tea.Msg { msg := reloadedMsg{workspaceID: m.workspace.GetID()} - if task, err := m.states.Reload(msg.workspaceID); err != nil { - msg.err = err - } else if err := task.Wait(); err != nil { + if spec, err := m.states.Reload(msg.workspaceID); err != nil { msg.err = err + } else { + task, err := m.helpers.Tasks.Create(spec) + if err != nil { + msg.err = err + } else if err := task.Wait(); err != nil { + msg.err = err + } } return msg } @@ -148,19 +153,19 @@ func (m resourceList) Update(msg tea.Msg) (tea.Model, tea.Cmd) { // no rows; do nothing return m, nil } - fn := func(workspaceID resource.ID) (*task.Task, error) { + fn := func(workspaceID resource.ID) (task.Spec, error) { return m.states.Delete(workspaceID, addrs...) } return m, tui.YesNoPrompt( fmt.Sprintf("Delete %d resource(s)?", len(addrs)), - m.helpers.CreateTasks("state-rm", fn, m.workspace.GetID()), + m.helpers.CreateTasks(fn, m.workspace.GetID()), ) case key.Matches(msg, resourcesKeys.Taint): addrs := m.selectedOrCurrentAddresses() - return m, m.createStateCommand("taint", m.states.Taint, addrs...) + return m, m.createStateCommand(m.states.Taint, addrs...) case key.Matches(msg, resourcesKeys.Untaint): addrs := m.selectedOrCurrentAddresses() - return m, m.createStateCommand("untaint", m.states.Untaint, addrs...) + return m, m.createStateCommand(m.states.Untaint, addrs...) case key.Matches(msg, resourcesKeys.Move): if row, ok := m.Table.CurrentRow(); ok { from := row.Value.Address @@ -175,10 +180,10 @@ func (m resourceList) Update(msg tea.Msg) (tea.Model, tea.Cmd) { createRunOptions.TargetAddrs = m.selectedOrCurrentAddresses() // NOTE: even if the user hasn't selected any rows, we still proceed // to create a run without targeted resources. 
- fn := func(workspaceID resource.ID) (*task.Task, error) { + fn := func(workspaceID resource.ID) (task.Spec, error) { return m.runs.Plan(workspaceID, createRunOptions) } - return m, m.helpers.CreateTasks("plan", fn, m.workspace.GetID()) + return m, m.helpers.CreateTasks(fn, m.workspace.GetID()) } case initState: if msg.WorkspaceID != m.workspace.GetID() { diff --git a/internal/tui/workspace/state_func.go b/internal/tui/workspace/state_func.go index 8d97b3be..bf98460d 100644 --- a/internal/tui/workspace/state_func.go +++ b/internal/tui/workspace/state_func.go @@ -7,16 +7,16 @@ import ( "github.com/leg100/pug/internal/task" ) -type stateFunc func(workspaceID resource.ID, addr state.ResourceAddress) (*task.Task, error) +type stateFunc func(workspaceID resource.ID, addr state.ResourceAddress) (task.Spec, error) -func (m resourceList) createStateCommand(name string, fn stateFunc, addrs ...state.ResourceAddress) tea.Cmd { +func (m resourceList) createStateCommand(fn stateFunc, addrs ...state.ResourceAddress) tea.Cmd { // Make N copies of the workspace ID where N is the number of addresses workspaceIDs := make([]resource.ID, len(addrs)) for i := range workspaceIDs { workspaceIDs[i] = m.workspace.GetID() } f := newStateTaskFunc(fn, addrs...) - return m.helpers.CreateTasks(name, f.createTask, workspaceIDs...) + return m.helpers.CreateTasks(f.createTask, workspaceIDs...) } func newStateTaskFunc(fn stateFunc, addrs ...state.ResourceAddress) *stateTaskFunc { @@ -32,7 +32,7 @@ type stateTaskFunc struct { i int } -func (f *stateTaskFunc) createTask(workspaceID resource.ID) (*task.Task, error) { +func (f *stateTaskFunc) createTask(workspaceID resource.ID) (task.Spec, error) { t, err := f.fn(workspaceID, f.addrs[f.i]) f.i++ return t, err } diff --git a/internal/workspace/service.go b/internal/workspace/service.go index 78d2c54b..4b34145e 100644 --- a/internal/workspace/service.go +++ b/internal/workspace/service.go @@ -44,7 +44,6 @@ type modules interface { SetCurrent(moduleID, workspaceID resource.ID) error Reload() ([]string, []string, error) List() []*module.Module - CreateTask(mod *module.Module, opts task.CreateOptions) (*task.Task, error) } type moduleSubscription interface { @@ -73,32 +72,47 @@ func NewService(opts ServiceOptions) *Service { func (s *Service) LoadWorkspacesUponModuleLoad(modules moduleSubscription) { sub := modules.Subscribe() + reload := func(moduleID resource.ID) error { + spec, err := s.Reload(moduleID) + if err != nil { + return err + } + _, err = s.tasks.Create(spec) + return err + } + go func() { for event := range sub { switch event.Type { case resource.CreatedEvent: - s.Reload(event.Payload.ID) + if err := reload(event.Payload.ID); err != nil { + s.logger.Error("reloading workspaces", "error", err, "module", event.Payload) + } case resource.UpdatedEvent: if event.Payload.CurrentWorkspaceID != nil { // Module already has a current workspace; no need to reload // workspaces continue } - s.Reload(event.Payload.ID) + if err := reload(event.Payload.ID); err != nil { + s.logger.Error("reloading workspaces", "error", err, "module", event.Payload) + } } } }() } -// Reload invokes `terraform workspace list` on a module and updates pug with -// the results, adding any newly discovered workspaces and pruning any -// workspaces no longer found to exist.
-func (s *Service) Reload(moduleID resource.ID) (*task.Task, error) { +// Reload returns a task spec that runs `terraform workspace list` on a +// module and updates pug with the results, adding any newly discovered +// workspaces and pruning any workspaces no longer found to exist. +func (s *Service) Reload(moduleID resource.ID) (task.Spec, error) { mod, err := s.modules.Get(moduleID) if err != nil { - return nil, err + return task.Spec{}, err } - task, err := s.modules.CreateTask(mod, task.CreateOptions{ + return task.Spec{ + Parent: mod, + Path: mod.Path, Command: []string{"workspace", "list"}, AfterError: func(t *task.Task) { s.logger.Error("reloading workspaces", "error", t.Err, "module", mod, "task", t) }, @@ -116,12 +130,7 @@ func (s *Service) Reload(moduleID resource.ID) (*task.Task, error) { } s.logger.Info("reloaded workspaces", "added", added, "removed", removed, "module", mod) }, - }) - if err != nil { - s.logger.Error("reloading workspaces", "error", err, "module", mod) - return nil, err - } - return task, nil + }, nil } // resetWorkspaces resets the workspaces for a module, adding newly discovered @@ -195,17 +204,18 @@ func parseList(r io.Reader) (list []string, current string, err error) { } -// Create a workspace. Asynchronous. +// Create returns a task spec that creates a workspace. -func (s *Service) Create(path, name string) (*Workspace, *task.Task, error) { +func (s *Service) Create(path, name string) (task.Spec, error) { mod, err := s.modules.GetByPath(path) if err != nil { - return nil, nil, fmt.Errorf("checking for module: %s: %w", path, err) + return task.Spec{}, err } ws, err := New(mod, name) if err != nil { - return nil, nil, err + return task.Spec{}, err } - - task, err := s.createTask(ws, task.CreateOptions{ + return task.Spec{ + Parent: mod, + Path: mod.Path, Command: []string{"workspace", "new"}, Args: []string{name}, AfterExited: func(*task.Task) { @@ -216,11 +226,7 @@ func (s *Service) Create(path, name string) (*Workspace, *task.Task, error) { - s.logger.Error("creating workspace: %w", err) + s.logger.Error("creating workspace", "error", err) }, - }) - if err != nil { - return nil, nil, err - } - return ws, task, nil + }, nil } func (s *Service) Get(workspaceID resource.ID) (*Workspace, error) { @@ -284,18 +290,22 @@ func (s *Service) selectWorkspace(moduleID, workspaceID resource.ID) error { if err != nil { return err } + mod, err := s.modules.Get(ws.ModuleID()) + if err != nil { + return err + } // Create task to immediately set workspace as current workspace for module. - task, err := s.createTask(ws, task.CreateOptions{ + _, err = s.tasks.Create(task.Spec{ + Parent: mod, + Path: mod.Path, Command: []string{"workspace", "select"}, Args: []string{ws.Name}, Immediate: true, + Wait: true, }) if err != nil { return err } - if err := task.Wait(); err != nil { - return err - } // Now task has finished successfully, update the current workspace in pug // as well. if err := s.modules.SetCurrent(moduleID, workspaceID); err != nil { @@ -305,30 +315,23 @@ func (s *Service) selectWorkspace(moduleID, workspaceID resource.ID) error { } -// Delete a workspace. Asynchronous. +// Delete returns a task spec that deletes a workspace.
-func (s *Service) Delete(id resource.ID) (*task.Task, error) { - ws, err := s.table.Get(id) +func (s *Service) Delete(workspaceID resource.ID) (task.Spec, error) { + ws, err := s.table.Get(workspaceID) + if err != nil { + return task.Spec{}, fmt.Errorf("deleting workspace: %w", err) + } + mod, err := s.modules.Get(ws.ModuleID()) if err != nil { - return nil, fmt.Errorf("deleting workspace: %w", err) + return task.Spec{}, err } - return s.createTask(ws, task.CreateOptions{ + return task.Spec{ + Parent: mod, + Path: mod.Path, Command: []string{"workspace", "delete"}, Args: []string{ws.Name}, Blocking: true, AfterExited: func(*task.Task) { s.table.Delete(ws.ID) }, - }) -} - -// TODO: move this logic into task.Create -func (s *Service) createTask(ws *Workspace, opts task.CreateOptions) (*task.Task, error) { - opts.Parent = ws - - mod, err := s.modules.Get(ws.ModuleID()) - if err != nil { - return nil, err - } - opts.Path = mod.Path - - return s.tasks.Create(opts) + }, nil } diff --git a/internal/workspace/workspace.go b/internal/workspace/workspace.go index 94676126..71428fc9 100644 --- a/internal/workspace/workspace.go +++ b/internal/workspace/workspace.go @@ -43,7 +43,7 @@ func (ws *Workspace) ModulePath() string { } func (ws *Workspace) TerraformEnv() string { - return fmt.Sprintf("TF_WORKSPACE=%s", ws.Name) + return TerraformEnv(ws.Name) } func (ws *Workspace) LogValue() slog.Value { @@ -51,3 +51,7 @@ func (ws *Workspace) LogValue() slog.Value { slog.String("name", ws.Name), ) } + +func TerraformEnv(workspaceName string) string { + return fmt.Sprintf("TF_WORKSPACE=%s", workspaceName) +}
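To make the shape of this refactor concrete, here is a minimal, self-contained sketch of the pattern the changes above converge on: service methods no longer create tasks themselves but return a spec describing the work, a single caller-side entry point turns specs into tasks, and retrying a task is simply re-submitting the spec it stored. All names below (`Spec`, `Task`, `Runner`) are illustrative stand-ins, not the actual pug API.

```go
package main

import "fmt"

// Spec describes a task without running it (stand-in for task.Spec).
type Spec struct {
	Path    string
	Command []string
	Args    []string
}

// Task keeps its originating Spec, so a "retry" is just re-submitting it,
// as the task list and task model now do via CreateTasksWithSpecs.
type Task struct {
	Spec Spec
}

// Runner stands in for the task service's Create method: the one place
// where specs become running tasks.
type Runner struct{}

func (Runner) Create(spec Spec) (*Task, error) {
	fmt.Printf("run %v %v in %s\n", spec.Command, spec.Args, spec.Path)
	return &Task{Spec: spec}, nil
}

// reloadSpec mirrors the new workspace.Service.Reload: it only builds a
// description of the work; nothing executes until a caller submits it.
func reloadSpec(modulePath string) Spec {
	return Spec{
		Path:    modulePath,
		Command: []string{"workspace", "list"},
	}
}

func main() {
	var runner Runner

	// The caller decides when (and whether) a spec is executed.
	task, err := runner.Create(reloadSpec("modules/a"))
	if err != nil {
		fmt.Println("error:", err)
		return
	}

	// Retrying re-submits the stored spec.
	if _, err := runner.Create(task.Spec); err != nil {
		fmt.Println("retry error:", err)
	}
}
```

One payoff of this split, visible in workspace/service.go above, is that error handling moves to the caller: LoadWorkspacesUponModuleLoad can log a failure from either spec construction or task creation in one place.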
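The revised `Prune` contract in internal/tui/table can likewise be summarised with a small sketch: the callback now returns a spec or an error instead of an ID plus a bool, rows whose callback errors are dropped from the selection, and the surviving specs are returned. The generic function below mirrors only the multi-selection branch; the stand-in types and the zero-specs error message are assumptions for illustration.

```go
package main

import (
	"errors"
	"fmt"
)

// Spec stands in for task.Spec.
type Spec struct{ Command []string }

// prune mirrors the multi-selection branch of Model.Prune: a callback error
// prunes the row, a success collects its spec.
func prune[V any](selected []V, fn func(V) (Spec, error)) ([]Spec, error) {
	var specs []Spec
	for _, v := range selected {
		spec, err := fn(v)
		if err != nil {
			// The real table also de-selects the pruned row here.
			continue
		}
		specs = append(specs, spec)
	}
	if len(specs) == 0 {
		// Assumed message; the diff cuts off before this branch.
		return nil, errors.New("no rows are applicable to the action")
	}
	return specs, nil
}

type row struct{ hasRun bool }

func main() {
	rows := []row{{hasRun: true}, {hasRun: false}, {hasRun: true}}
	specs, err := prune(rows, func(r row) (Spec, error) {
		if !r.hasRun {
			// Mirrors the task list's check before applying.
			return Spec{}, errors.New("task does not belong to a run")
		}
		return Spec{Command: []string{"apply"}}, nil
	})
	fmt.Println(len(specs), err) // 2 <nil>
}
```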