From a4de284391f1aece872b86efe06f0661d0a161fe Mon Sep 17 00:00:00 2001 From: Yusuke Sakurai Date: Wed, 26 Jun 2024 12:01:56 +0900 Subject: [PATCH] a --- canary_task.go | 45 ++++++++++++++++++++++++++++----------------- canary_task_test.go | 45 +++++++++++++++++++++++++++++++++++++++++++++ rollout.go | 19 ++++++++++++------- timeout/timeout.go | 2 ++ 4 files changed, 87 insertions(+), 24 deletions(-) create mode 100644 canary_task_test.go diff --git a/canary_task.go b/canary_task.go index 723bc5c..502085a 100644 --- a/canary_task.go +++ b/canary_task.go @@ -26,12 +26,23 @@ type CanaryTarget struct { type CanaryTask struct { *cage - td *ecstypes.TaskDefinition - lb *ecstypes.LoadBalancer - networkConfiguration *ecstypes.NetworkConfiguration - platformVersion *string - taskArn *string - target *CanaryTarget + *CanaryTaskInput + taskArn *string + target *CanaryTarget +} + +type CanaryTaskInput struct { + TaskDefinition *ecstypes.TaskDefinition + LoadBalancer *ecstypes.LoadBalancer + NetworkConfiguration *ecstypes.NetworkConfiguration + PlatformVersion *string +} + +func NewCanaryTask(c *cage, input *CanaryTaskInput) *CanaryTask { + return &CanaryTask{ + cage: c, + CanaryTaskInput: input, + } } func (c *CanaryTask) Start(ctx context.Context) error { @@ -40,8 +51,8 @@ func (c *CanaryTask) Start(ctx context.Context) error { startTask := &ecs.StartTaskInput{ Cluster: &c.Env.Cluster, Group: aws.String(fmt.Sprintf("cage:canary-task:%s", c.Env.Service)), - NetworkConfiguration: c.networkConfiguration, - TaskDefinition: c.td.TaskDefinitionArn, + NetworkConfiguration: c.NetworkConfiguration, + TaskDefinition: c.TaskDefinition.TaskDefinitionArn, ContainerInstances: []string{c.Env.CanaryInstanceArn}, } if o, err := c.Ecs.StartTask(ctx, startTask); err != nil { @@ -54,10 +65,10 @@ func (c *CanaryTask) Start(ctx context.Context) error { if o, err := c.Ecs.RunTask(ctx, &ecs.RunTaskInput{ Cluster: &c.Env.Cluster, Group: aws.String(fmt.Sprintf("cage:canary-task:%s", c.Env.Service)), - 
NetworkConfiguration: c.networkConfiguration, - TaskDefinition: c.td.TaskDefinitionArn, + NetworkConfiguration: c.NetworkConfiguration, + TaskDefinition: c.TaskDefinition.TaskDefinitionArn, LaunchType: ecstypes.LaunchTypeFargate, - PlatformVersion: c.platformVersion, + PlatformVersion: c.PlatformVersion, }); err != nil { return err } else { @@ -81,7 +92,7 @@ func (c *CanaryTask) Wait(ctx context.Context) error { } log.Info("🤩 canary task container(s) is healthy!") log.Infof("canary task '%s' ensured.", *c.taskArn) - if c.lb == nil { + if c.LoadBalancer == nil { log.Infof("no load balancer is attached to service '%s'. skip registration to target group", c.Env.Service) return c.waitForIdleDuration(ctx) } else { @@ -131,7 +142,7 @@ func (c *CanaryTask) waitForIdleDuration(ctx context.Context) error { func (c *CanaryTask) waitUntilHealthCeheckPassed(ctx context.Context) error { log.Infof("😷 ensuring canary task container(s) to become healthy...") containerHasHealthChecks := map[string]struct{}{} - for _, definition := range c.td.ContainerDefinitions { + for _, definition := range c.TaskDefinition.ContainerDefinitions { if definition.HealthCheck != nil { containerHasHealthChecks[*definition.Name] = struct{}{} } @@ -185,8 +196,8 @@ func (c *CanaryTask) registerToTargetGroup(ctx context.Context) error { var targetId *string var targetPort *int32 var subnet ec2types.Subnet - for _, container := range c.td.ContainerDefinitions { - if *container.Name == *c.lb.ContainerName { + for _, container := range c.TaskDefinition.ContainerDefinitions { + if *container.Name == *c.LoadBalancer.ContainerName { targetPort = container.PortMappings[0].HostPort } } @@ -241,7 +252,7 @@ func (c *CanaryTask) registerToTargetGroup(ctx context.Context) error { log.Infof("canary task was placed: instanceId = '%s', hostPort = '%d', az = '%s'", *targetId, *targetPort, *subnet.AvailabilityZone) } if _, err := c.Alb.RegisterTargets(ctx, &elbv2.RegisterTargetsInput{ - TargetGroupArn: 
c.lb.TargetGroupArn, + TargetGroupArn: c.LoadBalancer.TargetGroupArn, Targets: []elbv2types.TargetDescription{{ AvailabilityZone: subnet.AvailabilityZone, Id: targetId, @@ -251,7 +262,7 @@ func (c *CanaryTask) registerToTargetGroup(ctx context.Context) error { return err } c.target = &CanaryTarget{ - targetGroupArn: c.lb.TargetGroupArn, + targetGroupArn: c.LoadBalancer.TargetGroupArn, targetId: targetId, targetPort: targetPort, availabilityZone: subnet.AvailabilityZone, diff --git a/canary_task_test.go b/canary_task_test.go new file mode 100644 index 0000000..bb8fbb9 --- /dev/null +++ b/canary_task_test.go @@ -0,0 +1,45 @@ +package cage_test + +import ( + "context" + "testing" + + "github.com/aws/aws-sdk-go-v2/aws" + ecstypes "github.com/aws/aws-sdk-go-v2/service/ecs/types" + "github.com/golang/mock/gomock" + cage "github.com/loilo-inc/canarycage" + "github.com/loilo-inc/canarycage/mocks/mock_awsiface" + "github.com/loilo-inc/canarycage/test" + "github.com/loilo-inc/canarycage/timeout" + "github.com/stretchr/testify/assert" +) + +func TestCanaryTask_Stop(t *testing.T) { + t.Run("should stop task even if deregister task failed", func(t *testing.T) { + ctrl := gomock.NewController(t) + ecsmock := mock_awsiface.NewMockEcsClient(ctrl) + albmock := mock_awsiface.NewMockAlbClient(ctrl) + mocker := test.NewMockContext() + env := test.DefaultEnvars() + c := &cage.CageExport{ + Input: &cage.Input{ + Env: env, + Alb: albmock, + Ecs: ecsmock, + }, + Timeout: timeout.Default, + } + task := cage.NewCanaryTask(c, &cage.CanaryTaskInput{ + LoadBalancer: &ecstypes.LoadBalancer{ + ContainerName: aws.String("container"), + ContainerPort: aws.Int32(80), + TargetGroupArn: aws.String("target-group-arn"), + }, + TaskDefinition: &ecstypes.TaskDefinition{ + TaskDefinitionArn: aws.String("task-definition-arn"), + }, + }) + err := task.Stop(context.TODO()) + assert.NoError(t, err) + }) +} diff --git a/rollout.go b/rollout.go index b0aaa20..7e76bbf 100644 --- a/rollout.go +++ b/rollout.go @@
-53,7 +53,7 @@ func (c *cage) RollOut(ctx context.Context, input *RollOutInput) (*RollOutResult } log.Infof("starting canary task...") canaryTasks, startCanaryTaskErr := c.StartCanaryTasks(ctx, nextTaskDefinition, input) - // ensure canary task stopped after rolling out either success or failure + // ensure canary tasks stopped after rolling out either success or failure defer func() { _ = recover() eg := errgroup.Group{} @@ -148,18 +148,23 @@ func (c *cage) StartCanaryTasks( } var results []*CanaryTask if len(loadBalancers) == 0 { - task := &CanaryTask{ - c, nextTaskDefinition, nil, networkConfiguration, platformVersion, nil, nil, - } + task := NewCanaryTask(c, &CanaryTaskInput{ + TaskDefinition: nextTaskDefinition, + NetworkConfiguration: networkConfiguration, + PlatformVersion: platformVersion, + }) results = append(results, task) if err := task.Start(ctx); err != nil { return results, err } } else { for _, lb := range loadBalancers { - task := &CanaryTask{ - c, nextTaskDefinition, &lb, networkConfiguration, platformVersion, nil, nil, - } + task := NewCanaryTask(c, &CanaryTaskInput{ + TaskDefinition: nextTaskDefinition, + LoadBalancer: &lb, + NetworkConfiguration: networkConfiguration, + PlatformVersion: platformVersion, + }) results = append(results, task) if err := task.Start(ctx); err != nil { return results, err diff --git a/timeout/timeout.go b/timeout/timeout.go index e9e7b1a..bee7975 100644 --- a/timeout/timeout.go +++ b/timeout/timeout.go @@ -21,6 +21,8 @@ type manager struct { DefaultTimeout time.Duration } +var Default = NewManager(10*time.Minute, &Input{}) + func NewManager( defaultTimeout time.Duration, input *Input,