diff --git a/helm/aws-load-balancer-controller/Chart.yaml b/helm/aws-load-balancer-controller/Chart.yaml
index e16be6bf6..9a0e2c217 100644
--- a/helm/aws-load-balancer-controller/Chart.yaml
+++ b/helm/aws-load-balancer-controller/Chart.yaml
@@ -1,7 +1,7 @@
 apiVersion: v2
 name: aws-load-balancer-controller
 description: AWS Load Balancer Controller Helm chart for Kubernetes
-version: 1.7.0
+version: 1.7.1
 appVersion: v2.7.0
 home: https://github.com/aws/eks-charts
 icon: https://raw.githubusercontent.com/aws/eks-charts/master/docs/logo/aws.png
diff --git a/helm/aws-load-balancer-controller/README.md b/helm/aws-load-balancer-controller/README.md
index 6e485a33e..5dd580324 100644
--- a/helm/aws-load-balancer-controller/README.md
+++ b/helm/aws-load-balancer-controller/README.md
@@ -267,3 +267,5 @@ The default values set by the application itself can be confirmed [here](https:/
 | `controllerConfig.featureGates` | set of `key: value` pairs that describe AWS load balance controller features | `{}` |
 | `ingressClassConfig.default` | If `true`, the ingressclass will be the default class of the cluster. | `false` |
 | `enableServiceMutatorWebhook` | If `false`, disable the Service Mutator webhook which makes all new services of type LoadBalancer reconciled by the lb controller | `true` |
+| `autoscaling` | If `autoscaling.enabled=true`, enable the HPA on the controller, mainly to survive load-induced failures of calls to the `aws-load-balancer-webhook-service`. Keep in mind that the controller pods have `priorityClassName: system-cluster-critical`; enabling the HPA may lead to the eviction of other low-priority pods on the node | `false` |
+| `serviceTargetENISGTags` | set of `key=value` pairs of AWS tags, in addition to the cluster name tag, used to find the target ENI security group to which inbound rules from NLBs are added | None |
diff --git a/helm/aws-load-balancer-controller/templates/deployment.yaml b/helm/aws-load-balancer-controller/templates/deployment.yaml
index 3984bf450..c1bed7b86 100644
--- a/helm/aws-load-balancer-controller/templates/deployment.yaml
+++ b/helm/aws-load-balancer-controller/templates/deployment.yaml
@@ -156,6 +156,9 @@ spec:
         {{- if ne .Values.defaultTargetType "instance" }}
         - --default-target-type={{ .Values.defaultTargetType }}
         {{- end }}
+        {{- if .Values.serviceTargetENISGTags }}
+        - --service-target-eni-security-group-tags={{ .Values.serviceTargetENISGTags }}
+        {{- end }}
         {{- if or .Values.env .Values.envSecretName }}
         env:
         {{- if .Values.env}}
diff --git a/helm/aws-load-balancer-controller/values.yaml b/helm/aws-load-balancer-controller/values.yaml
index 4145743b7..03d45e7b9 100644
--- a/helm/aws-load-balancer-controller/values.yaml
+++ b/helm/aws-load-balancer-controller/values.yaml
@@ -15,6 +15,11 @@ imagePullSecrets: []
 nameOverride: ""
 fullnameOverride: ""
 
+# The AWS LBC has only one active (leader) pod; the other pods are standby.
+# The purpose of enabling the HPA is to survive load-induced failures of calls to the aws-load-balancer-webhook-service:
+# calls from the kube-apiserver are sent round-robin to all replicas, and the failure policy on those webhooks is Fail,
+# so if the pods become overloaded and do not respond within the timeout, that can block the creation of pods, targetgroupbindings or ingresses.
+# Keep in mind that the controller pods have `priorityClassName: system-cluster-critical`; enabling the HPA may lead to the eviction of other low-priority pods on the node.
 autoscaling:
   enabled: false
   minReplicas: 1
@@ -380,3 +385,6 @@ ingressClassConfig:
 
 # enableServiceMutatorWebhook allows you enable the webhook which makes this controller the default for all new services of type LoadBalancer
 enableServiceMutatorWebhook: true
+
+# serviceTargetENISGTags specifies AWS tags, in addition to the cluster name tag, for finding the target ENI SG to which to add inbound rules from NLBs.
+serviceTargetENISGTags:
diff --git a/pkg/deploy/elbv2/listener_manager.go b/pkg/deploy/elbv2/listener_manager.go
index 0fff98231..22400da9d 100644
--- a/pkg/deploy/elbv2/listener_manager.go
+++ b/pkg/deploy/elbv2/listener_manager.go
@@ -161,7 +161,7 @@ func (m *defaultListenerManager) updateSDKListenerWithExtraCertificates(ctx cont
     sdkLS ListenerWithTags, isNewSDKListener bool) error {
     // if TLS is not supported, we shouldn't update
     if resLS.Spec.SSLPolicy == nil && sdkLS.Listener.SslPolicy == nil {
-        m.logger.V(1).Info("Res and Sdk Listener don't have SSL Policy set, skip updating extra certs for non-TLS listener.")
+        m.logger.V(1).Info("Res and Sdk Listener don't have SSL Policy set, skipping update of extra certs for non-TLS listener.")
        return nil
     }
 
diff --git a/pkg/deploy/elbv2/tagging_manager.go b/pkg/deploy/elbv2/tagging_manager.go
index bea61659a..33caef61e 100644
--- a/pkg/deploy/elbv2/tagging_manager.go
+++ b/pkg/deploy/elbv2/tagging_manager.go
@@ -259,6 +259,7 @@ func (m *defaultTaggingManager) ListListenerRules(ctx context.Context, lsARN str
 // TODO: we can refactor this by store provisioned LB's ARN as annotations on Ingress/Service, thus avoid this heavy lookup calls when RGT is not available.
 func (m *defaultTaggingManager) ListLoadBalancers(ctx context.Context, tagFilters ...tracking.TagFilter) ([]LoadBalancerWithTags, error) {
     if m.featureGates.Enabled(config.EnableRGTAPI) {
+        m.logger.V(1).Info("ResourceGroupTagging enabled, listing the load balancers via RGT API")
         return m.listLoadBalancersRGT(ctx, tagFilters)
     }
     return m.listLoadBalancersNative(ctx, tagFilters)
@@ -266,6 +267,7 @@ func (m *defaultTaggingManager) ListLoadBalancers(ctx context.Context, tagFilter
 
 func (m *defaultTaggingManager) ListTargetGroups(ctx context.Context, tagFilters ...tracking.TagFilter) ([]TargetGroupWithTags, error) {
     if m.featureGates.Enabled(config.EnableRGTAPI) {
+        m.logger.V(1).Info("ResourceGroupTagging enabled, listing the target groups via RGT API")
         return m.listTargetGroupsRGT(ctx, tagFilters)
     }
     return m.listTargetGroupsNative(ctx, tagFilters)
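For reference, a minimal values-override sketch showing how the two new chart options from this change might be used together. The file name `my-values.yaml`, the tag keys and values, and the comma-separated `key=value` string format are illustrative assumptions, not defaults shipped with the chart.

```yaml
# my-values.yaml (hypothetical override file, not part of the chart)

# Enable the HPA described in the values.yaml comment above; minReplicas exists in
# the chart's values.yaml, but the value of 2 here is only an example.
autoscaling:
  enabled: true
  minReplicas: 2

# Extra AWS tags used, together with the cluster name tag, to locate the target ENI
# security group that receives inbound rules from NLBs. The comma-separated
# key=value form shown here is an assumption about how the flag parses its input.
serviceTargetENISGTags: "team=platform,environment=prod"
```

Applied with something like `helm upgrade aws-load-balancer-controller eks/aws-load-balancer-controller -n kube-system -f my-values.yaml`, the tag string is passed through to the new `--service-target-eni-security-group-tags` controller flag added in the deployment template above.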