diff --git a/apis/elbv2/v1alpha1/targetgroupbinding_types.go b/apis/elbv2/v1alpha1/targetgroupbinding_types.go
index eba7f01b8e..78c4f5b8ea 100644
--- a/apis/elbv2/v1alpha1/targetgroupbinding_types.go
+++ b/apis/elbv2/v1alpha1/targetgroupbinding_types.go
@@ -109,6 +109,10 @@ type TargetGroupBindingSpec struct {
// targetGroupARN is the Amazon Resource Name (ARN) for the TargetGroup.
TargetGroupARN string `json:"targetGroupARN"`
+ // MultiClusterTargetGroup denotes if the TargetGroup is shared among multiple clusters
+ // +optional
+ MultiClusterTargetGroup bool `json:"multiClusterTargetGroup,omitempty"`
+
// targetType is the TargetType of TargetGroup. If unspecified, it will be automatically inferred.
// +optional
TargetType *TargetType `json:"targetType,omitempty"`
diff --git a/apis/elbv2/v1beta1/targetgroupbinding_types.go b/apis/elbv2/v1beta1/targetgroupbinding_types.go
index 0ec065709b..437f2d3a62 100644
--- a/apis/elbv2/v1beta1/targetgroupbinding_types.go
+++ b/apis/elbv2/v1beta1/targetgroupbinding_types.go
@@ -127,6 +127,10 @@ type TargetGroupBindingSpec struct {
// +kubebuilder:validation:MinLength=1
TargetGroupARN string `json:"targetGroupARN"`
+ // MultiClusterTargetGroup denotes if the TargetGroup is shared among multiple clusters
+ // +optional
+ MultiClusterTargetGroup bool `json:"multiClusterTargetGroup,omitempty"`
+
// targetType is the TargetType of TargetGroup. If unspecified, it will be automatically inferred.
// +optional
TargetType *TargetType `json:"targetType,omitempty"`
diff --git a/config/crd/bases/elbv2.k8s.aws_targetgroupbindings.yaml b/config/crd/bases/elbv2.k8s.aws_targetgroupbindings.yaml
index c5bfd257a0..8f2dd41d00 100644
--- a/config/crd/bases/elbv2.k8s.aws_targetgroupbindings.yaml
+++ b/config/crd/bases/elbv2.k8s.aws_targetgroupbindings.yaml
@@ -60,6 +60,10 @@ spec:
spec:
description: TargetGroupBindingSpec defines the desired state of TargetGroupBinding
properties:
+ multiClusterTargetGroup:
+ description: MultiClusterTargetGroup denotes if the TargetGroup is
+ shared among multiple clusters
+ type: boolean
networking:
description: networking provides the networking setup for ELBV2 LoadBalancer
to access targets in TargetGroup.
@@ -233,6 +237,10 @@ spec:
- ipv4
- ipv6
type: string
+ multiClusterTargetGroup:
+ description: MultiClusterTargetGroup denotes if the TargetGroup is
+ shared among multiple clusters
+ type: boolean
networking:
description: networking defines the networking rules to allow ELBV2
LoadBalancer to access targets in TargetGroup.
diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml
index c9d2abe927..d21f142f44 100644
--- a/config/rbac/role.yaml
+++ b/config/rbac/role.yaml
@@ -4,6 +4,15 @@ kind: ClusterRole
metadata:
name: controller-role
rules:
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - create
+ - delete
+ - get
+ - update
- apiGroups:
- ""
resources:
diff --git a/controllers/elbv2/targetgroupbinding_controller.go b/controllers/elbv2/targetgroupbinding_controller.go
index 0f48d18cb2..d80b90982c 100644
--- a/controllers/elbv2/targetgroupbinding_controller.go
+++ b/controllers/elbv2/targetgroupbinding_controller.go
@@ -87,6 +87,7 @@ type targetGroupBindingReconciler struct {
// +kubebuilder:rbac:groups="",resources=nodes,verbs=get;list;watch
// +kubebuilder:rbac:groups="",resources=endpoints,verbs=get;list;watch
// +kubebuilder:rbac:groups="",resources=namespaces,verbs=get;list;watch
+// +kubebuilder:rbac:groups="",resources=configmaps,verbs=get;delete;create;update
// +kubebuilder:rbac:groups="",resources=events,verbs=create;patch
// +kubebuilder:rbac:groups="discovery.k8s.io",resources=endpointslices,verbs=get;list;watch
diff --git a/docs/guide/ingress/annotations.md b/docs/guide/ingress/annotations.md
index 90e110c14b..e7f6e4295e 100644
--- a/docs/guide/ingress/annotations.md
+++ b/docs/guide/ingress/annotations.md
@@ -15,50 +15,51 @@ You can add annotations to kubernetes Ingress and Service objects to customize t
## Annotations
| Name | Type |Default| Location | MergeBehavior |
-|-------------------------------------------------------------------------------------------------------|-----------------------------|------|-----------------|-----------|
-| [alb.ingress.kubernetes.io/load-balancer-name](#load-balancer-name) | string |N/A| Ingress | Exclusive |
-| [alb.ingress.kubernetes.io/group.name](#group.name) | string |N/A| Ingress | N/A |
-| [alb.ingress.kubernetes.io/group.order](#group.order) | integer |0| Ingress | N/A |
-| [alb.ingress.kubernetes.io/tags](#tags) | stringMap |N/A| Ingress,Service | Merge |
-| [alb.ingress.kubernetes.io/ip-address-type](#ip-address-type) | ipv4 \| dualstack \| dualstack-without-public-ipv4 |ipv4| Ingress | Exclusive |
-| [alb.ingress.kubernetes.io/scheme](#scheme) | internal \| internet-facing |internal| Ingress | Exclusive |
-| [alb.ingress.kubernetes.io/subnets](#subnets) | stringList |N/A| Ingress | Exclusive |
-| [alb.ingress.kubernetes.io/security-groups](#security-groups) | stringList |N/A| Ingress | Exclusive |
-| [alb.ingress.kubernetes.io/manage-backend-security-group-rules](#manage-backend-security-group-rules) | boolean |N/A| Ingress | Exclusive |
-| [alb.ingress.kubernetes.io/customer-owned-ipv4-pool](#customer-owned-ipv4-pool) | string |N/A| Ingress | Exclusive |
-| [alb.ingress.kubernetes.io/load-balancer-attributes](#load-balancer-attributes) | stringMap |N/A| Ingress | Exclusive |
-| [alb.ingress.kubernetes.io/wafv2-acl-arn](#wafv2-acl-arn) | string |N/A| Ingress | Exclusive |
-| [alb.ingress.kubernetes.io/waf-acl-id](#waf-acl-id) | string |N/A| Ingress | Exclusive |
-| [alb.ingress.kubernetes.io/shield-advanced-protection](#shield-advanced-protection) | boolean |N/A| Ingress | Exclusive |
-| [alb.ingress.kubernetes.io/listen-ports](#listen-ports) | json |'[{"HTTP": 80}]' \| '[{"HTTPS": 443}]'| Ingress | Merge |
-| [alb.ingress.kubernetes.io/ssl-redirect](#ssl-redirect) | integer |N/A| Ingress | Exclusive |
-| [alb.ingress.kubernetes.io/inbound-cidrs](#inbound-cidrs) | stringList |0.0.0.0/0, ::/0| Ingress | Exclusive |
-| [alb.ingress.kubernetes.io/security-group-prefix-lists](#security-group-prefix-lists) | stringList |pl-00000000, pl-1111111| Ingress | Exclusive |
-| [alb.ingress.kubernetes.io/certificate-arn](#certificate-arn) | stringList |N/A| Ingress | Merge |
-| [alb.ingress.kubernetes.io/ssl-policy](#ssl-policy) | string |ELBSecurityPolicy-2016-08| Ingress | Exclusive |
-| [alb.ingress.kubernetes.io/target-type](#target-type) | instance \| ip |instance| Ingress,Service | N/A |
-| [alb.ingress.kubernetes.io/backend-protocol](#backend-protocol) | HTTP \| HTTPS |HTTP| Ingress,Service | N/A |
-| [alb.ingress.kubernetes.io/backend-protocol-version](#backend-protocol-version) | string | HTTP1 | Ingress,Service | N/A |
-| [alb.ingress.kubernetes.io/target-group-attributes](#target-group-attributes) | stringMap |N/A| Ingress,Service | N/A |
-| [alb.ingress.kubernetes.io/healthcheck-port](#healthcheck-port) | integer \| traffic-port |traffic-port| Ingress,Service | N/A |
-| [alb.ingress.kubernetes.io/healthcheck-protocol](#healthcheck-protocol) | HTTP \| HTTPS |HTTP| Ingress,Service | N/A |
-| [alb.ingress.kubernetes.io/healthcheck-path](#healthcheck-path) | string |/ \| /AWS.ALB/healthcheck | Ingress,Service | N/A |
-| [alb.ingress.kubernetes.io/healthcheck-interval-seconds](#healthcheck-interval-seconds) | integer |'15'| Ingress,Service | N/A |
-| [alb.ingress.kubernetes.io/healthcheck-timeout-seconds](#healthcheck-timeout-seconds) | integer |'5'| Ingress,Service | N/A |
-| [alb.ingress.kubernetes.io/healthy-threshold-count](#healthy-threshold-count) | integer |'2'| Ingress,Service | N/A |
-| [alb.ingress.kubernetes.io/unhealthy-threshold-count](#unhealthy-threshold-count) | integer |'2'| Ingress,Service | N/A |
-| [alb.ingress.kubernetes.io/success-codes](#success-codes) | string |'200' \| '12' | Ingress,Service | N/A |
-| [alb.ingress.kubernetes.io/auth-type](#auth-type) | none\|oidc\|cognito |none| Ingress,Service | N/A |
-| [alb.ingress.kubernetes.io/auth-idp-cognito](#auth-idp-cognito) | json |N/A| Ingress,Service | N/A |
-| [alb.ingress.kubernetes.io/auth-idp-oidc](#auth-idp-oidc) | json |N/A| Ingress,Service | N/A |
-| [alb.ingress.kubernetes.io/auth-on-unauthenticated-request](#auth-on-unauthenticated-request) | authenticate\|allow\|deny |authenticate| Ingress,Service | N/A |
-| [alb.ingress.kubernetes.io/auth-scope](#auth-scope) | string |openid| Ingress,Service | N/A |
-| [alb.ingress.kubernetes.io/auth-session-cookie](#auth-session-cookie) | string |AWSELBAuthSessionCookie| Ingress,Service | N/A |
-| [alb.ingress.kubernetes.io/auth-session-timeout](#auth-session-timeout) | integer |'604800'| Ingress,Service | N/A |
-| [alb.ingress.kubernetes.io/actions.${action-name}](#actions) | json |N/A| Ingress | N/A |
-| [alb.ingress.kubernetes.io/conditions.${conditions-name}](#conditions) | json |N/A| Ingress | N/A |
-| [alb.ingress.kubernetes.io/target-node-labels](#target-node-labels) | stringMap |N/A| Ingress,Service | N/A |
-| [alb.ingress.kubernetes.io/mutual-authentication](#mutual-authentication) | json |N/A| Ingress |Exclusive|
+|-------------------------------------------------------------------------------------------------------|-----------------------------|------|-----------------|---------------|
+| [alb.ingress.kubernetes.io/load-balancer-name](#load-balancer-name) | string |N/A| Ingress | Exclusive |
+| [alb.ingress.kubernetes.io/group.name](#group.name) | string |N/A| Ingress | N/A |
+| [alb.ingress.kubernetes.io/group.order](#group.order) | integer |0| Ingress | N/A |
+| [alb.ingress.kubernetes.io/tags](#tags) | stringMap |N/A| Ingress,Service | Merge |
+| [alb.ingress.kubernetes.io/ip-address-type](#ip-address-type) | ipv4 \| dualstack \| dualstack-without-public-ipv4 |ipv4| Ingress | Exclusive |
+| [alb.ingress.kubernetes.io/scheme](#scheme) | internal \| internet-facing |internal| Ingress | Exclusive |
+| [alb.ingress.kubernetes.io/subnets](#subnets) | stringList |N/A| Ingress | Exclusive |
+| [alb.ingress.kubernetes.io/security-groups](#security-groups) | stringList |N/A| Ingress | Exclusive |
+| [alb.ingress.kubernetes.io/manage-backend-security-group-rules](#manage-backend-security-group-rules) | boolean |N/A| Ingress | Exclusive |
+| [alb.ingress.kubernetes.io/customer-owned-ipv4-pool](#customer-owned-ipv4-pool) | string |N/A| Ingress | Exclusive |
+| [alb.ingress.kubernetes.io/load-balancer-attributes](#load-balancer-attributes) | stringMap |N/A| Ingress | Exclusive |
+| [alb.ingress.kubernetes.io/wafv2-acl-arn](#wafv2-acl-arn) | string |N/A| Ingress | Exclusive |
+| [alb.ingress.kubernetes.io/waf-acl-id](#waf-acl-id) | string |N/A| Ingress | Exclusive |
+| [alb.ingress.kubernetes.io/shield-advanced-protection](#shield-advanced-protection) | boolean |N/A| Ingress | Exclusive |
+| [alb.ingress.kubernetes.io/listen-ports](#listen-ports) | json |'[{"HTTP": 80}]' \| '[{"HTTPS": 443}]'| Ingress | Merge |
+| [alb.ingress.kubernetes.io/ssl-redirect](#ssl-redirect) | integer |N/A| Ingress | Exclusive |
+| [alb.ingress.kubernetes.io/inbound-cidrs](#inbound-cidrs) | stringList |0.0.0.0/0, ::/0| Ingress | Exclusive |
+| [alb.ingress.kubernetes.io/security-group-prefix-lists](#security-group-prefix-lists) | stringList |pl-00000000, pl-1111111| Ingress | Exclusive |
+| [alb.ingress.kubernetes.io/certificate-arn](#certificate-arn) | stringList |N/A| Ingress | Merge |
+| [alb.ingress.kubernetes.io/ssl-policy](#ssl-policy) | string |ELBSecurityPolicy-2016-08| Ingress | Exclusive |
+| [alb.ingress.kubernetes.io/target-type](#target-type) | instance \| ip |instance| Ingress,Service | N/A |
+| [alb.ingress.kubernetes.io/backend-protocol](#backend-protocol) | HTTP \| HTTPS |HTTP| Ingress,Service | N/A |
+| [alb.ingress.kubernetes.io/backend-protocol-version](#backend-protocol-version) | string | HTTP1 | Ingress,Service | N/A |
+| [alb.ingress.kubernetes.io/target-group-attributes](#target-group-attributes) | stringMap |N/A| Ingress,Service | N/A |
+| [alb.ingress.kubernetes.io/healthcheck-port](#healthcheck-port) | integer \| traffic-port |traffic-port| Ingress,Service | N/A |
+| [alb.ingress.kubernetes.io/healthcheck-protocol](#healthcheck-protocol) | HTTP \| HTTPS |HTTP| Ingress,Service | N/A |
+| [alb.ingress.kubernetes.io/healthcheck-path](#healthcheck-path) | string |/ \| /AWS.ALB/healthcheck | Ingress,Service | N/A |
+| [alb.ingress.kubernetes.io/healthcheck-interval-seconds](#healthcheck-interval-seconds) | integer |'15'| Ingress,Service | N/A |
+| [alb.ingress.kubernetes.io/healthcheck-timeout-seconds](#healthcheck-timeout-seconds) | integer |'5'| Ingress,Service | N/A |
+| [alb.ingress.kubernetes.io/healthy-threshold-count](#healthy-threshold-count) | integer |'2'| Ingress,Service | N/A |
+| [alb.ingress.kubernetes.io/unhealthy-threshold-count](#unhealthy-threshold-count) | integer |'2'| Ingress,Service | N/A |
+| [alb.ingress.kubernetes.io/success-codes](#success-codes) | string |'200' \| '12' | Ingress,Service | N/A |
+| [alb.ingress.kubernetes.io/auth-type](#auth-type) | none\|oidc\|cognito |none| Ingress,Service | N/A |
+| [alb.ingress.kubernetes.io/auth-idp-cognito](#auth-idp-cognito) | json |N/A| Ingress,Service | N/A |
+| [alb.ingress.kubernetes.io/auth-idp-oidc](#auth-idp-oidc) | json |N/A| Ingress,Service | N/A |
+| [alb.ingress.kubernetes.io/auth-on-unauthenticated-request](#auth-on-unauthenticated-request) | authenticate\|allow\|deny |authenticate| Ingress,Service | N/A |
+| [alb.ingress.kubernetes.io/auth-scope](#auth-scope) | string |openid| Ingress,Service | N/A |
+| [alb.ingress.kubernetes.io/auth-session-cookie](#auth-session-cookie) | string |AWSELBAuthSessionCookie| Ingress,Service | N/A |
+| [alb.ingress.kubernetes.io/auth-session-timeout](#auth-session-timeout) | integer |'604800'| Ingress,Service | N/A |
+| [alb.ingress.kubernetes.io/actions.${action-name}](#actions) | json |N/A| Ingress | N/A |
+| [alb.ingress.kubernetes.io/conditions.${conditions-name}](#conditions) | json |N/A| Ingress | N/A |
+| [alb.ingress.kubernetes.io/target-node-labels](#target-node-labels) | stringMap |N/A| Ingress,Service | N/A |
+| [alb.ingress.kubernetes.io/mutual-authentication](#mutual-authentication) | json |N/A| Ingress | Exclusive |
+| [alb.ingress.kubernetes.io/multi-cluster-target-group](#multi-cluster-target-group)                    | boolean                     |N/A| Ingress,Service | N/A           |
## IngressGroup
IngressGroup feature enables you to group multiple Ingress resources together.
@@ -887,6 +888,22 @@ Custom attributes to LoadBalancers and TargetGroups can be controlled with follo
alb.ingress.kubernetes.io/target-group-attributes: load_balancing.algorithm.type=weighted_random,load_balancing.algorithm.anomaly_mitigation=on
```
+
+- `alb.ingress.kubernetes.io/multi-cluster-target-group` Allows you to share the created Target Group ARN with other Load Balancer Controller managed clusters.
+
+ !!!warning ""
+ This feature does not offer any deletion protection: deleting the resource will still delete the Target Group. If you need a Target Group that is shared across
+ multiple clusters to be protected from deletion, it's recommended to use an out-of-band Target Group that is not managed by the Load Balancer Controller.
+
+ !!!note ""
+ - Changing this value after creation is not recommended. Set it when the Ingress or Service is first created.
+
+ !!!example
+ ```
+ alb.ingress.kubernetes.io/multi-cluster-target-group: "true"
+ ```
+
+
## Resource Tags
The AWS Load Balancer Controller automatically applies following tags to the AWS resources (ALB/TargetGroups/SecurityGroups/Listener/ListenerRule) it creates:
diff --git a/docs/guide/service/annotations.md b/docs/guide/service/annotations.md
index 60dda617fb..c9dc89f9b7 100644
--- a/docs/guide/service/annotations.md
+++ b/docs/guide/service/annotations.md
@@ -52,7 +52,8 @@
| [service.beta.kubernetes.io/aws-load-balancer-security-groups](#security-groups) | stringList | | |
| [service.beta.kubernetes.io/aws-load-balancer-manage-backend-security-group-rules](#manage-backend-sg-rules) | boolean | true | If `service.beta.kubernetes.io/aws-load-balancer-security-groups` is specified, this must also be explicitly specified otherwise it defaults to `false`. |
| [service.beta.kubernetes.io/aws-load-balancer-inbound-sg-rules-on-private-link-traffic](#update-security-settings) | string | |
-| [service.beta.kubernetes.io/aws-load-balancer-listener-attributes.${Protocol}-${Port}](#listener-attributes) | stringMap | |
+| [service.beta.kubernetes.io/aws-load-balancer-listener-attributes.${Protocol}-${Port}](#listener-attributes) | stringMap | |
+| [service.beta.kubernetes.io/aws-load-balancer-multi-cluster-target-group](#multi-cluster-target-group) | boolean | false | If enabled, the controller only operates on targets that belong to this cluster and leaves targets registered by other sources untouched. |
## Traffic Routing
Traffic Routing can be controlled with following annotations:
@@ -289,6 +290,20 @@ for proxy protocol v2 configuration.
service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled
```
+- `service.beta.kubernetes.io/aws-load-balancer-multi-cluster-target-group` Allows you to share the created Target Group ARN with other Load Balancer Controller managed clusters.
+
+ !!!warning ""
+ This feature does not offer any deletion protection: deleting the Service will still delete the Target Group. If you need a Target Group that is shared across
+ multiple clusters to be protected from deletion, it's recommended to use an out-of-band Target Group that is not managed by the Load Balancer Controller.
+
+ !!!note ""
+ - Changing this value after creation is not recommended. Set it when the Service is first created.
+
+ !!!example
+ ```
+ service.beta.kubernetes.io/aws-load-balancer-multi-cluster-target-group: "true"
+ ```
+
## AWS Resource Tags
The AWS Load Balancer Controller automatically applies following tags to the AWS resources it creates (NLB/TargetGroups/Listener/ListenerRule):
diff --git a/docs/guide/targetgroupbinding/targetgroupbinding.md b/docs/guide/targetgroupbinding/targetgroupbinding.md
index 36cdf065d3..8da8ff89ee 100644
--- a/docs/guide/targetgroupbinding/targetgroupbinding.md
+++ b/docs/guide/targetgroupbinding/targetgroupbinding.md
@@ -92,6 +92,37 @@ spec:
...
```
+## MultiCluster Target Group
+TargetGroupBinding CRD supports sharing the same target group ARN among multiple clusters. Setting this flag will ensure the controller only operates on targets within the cluster.
+
+!!!tip ""
+ The default value is false, meaning that the controller assumes full control over the target group and will deregister any targets that are not found within the cluster.
+ To set this flag for TargetGroupBindings managed by the controller, use one of the following annotations (see the Service sketch below for the NLB case):
+ ALB: `alb.ingress.kubernetes.io/multi-cluster-target-group: "true"`
+ NLB: `service.beta.kubernetes.io/aws-load-balancer-multi-cluster-target-group: "true"`
+
+
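+A minimal sketch of a Service that opts its controller-managed TargetGroupBinding into MultiCluster mode for an NLB. The service name, selector, and ports are illustrative, and it assumes the controller provisions the NLB via `spec.loadBalancerClass: service.k8s.aws/nlb`:
+
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: awesome-service
+  annotations:
+    # Mark the Target Group created for this Service as shared across clusters
+    service.beta.kubernetes.io/aws-load-balancer-multi-cluster-target-group: "true"
+spec:
+  type: LoadBalancer
+  loadBalancerClass: service.k8s.aws/nlb
+  selector:
+    app: awesome-app
+  ports:
+    - port: 80
+      targetPort: 8080
+```
+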
+!!!warning ""
+ It is not recommended to change this value after TGB creation. Changing between shared / not shared might lead to leaked targets.
+
+!!!warning ""
+ Only use this flag if you intend to share the target group ARN across multiple clusters. This flag will slow down reconciles and put a small additional load on the Kubernetes control plane.
+
+
+## Sample YAML
+```yaml
+apiVersion: elbv2.k8s.aws/v1beta1
+kind: TargetGroupBinding
+metadata:
+ name: my-tgb
+spec:
+ serviceRef:
+ name: awesome-service # route traffic to the awesome-service
+ port: 80
+ targetGroupARN: <arn-to-targetGroup>
+ multiClusterTargetGroup: true
+```
+
## Reference
See the [reference](./spec.md) for TargetGroupBinding CR
diff --git a/docs/guide/use_cases/blue_green/index.md b/docs/guide/use_cases/blue_green/index.md
index 30fb73e32e..e62ac37390 100644
--- a/docs/guide/use_cases/blue_green/index.md
+++ b/docs/guide/use_cases/blue_green/index.md
@@ -11,7 +11,7 @@ Multiple target groups can be attached to the same forward action of a listener
In addition to the weighted target group, AWS announced the advanced request routing feature in 2019. Advanced request routing gives developers the ability to write rules (and route traffic) based on standard and custom HTTP headers and methods, the request path, the query string, and the source IP address. This new feature simplifies the application architecture by eliminating the need for a proxy fleet for routing, blocks unwanted traffic at the load balancer, and enables the implementation of A/B testing.
## Overview
-The ALB is configured to split traffic using annotations on the ingress resrouces. More specifically, the [ingress annotation](../../../guide/ingress/annotations.md#actions) `alb.ingress.kubernetes.io/actions.${service-name}` configures custom actions on the listener.
+The ALB is configured to split traffic using annotations on the ingress resources. More specifically, the [ingress annotation](../../../guide/ingress/annotations.md#actions) `alb.ingress.kubernetes.io/actions.${service-name}` configures custom actions on the listener.
The body of the annotation is a JSON document that identifies an action type, and configures it. The supported [actions](https://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-listeners.html#rule-action-types) are `redirect`, `forward`, and `fixed-response`.
diff --git a/docs/guide/use_cases/multi_cluster/index.md b/docs/guide/use_cases/multi_cluster/index.md
new file mode 100644
index 0000000000..4fdc127c9a
--- /dev/null
+++ b/docs/guide/use_cases/multi_cluster/index.md
@@ -0,0 +1,121 @@
+# MultiCluster Target Groups
+
+The load balancer controller assumes full control over the configured target groups: when a target group is registered with the controller, it deregisters any targets that are not currently in the cluster. Target groups that have MultiCluster support enabled can be associated with multiple Kubernetes clusters, or can include arbitrary targets registered from other sources.
+
+
+## Overview
+
+MultiCluster mode can be enabled in several ways, and every cluster that shares a target group must enable it through one of these methods. It's recommended to use new resources when configuring MultiCluster mode. When MultiCluster mode is enabled, the controller takes a snapshot of the targets it manages in the cluster in order to support the selected mode. This data is stored in a ConfigMap that resides in the same namespace as your load balancer resources. The ConfigMap is named `aws-lbc-targets-$TARGET_GROUP_BINDING_NAME` and stores a snapshot of the targets managed by this cluster, as sketched below.
+
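+For reference, the tracking ConfigMap written by the controller looks roughly like the sketch below; the object names and target entries are illustrative. The controller stores the targets it manages as a comma-separated list of `ip:port` (IP targets) or `instance-id:port` (instance targets) identifiers under the `targets` key:
+
+```
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: aws-lbc-targets-my-tgb
+  namespace: my-namespace
+data:
+  targets: 10.0.1.12:80,10.0.2.34:80
+```
+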
+When using an ALB, you must specify this annotation in the ingress or service:
+
+`alb.ingress.kubernetes.io/multi-cluster-target-group: "true"`
+
+When using an NLB, you specify this annotation in your service:
+
+`service.beta.kubernetes.io/aws-load-balancer-multi-cluster-target-group: "true"`
+
+When using any out-of-band TargetGroupBindings, you must specify this field in the spec:
+
+`multiClusterTargetGroup: true`
+
+
+### Example
+
+We will be setting up an echoserver in two clusters in order to demonstrate MultiCluster mode. See the full echoserver example in the 'Examples' tab.
+
+The following Ingress configures the TargetGroupBinding as MultiCluster. We will take the created Target Group and share it with a second cluster.
+
+```
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: echoserver
+ namespace: echoserver
+ annotations:
+ alb.ingress.kubernetes.io/multi-cluster-target-group: "true"
+ alb.ingress.kubernetes.io/scheme: internet-facing
+ alb.ingress.kubernetes.io/tags: Environment=dev,Team=test
+spec:
+ ingressClassName: alb
+ rules:
+ - http:
+ paths:
+ - path: /
+ pathType: Exact
+ backend:
+ service:
+ name: echoserver
+ port:
+ number: 80
+```
+
+Verify that MultiCluster mode is enabled by checking that the created TargetGroupBinding is marked as MultiCluster.
+
+```
+kubectl -n echoserver get targetgroupbinding k8s-echoserv-echoserv-cc0122e143 -o yaml
+apiVersion: elbv2.k8s.aws/v1beta1
+kind: TargetGroupBinding
+metadata:
+ annotations:
+ elbv2.k8s.aws/checkpoint: cKay81gadoTtBSg6uVVginqtmCVG-1ApTvYN4YLD37U/_4kBy3Yg64qrXzjvIb2LlC3O__ex1qjozynsqHXmPgo
+ elbv2.k8s.aws/checkpoint-timestamp: "1729021572"
+ creationTimestamp: "2024-10-15T19:46:06Z"
+ finalizers:
+ - elbv2.k8s.aws/resources
+ generation: 1
+ labels:
+ ingress.k8s.aws/stack-name: echoserver
+ ingress.k8s.aws/stack-namespace: echoserver
+ name: k8s-echoserv-echoserv-cc0122e143
+ namespace: echoserver
+ resourceVersion: "79121011"
+ uid: 9ceaa2ea-14bb-44a5-abb0-69c7d2aac52c
+spec:
+ ipAddressType: ipv4
+ multiClusterTargetGroup: true <<< HERE
+ networking:
+ ingress:
+ - from:
+ - securityGroup:
+ groupID: sg-06a2bd7d790ac1d2e
+ ports:
+ - port: 32197
+ protocol: TCP
+ serviceRef:
+ name: echoserver
+ port: 80
+ targetGroupARN: arn:aws:elasticloadbalancing:us-east-1:565768096483:targetgroup/k8s-echoserv-echoserv-cc0122e143/6816b87346280ee7
+ targetType: instance
+ vpcID: vpc-0a7ef5bd8943067a8
+```
+
+In another cluster, you can now register that Target Group ARN in a Target Group Binding.
+
+```
+apiVersion: elbv2.k8s.aws/v1beta1
+kind: TargetGroupBinding
+metadata:
+ name: my-targetgroupbinding
+ namespace: echoserver
+spec:
+ serviceRef:
+ name: echoserver
+ port: 80
+ multiClusterTargetGroup: true
+ targetType: instance
+ ipAddressType: ipv4
+ networking:
+ ingress:
+ - from:
+ - securityGroup:
+ groupID: $SG_FROM_ABOVE
+ ports:
+ - port: 32197
+ protocol: TCP
+ targetGroupARN: $TG_FROM_ABOVE
+```
+
+The configured Target Group should now have targets from both clusters available to serve traffic.
+
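+To confirm from the AWS side, you can list the registered targets for the shared target group with the AWS CLI (assuming your credentials and region are configured):
+
+```
+aws elbv2 describe-target-health --target-group-arn $TG_FROM_ABOVE
+```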
+
diff --git a/helm/aws-load-balancer-controller/crds/crds.yaml b/helm/aws-load-balancer-controller/crds/crds.yaml
index e2d92380ba..55200c7974 100644
--- a/helm/aws-load-balancer-controller/crds/crds.yaml
+++ b/helm/aws-load-balancer-controller/crds/crds.yaml
@@ -392,6 +392,10 @@ spec:
- name
- port
type: object
+ multiClusterTargetGroup:
+ description: multiClusterTargetGroup denotes if the TargetGroup is shared
+ among multiple clusters
+ type: boolean
targetGroupARN:
description: targetGroupARN is the Amazon Resource Name (ARN) for
the TargetGroup.
@@ -616,6 +620,10 @@ spec:
- name
- port
type: object
+ multiClusterTargetGroup:
+ description: multiClusterTargetGroup denotes if the TargetGroup is shared
+ among multiple clusters
+ type: boolean
targetGroupARN:
description: targetGroupARN is the Amazon Resource Name (ARN) for
the TargetGroup.
diff --git a/helm/aws-load-balancer-controller/templates/rbac.yaml b/helm/aws-load-balancer-controller/templates/rbac.yaml
index fc3bda695e..0dcc68c774 100644
--- a/helm/aws-load-balancer-controller/templates/rbac.yaml
+++ b/helm/aws-load-balancer-controller/templates/rbac.yaml
@@ -75,6 +75,9 @@ rules:
- apiGroups: [""]
resources: [nodes, namespaces, endpoints]
verbs: [get, list, watch]
+- apiGroups: [""]
+ resources: [configmaps]
+ verbs: [get, delete, create, update]
{{- if .Values.clusterSecretsPermissions.allowAllSecrets }}
- apiGroups: [""]
resources: [secrets]
diff --git a/main.go b/main.go
index e3426cd699..b6484f24fd 100644
--- a/main.go
+++ b/main.go
@@ -111,8 +111,9 @@ func main() {
azInfoProvider := networking.NewDefaultAZInfoProvider(cloud.EC2(), ctrl.Log.WithName("az-info-provider"))
vpcInfoProvider := networking.NewDefaultVPCInfoProvider(cloud.EC2(), ctrl.Log.WithName("vpc-info-provider"))
subnetResolver := networking.NewDefaultSubnetsResolver(azInfoProvider, cloud.EC2(), cloud.VpcID(), controllerCFG.ClusterName, ctrl.Log.WithName("subnets-resolver"))
+ multiClusterManager := targetgroupbinding.NewMultiClusterManager(mgr.GetClient(), mgr.GetAPIReader(), ctrl.Log)
tgbResManager := targetgroupbinding.NewDefaultResourceManager(mgr.GetClient(), cloud.ELBV2(), cloud.EC2(),
- podInfoRepo, sgManager, sgReconciler, vpcInfoProvider,
+ podInfoRepo, sgManager, sgReconciler, vpcInfoProvider, multiClusterManager,
cloud.VpcID(), controllerCFG.ClusterName, controllerCFG.FeatureGates.Enabled(config.EndpointsFailOpen), controllerCFG.EnableEndpointSlices, controllerCFG.DisableRestrictedSGRules,
controllerCFG.ServiceTargetENISGTags, mgr.GetEventRecorderFor("targetGroupBinding"), ctrl.Log)
backendSGProvider := networking.NewBackendSGProvider(controllerCFG.ClusterName, controllerCFG.BackendSecurityGroup,
diff --git a/mkdocs.yml b/mkdocs.yml
index b7613725a8..1a34639f3d 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -36,6 +36,7 @@ nav:
- Externally Managed Load Balancer: guide/use_cases/self_managed_lb/index.md
- Frontend Security Groups: guide/use_cases/frontend_sg/index.md
- Blue/Green: guide/use_cases/blue_green/index.md
+ - MultiCluster Target Groups: guide/use_cases/multi_cluster/index.md
- Examples:
- EchoServer: examples/echo_server.md
- gRPCServer: examples/grpc_server.md
diff --git a/pkg/algorithm/maps.go b/pkg/algorithm/maps.go
index 49003d29be..5930f4d9df 100644
--- a/pkg/algorithm/maps.go
+++ b/pkg/algorithm/maps.go
@@ -1,5 +1,10 @@
package algorithm
+import (
+ "k8s.io/apimachinery/pkg/util/sets"
+ "strings"
+)
+
// MapFindFirst get from list of maps until first found.
func MapFindFirst(key string, maps ...map[string]string) (string, bool) {
for _, m := range maps {
@@ -47,3 +52,25 @@ func DiffStringMap(desired map[string]string, current map[string]string) (map[st
return modify, remove
}
+
+// CSVToStringSet converts a comma-separated string into a set of the individual values.
+func CSVToStringSet(csv string) sets.Set[string] {
+ s := sets.Set[string]{}
+
+ if len(csv) == 0 {
+ return s
+ }
+
+ for _, v := range strings.Split(csv, ",") {
+ s.Insert(v)
+ }
+
+ return s
+}
+
+// StringSetToCSV serializes a string set into a comma-separated string. Value ordering is not guaranteed.
+func StringSetToCSV(s sets.Set[string]) string {
+ keyList := make([]string, 0, len(s))
+ for k := range s {
+ keyList = append(keyList, k)
+ }
+ return strings.Join(keyList, ",")
+}
diff --git a/pkg/algorithm/maps_test.go b/pkg/algorithm/maps_test.go
index 9a8d839aae..11f469aea6 100644
--- a/pkg/algorithm/maps_test.go
+++ b/pkg/algorithm/maps_test.go
@@ -2,6 +2,7 @@ package algorithm
import (
"github.com/stretchr/testify/assert"
+ "k8s.io/apimachinery/pkg/util/sets"
"testing"
)
@@ -275,3 +276,85 @@ func TestDiffStringMap(t *testing.T) {
})
}
}
+
+func TestCSVToStringSet(t *testing.T) {
+ tests := []struct {
+ name string
+ input string
+ output sets.Set[string]
+ }{
+ {
+ name: "empty string",
+ input: "",
+ output: sets.Set[string]{},
+ },
+ {
+ name: "one entry",
+ input: "127.0.0.1:80",
+ output: sets.Set[string]{
+ "127.0.0.1:80": {},
+ },
+ },
+ {
+ name: "multiple entries",
+ input: "127.0.0.1:80,127.0.0.2:80,127.0.0.3:80,127.0.0.4:80,127.0.0.5:80",
+ output: sets.Set[string]{
+ "127.0.0.1:80": {},
+ "127.0.0.2:80": {},
+ "127.0.0.3:80": {},
+ "127.0.0.4:80": {},
+ "127.0.0.5:80": {},
+ },
+ },
+ {
+ name: "duplicate entries",
+ input: "127.0.0.1:80,127.0.0.2:80,127.0.0.1:80,127.0.0.1:80,127.0.0.1:80",
+ output: sets.Set[string]{
+ "127.0.0.1:80": {},
+ "127.0.0.2:80": {},
+ },
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ assert.Equal(t, tt.output, CSVToStringSet(tt.input))
+ })
+ }
+}
+
+func TestStringSetToCSV(t *testing.T) {
+ tests := []struct {
+ name string
+ input sets.Set[string]
+ output string
+ }{
+ {
+ name: "empty string",
+ input: sets.Set[string]{},
+ },
+ {
+ name: "one entry",
+ input: sets.Set[string]{
+ "127.0.0.1:80": {},
+ },
+ },
+ {
+ name: "multiple entries",
+ input: sets.Set[string]{
+ "127.0.0.1:80": {},
+ "127.0.0.2:80": {},
+ "127.0.0.3:80": {},
+ "127.0.0.4:80": {},
+ "127.0.0.5:80": {},
+ },
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ // Go doesn't guarantee ordering for map iteration, so we just re-insert into a map to validate correctness.
+ output := StringSetToCSV(tt.input)
+ recreatedSet := CSVToStringSet(output)
+ assert.Equal(t, tt.input, recreatedSet)
+ })
+ }
+}
diff --git a/pkg/annotations/constants.go b/pkg/annotations/constants.go
index c008f859ce..997034dfb3 100644
--- a/pkg/annotations/constants.go
+++ b/pkg/annotations/constants.go
@@ -56,6 +56,7 @@ const (
IngressSuffixMutualAuthentication = "mutual-authentication"
IngressSuffixSecurityGroupPrefixLists = "security-group-prefix-lists"
IngressSuffixlsAttsAnnotationPrefix = "listener-attributes"
+ IngressLBSuffixMultiClusterTargetGroup = "multi-cluster-target-group"
// NLB annotation suffixes
// prefixes service.beta.kubernetes.io, service.kubernetes.io
@@ -97,4 +98,5 @@ const (
SvcLBSuffixEnforceSGInboundRulesOnPrivateLinkTraffic = "aws-load-balancer-inbound-sg-rules-on-private-link-traffic"
SvcLBSuffixSecurityGroupPrefixLists = "aws-load-balancer-security-group-prefix-lists"
SvcLBSuffixlsAttsAnnotationPrefix = "aws-load-balancer-listener-attributes"
+ SvcLBSuffixMultiClusterTargetGroup = "aws-load-balancer-multi-cluster-target-group"
)
diff --git a/pkg/backend/endpoint_types.go b/pkg/backend/endpoint_types.go
index 4fb677fe94..f928ca4f0c 100644
--- a/pkg/backend/endpoint_types.go
+++ b/pkg/backend/endpoint_types.go
@@ -9,7 +9,7 @@ import (
)
type Endpoint interface {
- GetIdentifier() string
+ GetIdentifier(includeTimestamp bool) string
}
// An endpoint provided by pod directly.
@@ -22,8 +22,11 @@ type PodEndpoint struct {
Pod k8s.PodInfo
}
-func (e PodEndpoint) GetIdentifier() string {
- return fmt.Sprintf("%s:%d:%d", e.IP, e.Port, e.Pod.CreationTime.UnixMilli())
+func (e PodEndpoint) GetIdentifier(includeTimestamp bool) string {
+ if includeTimestamp {
+ return fmt.Sprintf("%s:%d:%d", e.IP, e.Port, e.Pod.CreationTime.UnixMilli())
+ }
+ return fmt.Sprintf("%s:%d", e.IP, e.Port)
}
// An endpoint provided by nodePort as traffic proxy.
@@ -36,8 +39,11 @@ type NodePortEndpoint struct {
Node *corev1.Node
}
-func (e NodePortEndpoint) GetIdentifier() string {
- return fmt.Sprintf("%s:%d:%d", e.InstanceID, e.Port, e.Node.CreationTimestamp.UnixMilli())
+func (e NodePortEndpoint) GetIdentifier(includeTimestamp bool) string {
+ if includeTimestamp {
+ return fmt.Sprintf("%s:%d:%d", e.InstanceID, e.Port, e.Node.CreationTimestamp.UnixMilli())
+ }
+ return fmt.Sprintf("%s:%d", e.InstanceID, e.Port)
}
type EndpointsData struct {
diff --git a/pkg/deploy/elbv2/target_group_binding_manager.go b/pkg/deploy/elbv2/target_group_binding_manager.go
index 1787177190..98afaa5c97 100644
--- a/pkg/deploy/elbv2/target_group_binding_manager.go
+++ b/pkg/deploy/elbv2/target_group_binding_manager.go
@@ -188,6 +188,7 @@ func buildK8sTargetGroupBindingSpec(ctx context.Context, resTGB *elbv2model.Targ
k8sTGBSpec.NodeSelector = resTGB.Spec.Template.Spec.NodeSelector
k8sTGBSpec.IPAddressType = &resTGB.Spec.Template.Spec.IPAddressType
k8sTGBSpec.VpcID = resTGB.Spec.Template.Spec.VpcID
+ k8sTGBSpec.MultiClusterTargetGroup = resTGB.Spec.Template.Spec.MultiClusterTargetGroup
return k8sTGBSpec, nil
}
diff --git a/pkg/ingress/model_build_target_group.go b/pkg/ingress/model_build_target_group.go
index 38b636df60..381fd39721 100644
--- a/pkg/ingress/model_build_target_group.go
+++ b/pkg/ingress/model_build_target_group.go
@@ -35,6 +35,7 @@ func (t *defaultModelBuildTask) buildTargetGroup(ctx context.Context,
if err != nil {
return nil, err
}
+
tgSpec, err := t.buildTargetGroupSpec(ctx, ing, svc, port, svcPort)
if err != nil {
return nil, err
@@ -45,23 +46,32 @@ func (t *defaultModelBuildTask) buildTargetGroup(ctx context.Context,
}
tg := elbv2model.NewTargetGroup(t.stack, tgResID, tgSpec)
t.tgByResID[tgResID] = tg
- _ = t.buildTargetGroupBinding(ctx, tg, svc, port, svcPort, nodeSelector)
- return tg, nil
+ _, err = t.buildTargetGroupBinding(ctx, tg, svc, port, svcPort, nodeSelector, ing)
+ return tg, err
}
-func (t *defaultModelBuildTask) buildTargetGroupBinding(ctx context.Context, tg *elbv2model.TargetGroup, svc *corev1.Service, port intstr.IntOrString, svcPort corev1.ServicePort, nodeSelector *metav1.LabelSelector) *elbv2model.TargetGroupBindingResource {
- tgbSpec := t.buildTargetGroupBindingSpec(ctx, tg, svc, port, svcPort, nodeSelector)
+func (t *defaultModelBuildTask) buildTargetGroupBinding(ctx context.Context, tg *elbv2model.TargetGroup, svc *corev1.Service, port intstr.IntOrString, svcPort corev1.ServicePort, nodeSelector *metav1.LabelSelector, ing ClassifiedIngress) (*elbv2model.TargetGroupBindingResource, error) {
+ tgbSpec, err := t.buildTargetGroupBindingSpec(ctx, tg, svc, port, svcPort, nodeSelector, ing)
+ if err != nil {
+ return nil, err
+ }
tgb := elbv2model.NewTargetGroupBindingResource(t.stack, tg.ID(), tgbSpec)
- return tgb
+ return tgb, nil
}
-func (t *defaultModelBuildTask) buildTargetGroupBindingSpec(ctx context.Context, tg *elbv2model.TargetGroup, svc *corev1.Service, port intstr.IntOrString, svcPort corev1.ServicePort, nodeSelector *metav1.LabelSelector) elbv2model.TargetGroupBindingResourceSpec {
+func (t *defaultModelBuildTask) buildTargetGroupBindingSpec(ctx context.Context, tg *elbv2model.TargetGroup, svc *corev1.Service, port intstr.IntOrString, svcPort corev1.ServicePort, nodeSelector *metav1.LabelSelector, ing ClassifiedIngress) (elbv2model.TargetGroupBindingResourceSpec, error) {
targetType := elbv2api.TargetType(tg.Spec.TargetType)
targetPort := svcPort.TargetPort
if targetType == elbv2api.TargetTypeInstance {
targetPort = intstr.FromInt(int(svcPort.NodePort))
}
tgbNetworking := t.buildTargetGroupBindingNetworking(ctx, targetPort, *tg.Spec.HealthCheckConfig.Port)
+
+ multiTg, err := t.buildTargetGroupBindingMultiClusterFlag(ing, svc)
+ if err != nil {
+ return elbv2model.TargetGroupBindingResourceSpec{}, err
+ }
+
return elbv2model.TargetGroupBindingResourceSpec{
Template: elbv2model.TargetGroupBindingTemplate{
ObjectMeta: metav1.ObjectMeta{
@@ -75,13 +85,14 @@ func (t *defaultModelBuildTask) buildTargetGroupBindingSpec(ctx context.Context,
Name: svc.Name,
Port: port,
},
- Networking: tgbNetworking,
- NodeSelector: nodeSelector,
- IPAddressType: elbv2api.TargetGroupIPAddressType(tg.Spec.IPAddressType),
- VpcID: t.vpcID,
+ Networking: tgbNetworking,
+ NodeSelector: nodeSelector,
+ IPAddressType: elbv2api.TargetGroupIPAddressType(tg.Spec.IPAddressType),
+ VpcID: t.vpcID,
+ MultiClusterTargetGroup: multiTg,
},
},
- }
+ }, nil
}
func (t *defaultModelBuildTask) buildTargetGroupBindingNetworking(ctx context.Context, targetPort intstr.IntOrString, healthCheckPort intstr.IntOrString) *elbv2model.TargetGroupBindingNetworking {
@@ -476,3 +487,28 @@ func (t *defaultModelBuildTask) buildTargetGroupBindingNodeSelector(_ context.Co
MatchLabels: targetNodeLabels,
}, nil
}
+
+func (t *defaultModelBuildTask) buildTargetGroupBindingMultiClusterFlag(ing ClassifiedIngress, svc *corev1.Service) (bool, error) {
+ enabled, err := t.getMultiClusterTgFlag(ing.Ing.Annotations)
+ if err != nil {
+ return false, err
+ }
+
+ if enabled {
+ return true, nil
+ }
+
+ return t.getMultiClusterTgFlag(svc.Annotations)
+}
+
+func (t *defaultModelBuildTask) getMultiClusterTgFlag(annotationMap map[string]string) (bool, error) {
+ var rawEnabled bool
+ exists, err := t.annotationParser.ParseBoolAnnotation(annotations.IngressLBSuffixMultiClusterTargetGroup, &rawEnabled, annotationMap)
+ if err != nil {
+ return false, err
+ }
+ if exists {
+ return rawEnabled, nil
+ }
+ return false, nil
+}
diff --git a/pkg/ingress/model_build_target_group_test.go b/pkg/ingress/model_build_target_group_test.go
index 4bcf05d6e8..a21609709e 100644
--- a/pkg/ingress/model_build_target_group_test.go
+++ b/pkg/ingress/model_build_target_group_test.go
@@ -732,3 +732,160 @@ func Test_defaultModelBuildTask_buildTargetGroupBindingNodeSelector(t *testing.T
})
}
}
+
+func Test_defaultModelBuildTask_buildTargetGroupBindingMultiClusterFlag(t *testing.T) {
+ tests := []struct {
+ name string
+ ing ClassifiedIngress
+ svc *corev1.Service
+ want bool
+ wantErr bool
+ }{
+ {
+ name: "no annotation",
+ ing: ClassifiedIngress{
+ Ing: &networking.Ingress{},
+ },
+ svc: &corev1.Service{},
+ want: false,
+ },
+ {
+ name: "ing annotation",
+ ing: ClassifiedIngress{
+ Ing: &networking.Ingress{
+ ObjectMeta: metav1.ObjectMeta{
+ Annotations: map[string]string{
+ "alb.ingress.kubernetes.io/multi-cluster-target-group": "false",
+ },
+ },
+ },
+ },
+ svc: &corev1.Service{},
+ want: false,
+ },
+ {
+ name: "svc annotation",
+ ing: ClassifiedIngress{
+ Ing: &networking.Ingress{},
+ },
+ svc: &corev1.Service{
+ ObjectMeta: metav1.ObjectMeta{
+ Annotations: map[string]string{
+ "alb.ingress.kubernetes.io/multi-cluster-target-group": "false",
+ },
+ },
+ },
+ want: false,
+ },
+ {
+ name: "ing true annotation",
+ ing: ClassifiedIngress{
+ Ing: &networking.Ingress{
+ ObjectMeta: metav1.ObjectMeta{
+ Annotations: map[string]string{
+ "alb.ingress.kubernetes.io/multi-cluster-target-group": "true",
+ },
+ },
+ },
+ },
+ svc: &corev1.Service{},
+ want: true,
+ },
+ {
+ name: "svc true annotation",
+ ing: ClassifiedIngress{
+ Ing: &networking.Ingress{},
+ },
+ svc: &corev1.Service{
+ ObjectMeta: metav1.ObjectMeta{
+ Annotations: map[string]string{
+ "alb.ingress.kubernetes.io/multi-cluster-target-group": "true",
+ },
+ },
+ },
+ want: true,
+ },
+ {
+ name: "mix true annotation - ing true",
+ ing: ClassifiedIngress{
+ Ing: &networking.Ingress{
+ ObjectMeta: metav1.ObjectMeta{
+ Annotations: map[string]string{
+ "alb.ingress.kubernetes.io/multi-cluster-target-group": "true",
+ },
+ },
+ },
+ },
+ svc: &corev1.Service{
+ ObjectMeta: metav1.ObjectMeta{
+ Annotations: map[string]string{
+ "alb.ingress.kubernetes.io/multi-cluster-target-group": "false",
+ },
+ },
+ },
+ want: true,
+ },
+ {
+ name: "mix true annotation - svc true",
+ ing: ClassifiedIngress{
+ Ing: &networking.Ingress{
+ ObjectMeta: metav1.ObjectMeta{
+ Annotations: map[string]string{
+ "alb.ingress.kubernetes.io/multi-cluster-target-group": "false",
+ },
+ },
+ },
+ },
+ svc: &corev1.Service{
+ ObjectMeta: metav1.ObjectMeta{
+ Annotations: map[string]string{
+ "alb.ingress.kubernetes.io/multi-cluster-target-group": "true",
+ },
+ },
+ },
+ want: true,
+ },
+ {
+ name: "not a bool svc",
+ ing: ClassifiedIngress{
+ Ing: &networking.Ingress{},
+ },
+ svc: &corev1.Service{
+ ObjectMeta: metav1.ObjectMeta{
+ Annotations: map[string]string{
+ "alb.ingress.kubernetes.io/multi-cluster-target-group": "cat",
+ },
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "not a bool ing",
+ ing: ClassifiedIngress{
+ Ing: &networking.Ingress{
+ ObjectMeta: metav1.ObjectMeta{
+ Annotations: map[string]string{
+ "alb.ingress.kubernetes.io/multi-cluster-target-group": "cat",
+ },
+ },
+ },
+ },
+ svc: &corev1.Service{},
+ wantErr: true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ task := &defaultModelBuildTask{
+ annotationParser: annotations.NewSuffixAnnotationParser("alb.ingress.kubernetes.io"),
+ }
+ got, err := task.buildTargetGroupBindingMultiClusterFlag(tt.ing, tt.svc)
+ if tt.wantErr {
+ assert.Error(t, err)
+ } else {
+ assert.NoError(t, err)
+ assert.Equal(t, tt.want, got)
+ }
+ })
+ }
+}
diff --git a/pkg/model/elbv2/target_group_binding.go b/pkg/model/elbv2/target_group_binding.go
index 173eb81dec..4d98a629da 100644
--- a/pkg/model/elbv2/target_group_binding.go
+++ b/pkg/model/elbv2/target_group_binding.go
@@ -107,6 +107,10 @@ type TargetGroupBindingSpec struct {
// VpcID is the VPC of the TargetGroup. If unspecified, it will be automatically inferred.
// +optional
VpcID string `json:"vpcID,omitempty"`
+
+ // MultiClusterTargetGroup denotes if the TargetGroup is shared among multiple clusters
+ // +optional
+ MultiClusterTargetGroup bool `json:"multiClusterTargetGroup,omitempty"`
}
// Template for TargetGroupBinding Custom Resource.
diff --git a/pkg/service/model_build_target_group.go b/pkg/service/model_build_target_group.go
index f47d631e8e..831d6c6bcd 100644
--- a/pkg/service/model_build_target_group.go
+++ b/pkg/service/model_build_target_group.go
@@ -428,6 +428,12 @@ func (t *defaultModelBuildTask) buildTargetGroupBindingSpec(ctx context.Context,
if err != nil {
return elbv2model.TargetGroupBindingResourceSpec{}, err
}
+
+ multiTg, err := t.buildTargetGroupBindingMultiClusterFlag(t.service)
+ if err != nil {
+ return elbv2model.TargetGroupBindingResourceSpec{}, err
+ }
+
return elbv2model.TargetGroupBindingResourceSpec{
Template: elbv2model.TargetGroupBindingTemplate{
ObjectMeta: metav1.ObjectMeta{
@@ -441,10 +447,11 @@ func (t *defaultModelBuildTask) buildTargetGroupBindingSpec(ctx context.Context,
Name: t.service.Name,
Port: intstr.FromInt(int(port.Port)),
},
- Networking: tgbNetworking,
- NodeSelector: nodeSelector,
- IPAddressType: elbv2api.TargetGroupIPAddressType(targetGroup.Spec.IPAddressType),
- VpcID: t.vpcID,
+ Networking: tgbNetworking,
+ NodeSelector: nodeSelector,
+ IPAddressType: elbv2api.TargetGroupIPAddressType(targetGroup.Spec.IPAddressType),
+ VpcID: t.vpcID,
+ MultiClusterTargetGroup: multiTg,
},
},
}, nil
@@ -693,3 +700,15 @@ func (t *defaultModelBuildTask) buildManageSecurityGroupRulesFlagLegacy(_ contex
}
return true, nil
}
+
+func (t *defaultModelBuildTask) buildTargetGroupBindingMultiClusterFlag(svc *corev1.Service) (bool, error) {
+ var rawEnabled bool
+ exists, err := t.annotationParser.ParseBoolAnnotation(annotations.SvcLBSuffixMultiClusterTargetGroup, &rawEnabled, svc.Annotations)
+ if err != nil {
+ return false, err
+ }
+ if exists {
+ return rawEnabled, nil
+ }
+ return false, nil
+}
diff --git a/pkg/service/model_build_target_group_test.go b/pkg/service/model_build_target_group_test.go
index 7839f1dc28..fbf65903da 100644
--- a/pkg/service/model_build_target_group_test.go
+++ b/pkg/service/model_build_target_group_test.go
@@ -2022,3 +2022,65 @@ func Test_defaultModelBuilder_buildTargetGroupHealthCheckPort(t *testing.T) {
})
}
}
+
+func Test_defaultModelBuildTask_buildTargetGroupBindingMultiClusterFlag(t *testing.T) {
+ tests := []struct {
+ name string
+ svc *corev1.Service
+ want bool
+ wantErr bool
+ }{
+ {
+ name: "no annotation",
+ svc: &corev1.Service{},
+ want: false,
+ },
+ {
+ name: "false annotation",
+ svc: &corev1.Service{
+ ObjectMeta: metav1.ObjectMeta{
+ Annotations: map[string]string{
+ "service.beta.kubernetes.io/aws-load-balancer-multi-cluster-target-group": "false",
+ },
+ },
+ },
+ want: false,
+ },
+ {
+ name: "true annotation",
+ svc: &corev1.Service{
+ ObjectMeta: metav1.ObjectMeta{
+ Annotations: map[string]string{
+ "service.beta.kubernetes.io/aws-load-balancer-multi-cluster-target-group": "true",
+ },
+ },
+ },
+ want: true,
+ },
+ {
+ name: "not a bool",
+ svc: &corev1.Service{
+ ObjectMeta: metav1.ObjectMeta{
+ Annotations: map[string]string{
+ "service.beta.kubernetes.io/aws-load-balancer-multi-cluster-target-group": "cat",
+ },
+ },
+ },
+ wantErr: true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ task := &defaultModelBuildTask{
+ annotationParser: annotations.NewSuffixAnnotationParser("service.beta.kubernetes.io"),
+ }
+ got, err := task.buildTargetGroupBindingMultiClusterFlag(tt.svc)
+ if tt.wantErr {
+ assert.Error(t, err)
+ } else {
+ assert.NoError(t, err)
+ assert.Equal(t, tt.want, got)
+ }
+ })
+ }
+}
diff --git a/pkg/targetgroupbinding/multicluster_manager.go b/pkg/targetgroupbinding/multicluster_manager.go
new file mode 100644
index 0000000000..3743feb402
--- /dev/null
+++ b/pkg/targetgroupbinding/multicluster_manager.go
@@ -0,0 +1,257 @@
+package targetgroupbinding
+
+import (
+ "context"
+ "fmt"
+ "github.com/go-logr/logr"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/util/sets"
+ elbv2api "sigs.k8s.io/aws-load-balancer-controller/apis/elbv2/v1beta1"
+ "sigs.k8s.io/aws-load-balancer-controller/pkg/algorithm"
+ "sigs.k8s.io/aws-load-balancer-controller/pkg/backend"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sync"
+)
+
+const (
+ trackedTargetsPrefix = "aws-lbc-targets-"
+ targetsKey = "targets"
+)
+
+// MultiClusterManager implements logic to support multiple LBCs managing the same Target Group.
+type MultiClusterManager interface {
+ // FilterTargetsForDeregistration filters the given list of targets down to only the targets this cluster should operate on.
+ FilterTargetsForDeregistration(ctx context.Context, tgb *elbv2api.TargetGroupBinding, targetInfo []TargetInfo) ([]TargetInfo, bool, error)
+
+ // UpdateTrackedIPTargets updates the tracked IP target set in persistent storage
+ UpdateTrackedIPTargets(ctx context.Context, updateRequested bool, endpoints []backend.PodEndpoint, tgb *elbv2api.TargetGroupBinding) error
+
+ // UpdateTrackedInstanceTargets updates the tracked instance target set in persistent storage
+ UpdateTrackedInstanceTargets(ctx context.Context, updateRequested bool, endpoints []backend.NodePortEndpoint, tgb *elbv2api.TargetGroupBinding) error
+
+ // CleanUp removes any resources used to implement multicluster support.
+ CleanUp(ctx context.Context, tgb *elbv2api.TargetGroupBinding) error
+}
+
+type multiClusterManagerImpl struct {
+ kubeClient client.Client
+ apiReader client.Reader
+ logger logr.Logger
+
+ configMapCache map[string]sets.Set[string]
+ configMapCacheMutex sync.RWMutex
+}
+
+// NewMultiClusterManager constructs a multicluster manager that is immediately ready to use.
+func NewMultiClusterManager(kubeClient client.Client, apiReader client.Reader, logger logr.Logger) MultiClusterManager {
+ return &multiClusterManagerImpl{
+ apiReader: apiReader,
+ kubeClient: kubeClient,
+ logger: logger,
+ configMapCacheMutex: sync.RWMutex{},
+ configMapCache: make(map[string]sets.Set[string]),
+ }
+}
+
+func (m *multiClusterManagerImpl) UpdateTrackedIPTargets(ctx context.Context, updateRequested bool, endpoints []backend.PodEndpoint, tgb *elbv2api.TargetGroupBinding) error {
+ endpointStringFn := func() []string {
+ endpointStrings := make([]string, 0, len(endpoints))
+
+ for _, ep := range endpoints {
+ endpointStrings = append(endpointStrings, ep.GetIdentifier(false))
+ }
+ return endpointStrings
+ }
+
+ return m.updateTrackedTargets(ctx, updateRequested, endpointStringFn, tgb)
+}
+
+func (m *multiClusterManagerImpl) UpdateTrackedInstanceTargets(ctx context.Context, updateRequested bool, endpoints []backend.NodePortEndpoint, tgb *elbv2api.TargetGroupBinding) error {
+ endpointStringFn := func() []string {
+ endpointStrings := make([]string, 0, len(endpoints))
+
+ for _, ep := range endpoints {
+ endpointStrings = append(endpointStrings, ep.GetIdentifier(false))
+ }
+ return endpointStrings
+ }
+
+ return m.updateTrackedTargets(ctx, updateRequested, endpointStringFn, tgb)
+}
+
+func (m *multiClusterManagerImpl) updateTrackedTargets(ctx context.Context, updateRequested bool, endpointStringFn func() []string, tgb *elbv2api.TargetGroupBinding) error {
+ if !tgb.Spec.MultiClusterTargetGroup {
+ return nil
+ }
+
+ // Initial case, we want to create the config map when it doesn't exist.
+ if !updateRequested {
+ cachedData := m.retrieveConfigMapFromCache(tgb)
+ if cachedData != nil {
+ return nil
+ }
+ }
+
+ endpoints := endpointStringFn()
+ persistedEndpoints := make(sets.Set[string])
+
+ for _, ep := range endpoints {
+ persistedEndpoints.Insert(ep)
+ }
+
+ return m.persistConfigMap(ctx, persistedEndpoints, tgb)
+}
+
+func (m *multiClusterManagerImpl) FilterTargetsForDeregistration(ctx context.Context, tgb *elbv2api.TargetGroupBinding, targets []TargetInfo) ([]TargetInfo, bool, error) {
+ if !tgb.Spec.MultiClusterTargetGroup {
+ return targets, false, nil
+ }
+
+ persistedEndpoints, err := m.getConfigMapContents(ctx, tgb)
+
+ if err != nil {
+ return nil, false, err
+ }
+
+ if persistedEndpoints == nil {
+ // Initial state after enabling MC or new TGB, we don't have enough data to accurately deregister targets here.
+ m.logger.Info(fmt.Sprintf("Initial data population for multicluster target group. No deregister will occur on this reconcile run for tg: %s", tgb.Spec.TargetGroupARN))
+ return []TargetInfo{}, true, nil
+ }
+
+ filteredTargets := make([]TargetInfo, 0)
+
+ // Loop through the proposed target list, removing any targets that we have not stored in the config map.
+ for _, target := range targets {
+ if _, ok := persistedEndpoints[target.GetIdentifier()]; ok {
+ filteredTargets = append(filteredTargets, target)
+ }
+ }
+
+ return filteredTargets, false, nil
+}
+
+func (m *multiClusterManagerImpl) CleanUp(ctx context.Context, tgb *elbv2api.TargetGroupBinding) error {
+ // Technically we should try this clean up anyway to clean up configmaps that would exist from
+ // flipping between shared / not shared. However, it's a pretty big change for users not using multicluster support
+ // to start having these delete cm calls. The concern is around bricking clusters where users do not use MC and forget
+ // to include the new controller permissions for configmaps.
+ // TL;DR We'll document not to flip between shared / not shared.
+
+ // Always delete from in memory cache, as it's basically "free" to do so.
+ m.configMapCacheMutex.Lock()
+ delete(m.configMapCache, getCacheKey(tgb))
+ m.configMapCacheMutex.Unlock()
+
+ // If not using multicluster support currently, just bail here.
+ if !tgb.Spec.MultiClusterTargetGroup {
+ return nil
+ }
+
+ cm := &corev1.ConfigMap{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: tgb.Namespace,
+ Name: getConfigMapName(tgb),
+ },
+ }
+ err := m.kubeClient.Delete(ctx, cm)
+ if err == nil {
+ return nil
+ }
+ return client.IgnoreNotFound(err)
+}
+
+func (m *multiClusterManagerImpl) getConfigMapContents(ctx context.Context, tgb *elbv2api.TargetGroupBinding) (sets.Set[string], error) {
+
+ // First load from cache.
+ cachedData := m.retrieveConfigMapFromCache(tgb)
+ if cachedData != nil {
+ return cachedData, nil
+ }
+
+ // If not available from the in-memory cache, look up the data from the kube api and store it into the cache.
+ cm := &corev1.ConfigMap{}
+
+ err := m.apiReader.Get(ctx, client.ObjectKey{
+ Namespace: tgb.Namespace,
+ Name: getConfigMapName(tgb),
+ }, cm)
+
+ if err == nil {
+ targetSet := algorithm.CSVToStringSet(cm.Data[targetsKey])
+ m.updateCache(tgb, targetSet)
+ return targetSet, nil
+ }
+
+ // A NotFound error means this is the first time running for this TGB, so the config map contents still need to be populated.
+ err = client.IgnoreNotFound(err)
+ if err != nil {
+ return nil, err
+ }
+ return nil, nil
+}
+
+func (m *multiClusterManagerImpl) retrieveConfigMapFromCache(tgb *elbv2api.TargetGroupBinding) sets.Set[string] {
+ m.configMapCacheMutex.RLock()
+ defer m.configMapCacheMutex.RUnlock()
+
+ cacheKey := getCacheKey(tgb)
+
+ if v, ok := m.configMapCache[cacheKey]; ok {
+ return v
+ }
+ return nil
+}
+
+func (m *multiClusterManagerImpl) persistConfigMap(ctx context.Context, endpointMap sets.Set[string], tgb *elbv2api.TargetGroupBinding) error {
+
+ targetData := algorithm.StringSetToCSV(endpointMap)
+
+ // Update the cm in kube api, to ensure things work across controller restarts.
+ cm := &corev1.ConfigMap{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: tgb.Namespace,
+ Name: getConfigMapName(tgb),
+ },
+ Data: map[string]string{
+ targetsKey: targetData,
+ },
+ }
+
+ err := m.kubeClient.Update(ctx, cm)
+
+ if err == nil {
+ m.updateCache(tgb, endpointMap)
+ return nil
+ }
+
+ // Check for initial case and create config map.
+ err = client.IgnoreNotFound(err)
+ if err == nil {
+ err = m.kubeClient.Create(ctx, cm)
+ if err == nil {
+ m.updateCache(tgb, endpointMap)
+ }
+ return err
+ }
+
+ return err
+}
+
+func (m *multiClusterManagerImpl) updateCache(tgb *elbv2api.TargetGroupBinding, endpointMap sets.Set[string]) {
+ m.configMapCacheMutex.Lock()
+ defer m.configMapCacheMutex.Unlock()
+ cacheKey := getCacheKey(tgb)
+ m.configMapCache[cacheKey] = endpointMap
+}
+
+// getCacheKey generates a key for the in-memory config map cache
+func getCacheKey(tgb *elbv2api.TargetGroupBinding) string {
+ return fmt.Sprintf("%s-%s", tgb.Namespace, tgb.Name)
+}
+
+// getConfigMapName generates a config map name to use with the k8s api.
+func getConfigMapName(tgb *elbv2api.TargetGroupBinding) string {
+ return fmt.Sprintf("%s%s", trackedTargetsPrefix, tgb.Name)
+}
diff --git a/pkg/targetgroupbinding/multicluster_manager_test.go b/pkg/targetgroupbinding/multicluster_manager_test.go
new file mode 100644
index 0000000000..4c77c002e7
--- /dev/null
+++ b/pkg/targetgroupbinding/multicluster_manager_test.go
@@ -0,0 +1,692 @@
+package targetgroupbinding
+
+import (
+ "context"
+ "fmt"
+ awssdk "github.com/aws/aws-sdk-go-v2/aws"
+ elbv2types "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2/types"
+ "github.com/go-logr/logr"
+ "github.com/stretchr/testify/assert"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/util/sets"
+ elbv2api "sigs.k8s.io/aws-load-balancer-controller/apis/elbv2/v1beta1"
+ "sigs.k8s.io/aws-load-balancer-controller/pkg/algorithm"
+ "sigs.k8s.io/aws-load-balancer-controller/pkg/backend"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ testclient "sigs.k8s.io/controller-runtime/pkg/client/fake"
+ "sigs.k8s.io/controller-runtime/pkg/log"
+ "testing"
+)
+
+const (
+ testNamespace = "test-ns"
+ testTGBName = "test-tgb"
+)
+
+func TestUpdateTrackedTargets(t *testing.T) {
+ testCases := []struct {
+ name string
+ updateRequested bool
+ endpoints []string
+ expectedCache sets.Set[string]
+ cachedValue sets.Set[string]
+ multiTg bool
+ validateCM bool
+ }{
+ {
+ name: "update requested tgb not shared",
+ updateRequested: true,
+ cachedValue: nil,
+ expectedCache: nil,
+ endpoints: []string{},
+ },
+ {
+ name: "update not requested tgb not shared",
+ updateRequested: false,
+ cachedValue: nil,
+ expectedCache: nil,
+ endpoints: []string{},
+ },
+ {
+ name: "update not requested tgb shared should still backfill",
+ updateRequested: false,
+ cachedValue: nil,
+ expectedCache: sets.Set[string]{},
+ endpoints: []string{},
+ multiTg: true,
+ validateCM: true,
+ },
+ {
+ name: "update not requested tgb shared should not backfill as the cache has a value already",
+ updateRequested: false,
+ cachedValue: sets.Set[string]{
+ "127.0.0.1:80": {},
+ },
+ expectedCache: sets.Set[string]{
+ "127.0.0.1:80": {},
+ },
+ endpoints: []string{},
+ multiTg: true,
+ },
+ {
+ name: "update requested tgb shared empty endpoints",
+ updateRequested: true,
+ cachedValue: nil,
+ endpoints: []string{},
+ expectedCache: sets.Set[string]{},
+ multiTg: true,
+ validateCM: true,
+ },
+ {
+ name: "update not requested tgb shared should still backfill with endpoints",
+ updateRequested: false,
+ endpoints: []string{
+ "127.0.0.1:80",
+ "127.0.0.2:80",
+ "127.0.0.3:80",
+ "127.0.0.4:80",
+ "127.0.0.5:80",
+ },
+ cachedValue: nil,
+ expectedCache: sets.Set[string]{
+ "127.0.0.1:80": {},
+ "127.0.0.2:80": {},
+ "127.0.0.3:80": {},
+ "127.0.0.4:80": {},
+ "127.0.0.5:80": {},
+ },
+ multiTg: true,
+ validateCM: true,
+ },
+ {
+ name: "update requested tgb shared with endpoints",
+ updateRequested: true,
+ endpoints: []string{
+ "127.0.0.1:80",
+ "127.0.0.2:80",
+ "127.0.0.3:80",
+ "127.0.0.4:80",
+ "127.0.0.5:80",
+ },
+ cachedValue: nil,
+ expectedCache: sets.Set[string]{
+ "127.0.0.1:80": {},
+ "127.0.0.2:80": {},
+ "127.0.0.3:80": {},
+ "127.0.0.4:80": {},
+ "127.0.0.5:80": {},
+ },
+ multiTg: true,
+ validateCM: true,
+ },
+ {
+ name: "update requested tgb shared with endpoints. endpoints from different ports",
+ updateRequested: true,
+ endpoints: []string{
+ "127.0.0.1:80",
+ "127.0.0.2:80",
+ "127.0.0.3:80",
+ "127.0.0.4:80",
+ "127.0.0.5:80",
+ "127.0.0.1:85",
+ "127.0.0.2:85",
+ "127.0.0.3:85",
+ "127.0.0.4:85",
+ "127.0.0.5:85",
+ },
+ cachedValue: nil,
+ expectedCache: sets.Set[string]{
+ "127.0.0.1:80": {},
+ "127.0.0.2:80": {},
+ "127.0.0.3:80": {},
+ "127.0.0.4:80": {},
+ "127.0.0.5:80": {},
+ "127.0.0.1:85": {},
+ "127.0.0.2:85": {},
+ "127.0.0.3:85": {},
+ "127.0.0.4:85": {},
+ "127.0.0.5:85": {},
+ },
+ multiTg: true,
+ validateCM: true,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ k8sClient := testclient.NewClientBuilder().Build()
+ mc := NewMultiClusterManager(k8sClient, k8sClient, logr.New(&log.NullLogSink{})).(*multiClusterManagerImpl)
+
+ tgb := &elbv2api.TargetGroupBinding{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: testNamespace,
+ Name: testTGBName,
+ },
+ Spec: elbv2api.TargetGroupBindingSpec{
+ MultiClusterTargetGroup: tc.multiTg,
+ },
+ }
+
+ if tc.cachedValue != nil {
+ setCachedValue(mc, tc.cachedValue, testNamespace, testTGBName)
+ }
+
+ err := mc.updateTrackedTargets(context.Background(), tc.updateRequested, func() []string {
+ return tc.endpoints
+ }, tgb)
+ assert.Nil(t, err)
+ cachedValue := getCachedValue(mc, testNamespace, testTGBName)
+ assert.Equal(t, tc.expectedCache, cachedValue)
+ if tc.validateCM {
+ cm := &corev1.ConfigMap{}
+ getErr := k8sClient.Get(context.Background(), client.ObjectKey{
+ Namespace: tgb.Namespace,
+ Name: getConfigMapName(tgb),
+ }, cm)
+ assert.NoError(t, getErr)
+ assert.Equal(t, tc.expectedCache, algorithm.CSVToStringSet(cm.Data[targetsKey]))
+ }
+ })
+ }
+}
+
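+// TestUpdateTrackedTargetsUpdateConfigMap verifies that an existing ConfigMap is updated in place rather than recreated.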
+func TestUpdateTrackedTargetsUpdateConfigMap(t *testing.T) {
+ k8sClient := testclient.NewClientBuilder().Build()
+ reader := testclient.NewClientBuilder().Build()
+ mc := NewMultiClusterManager(k8sClient, reader, logr.New(&log.NullLogSink{})).(*multiClusterManagerImpl)
+
+ tgb := &elbv2api.TargetGroupBinding{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: testNamespace,
+ Name: testTGBName,
+ },
+ Spec: elbv2api.TargetGroupBindingSpec{
+ MultiClusterTargetGroup: true,
+ },
+ }
+
+ cm := &corev1.ConfigMap{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: tgb.Namespace,
+ Name: getConfigMapName(tgb),
+ },
+ Data: map[string]string{
+ targetsKey: algorithm.StringSetToCSV(sets.Set[string]{}),
+ },
+ }
+ k8sClient.Create(context.Background(), cm)
+
+ endpoints := []string{"127.0.0.1:80"}
+ endpointsFn := func() []string {
+ return endpoints
+ }
+
+ err := mc.updateTrackedTargets(context.Background(), true, endpointsFn, tgb)
+ assert.Nil(t, err)
+ cachedValue := getCachedValue(mc, testNamespace, testTGBName)
+ assert.Equal(t, sets.Set[string]{
+ "127.0.0.1:80": {},
+ }, cachedValue)
+
+ cm = &corev1.ConfigMap{}
+ k8sGetError := k8sClient.Get(context.Background(), client.ObjectKey{
+ Namespace: tgb.Namespace,
+ Name: getConfigMapName(tgb),
+ }, cm)
+ assert.Nil(t, k8sGetError)
+ assert.Equal(t, sets.Set[string]{
+ "127.0.0.1:80": {},
+ }, algorithm.CSVToStringSet(cm.Data[targetsKey]))
+
+}
+
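+// TestUpdateTrackedIPTargets verifies that pod IP endpoints are tracked as "ip:port" entries.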
+func TestUpdateTrackedIPTargets(t *testing.T) {
+ k8sClient := testclient.NewClientBuilder().Build()
+ reader := testclient.NewClientBuilder().Build()
+ mc := NewMultiClusterManager(k8sClient, reader, logr.New(&log.NullLogSink{})).(*multiClusterManagerImpl)
+
+ tgb := &elbv2api.TargetGroupBinding{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: testNamespace,
+ Name: testTGBName,
+ },
+ Spec: elbv2api.TargetGroupBindingSpec{
+ MultiClusterTargetGroup: true,
+ },
+ }
+
+ endpoints := []backend.PodEndpoint{
+ {
+ IP: "127.0.0.1",
+ Port: 80,
+ },
+ {
+ IP: "127.0.0.2",
+ Port: 80,
+ },
+ {
+ IP: "127.0.0.3",
+ Port: 80,
+ },
+ }
+
+ err := mc.UpdateTrackedIPTargets(context.Background(), true, endpoints, tgb)
+ assert.Nil(t, err)
+ cachedValue := getCachedValue(mc, testNamespace, testTGBName)
+ assert.Equal(t, sets.Set[string]{
+ "127.0.0.1:80": {},
+ "127.0.0.2:80": {},
+ "127.0.0.3:80": {},
+ }, cachedValue)
+}
+
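+// TestUpdateTrackedInstanceTargets verifies that node port endpoints are tracked as "instanceID:port" entries.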
+func TestUpdateTrackedInstanceTargets(t *testing.T) {
+ k8sClient := testclient.NewClientBuilder().Build()
+ reader := testclient.NewClientBuilder().Build()
+ mc := NewMultiClusterManager(k8sClient, reader, logr.New(&log.NullLogSink{})).(*multiClusterManagerImpl)
+
+ tgb := &elbv2api.TargetGroupBinding{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: testNamespace,
+ Name: testTGBName,
+ },
+ Spec: elbv2api.TargetGroupBindingSpec{
+ MultiClusterTargetGroup: true,
+ },
+ }
+
+ endpoints := []backend.NodePortEndpoint{
+ {
+ InstanceID: "i-1234",
+ Port: 80,
+ },
+ {
+ InstanceID: "i-5678",
+ Port: 80,
+ },
+ {
+ InstanceID: "i-91011",
+ Port: 80,
+ },
+ }
+
+ err := mc.UpdateTrackedInstanceTargets(context.Background(), true, endpoints, tgb)
+ assert.Nil(t, err)
+ cachedValue := getCachedValue(mc, testNamespace, testTGBName)
+ assert.Equal(t, sets.Set[string]{
+ "i-1234:80": {},
+ "i-5678:80": {},
+ "i-91011:80": {},
+ }, cachedValue)
+}
+
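+// TestFilterTargetsForDeregistration verifies that, in multicluster mode, only targets tracked by this cluster are
+// returned for deregistration, and that a ConfigMap refresh is requested when no tracked data exists yet.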
+func TestFilterTargetsForDeregistration(t *testing.T) {
+
+ cachedTargets := sets.Set[string]{
+ "127.0.0.1:80": {},
+ "127.0.0.2:80": {},
+ "127.0.0.3:80": {},
+ "127.0.0.4:80": {},
+ "127.0.0.5:80": {},
+ }
+
+ testCases := []struct {
+ name string
+ multiTg bool
+ setData bool
+ targets []TargetInfo
+ expectedTargets []TargetInfo
+ cachedTargets sets.Set[string]
+ refreshNeeded bool
+ }{
+ {
+ name: "multicluster not enabled",
+ targets: []TargetInfo{
+ {
+ Target: elbv2types.TargetDescription{
+ Id: awssdk.String("127.0.0.100"),
+ Port: awssdk.Int32(80),
+ },
+ },
+ {
+ Target: elbv2types.TargetDescription{
+ Id: awssdk.String("127.0.0.101"),
+ Port: awssdk.Int32(80),
+ },
+ },
+ {
+ Target: elbv2types.TargetDescription{
+ Id: awssdk.String("127.0.0.102"),
+ Port: awssdk.Int32(80),
+ },
+ },
+ },
+ expectedTargets: []TargetInfo{
+ {
+ Target: elbv2types.TargetDescription{
+ Id: awssdk.String("127.0.0.100"),
+ Port: awssdk.Int32(80),
+ },
+ },
+ {
+ Target: elbv2types.TargetDescription{
+ Id: awssdk.String("127.0.0.101"),
+ Port: awssdk.Int32(80),
+ },
+ },
+ {
+ Target: elbv2types.TargetDescription{
+ Id: awssdk.String("127.0.0.102"),
+ Port: awssdk.Int32(80),
+ },
+ },
+ },
+ },
+ {
+ name: "multicluster enabled, need to refresh config map",
+ multiTg: true,
+ refreshNeeded: true,
+ targets: []TargetInfo{},
+ expectedTargets: []TargetInfo{},
+ },
+ {
+ name: "multicluster enabled, all targets filtered out",
+ setData: true,
+ multiTg: true,
+ targets: []TargetInfo{
+ {
+ Target: elbv2types.TargetDescription{
+ Id: awssdk.String("127.0.0.100"),
+ Port: awssdk.Int32(80),
+ },
+ },
+ {
+ Target: elbv2types.TargetDescription{
+ Id: awssdk.String("127.0.0.101"),
+ Port: awssdk.Int32(80),
+ },
+ },
+ {
+ Target: elbv2types.TargetDescription{
+ Id: awssdk.String("127.0.0.102"),
+ Port: awssdk.Int32(80),
+ },
+ },
+ },
+ expectedTargets: []TargetInfo{},
+ },
+ {
+ name: "multicluster enabled, some targets filtered out",
+ setData: true,
+ multiTg: true,
+ targets: []TargetInfo{
+ {
+ Target: elbv2types.TargetDescription{
+ Id: awssdk.String("127.0.0.100"),
+ Port: awssdk.Int32(80),
+ },
+ },
+ {
+ Target: elbv2types.TargetDescription{
+ Id: awssdk.String("127.0.0.101"),
+ Port: awssdk.Int32(80),
+ },
+ },
+ {
+ Target: elbv2types.TargetDescription{
+ Id: awssdk.String("127.0.0.102"),
+ Port: awssdk.Int32(80),
+ },
+ },
+ {
+ Target: elbv2types.TargetDescription{
+ Id: awssdk.String("127.0.0.1"),
+ Port: awssdk.Int32(80),
+ },
+ },
+ {
+ Target: elbv2types.TargetDescription{
+ Id: awssdk.String("127.0.0.2"),
+ Port: awssdk.Int32(80),
+ },
+ },
+ {
+ Target: elbv2types.TargetDescription{
+ Id: awssdk.String("127.0.0.3"),
+ Port: awssdk.Int32(80),
+ },
+ },
+ {
+ Target: elbv2types.TargetDescription{
+ Id: awssdk.String("127.0.0.4"),
+ Port: awssdk.Int32(80),
+ },
+ },
+ {
+ Target: elbv2types.TargetDescription{
+ Id: awssdk.String("127.0.0.5"),
+ Port: awssdk.Int32(80),
+ },
+ },
+ },
+ expectedTargets: []TargetInfo{
+ {
+ Target: elbv2types.TargetDescription{
+ Id: awssdk.String("127.0.0.1"),
+ Port: awssdk.Int32(80),
+ },
+ },
+ {
+ Target: elbv2types.TargetDescription{
+ Id: awssdk.String("127.0.0.2"),
+ Port: awssdk.Int32(80),
+ },
+ },
+ {
+ Target: elbv2types.TargetDescription{
+ Id: awssdk.String("127.0.0.3"),
+ Port: awssdk.Int32(80),
+ },
+ },
+ {
+ Target: elbv2types.TargetDescription{
+ Id: awssdk.String("127.0.0.4"),
+ Port: awssdk.Int32(80),
+ },
+ },
+ {
+ Target: elbv2types.TargetDescription{
+ Id: awssdk.String("127.0.0.5"),
+ Port: awssdk.Int32(80),
+ },
+ },
+ },
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ k8sClient := testclient.NewClientBuilder().Build()
+ reader := testclient.NewClientBuilder().Build()
+ mc := NewMultiClusterManager(k8sClient, reader, logr.New(&log.NullLogSink{})).(*multiClusterManagerImpl)
+ tgb := &elbv2api.TargetGroupBinding{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: testNamespace,
+ Name: testTGBName,
+ },
+ Spec: elbv2api.TargetGroupBindingSpec{
+ MultiClusterTargetGroup: tc.multiTg,
+ },
+ }
+
+ if tc.setData {
+ setCachedValue(mc, cachedTargets, testNamespace, testTGBName)
+ }
+
+ returnedTargets, refreshNeeded, err := mc.FilterTargetsForDeregistration(context.Background(), tgb, tc.targets)
+ assert.Nil(t, err)
+ assert.Equal(t, tc.expectedTargets, returnedTargets)
+ assert.Equal(t, tc.refreshNeeded, refreshNeeded)
+ })
+ }
+}
+
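+// TestGetConfigMapContents verifies that the in-memory cache is preferred and the ConfigMap is only read when no cached value exists.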
+func TestGetConfigMapContents(t *testing.T) {
+
+ inMemoryCache := sets.Set[string]{
+ "1": {},
+ "2": {},
+ "3": {},
+ }
+
+ cmData := sets.Set[string]{
+ "4": {},
+ "5": {},
+ "6": {},
+ }
+
+ testCases := []struct {
+ name string
+ setCache bool
+ expected sets.Set[string]
+ }{
+ {
+ name: "use cached value",
+ setCache: true,
+ expected: inMemoryCache,
+ },
+ {
+ name: "use cm value",
+ expected: cmData,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ k8sClient := testclient.NewClientBuilder().Build()
+ mc := NewMultiClusterManager(k8sClient, k8sClient, logr.New(&log.NullLogSink{})).(*multiClusterManagerImpl)
+ tgb := &elbv2api.TargetGroupBinding{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: testNamespace,
+ Name: testTGBName,
+ },
+ Spec: elbv2api.TargetGroupBindingSpec{
+ MultiClusterTargetGroup: true,
+ },
+ }
+
+ if tc.setCache {
+ setCachedValue(mc, inMemoryCache, testNamespace, testTGBName)
+ }
+
+ cm := &corev1.ConfigMap{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: tgb.Namespace,
+ Name: getConfigMapName(tgb),
+ },
+ Data: map[string]string{
+ targetsKey: algorithm.StringSetToCSV(cmData),
+ },
+ }
+ k8sClient.Create(context.Background(), cm)
+
+ res, err := mc.getConfigMapContents(context.Background(), tgb)
+ assert.Nil(t, err)
+ assert.Equal(t, tc.expected, res)
+
+ cachedValue := getCachedValue(mc, testNamespace, testTGBName)
+ assert.Equal(t, cachedValue, res)
+ })
+ }
+}
+
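+// TestCleanUp verifies that CleanUp removes the cache entry and the ConfigMap for the TargetGroupBinding
+// without touching entries that belong to other bindings.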
+func TestCleanUp(t *testing.T) {
+ testCases := []struct {
+ name string
+ multiTg bool
+ setData bool
+ }{
+ {
+ name: "multicluster not enabled",
+ },
+ {
+ name: "multicluster enabled, data not set",
+ multiTg: true,
+ },
+ {
+ name: "multicluster enabled",
+ multiTg: true,
+ setData: true,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ k8sClient := testclient.NewClientBuilder().Build()
+ reader := testclient.NewClientBuilder().Build()
+ mc := NewMultiClusterManager(k8sClient, reader, logr.New(&log.NullLogSink{})).(*multiClusterManagerImpl)
+ tgb := &elbv2api.TargetGroupBinding{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: testNamespace,
+ Name: testTGBName,
+ },
+ Spec: elbv2api.TargetGroupBindingSpec{
+ MultiClusterTargetGroup: tc.multiTg,
+ },
+ }
+
+ otherNs := "otherns"
+ otherName := "othername"
+
+ if tc.setData {
+ cachedValue := sets.Set[string]{
+ "foo": {},
+ }
+ setCachedValue(mc, cachedValue, testNamespace, testTGBName)
+
+ cm := &corev1.ConfigMap{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: tgb.Namespace,
+ Name: getConfigMapName(tgb),
+ },
+ Data: map[string]string{
+ targetsKey: algorithm.StringSetToCSV(cachedValue),
+ },
+ }
+ k8sClient.Create(context.Background(), cm)
+ }
+
+ otherCacheValue := sets.Set[string]{
+ "baz": {},
+ }
+ setCachedValue(mc, otherCacheValue, otherNs, otherName)
+
+ err := mc.CleanUp(context.Background(), tgb)
+ assert.Nil(t, err)
+ assert.Nil(t, getCachedValue(mc, testNamespace, testTGBName))
+ assert.Equal(t, otherCacheValue, getCachedValue(mc, otherNs, otherName))
+ cm := &corev1.ConfigMap{}
+ k8sGetError := k8sClient.Get(context.Background(), client.ObjectKey{
+ Namespace: tgb.Namespace,
+ Name: getConfigMapName(tgb),
+ }, cm)
+ assert.NotNil(t, k8sGetError)
+ assert.Nil(t, client.IgnoreNotFound(k8sGetError))
+ })
+ }
+}
+
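+// getCachedValue is a test helper that reads the manager's cache directly, bypassing the read lock.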
+func getCachedValue(mc *multiClusterManagerImpl, namespace, name string) sets.Set[string] {
+ key := fmt.Sprintf("%s-%s", namespace, name)
+
+ if v, ok := mc.configMapCache[key]; ok {
+ return v
+ }
+ return nil
+}
+
+func setCachedValue(mc *multiClusterManagerImpl, v sets.Set[string], namespace, name string) {
+ key := fmt.Sprintf("%s-%s", namespace, name)
+ mc.configMapCache[key] = v
+}
diff --git a/pkg/targetgroupbinding/resource_manager.go b/pkg/targetgroupbinding/resource_manager.go
index 9bef6a70f3..78a664f0af 100644
--- a/pkg/targetgroupbinding/resource_manager.go
+++ b/pkg/targetgroupbinding/resource_manager.go
@@ -37,7 +37,7 @@ type ResourceManager interface {
// NewDefaultResourceManager constructs new defaultResourceManager.
func NewDefaultResourceManager(k8sClient client.Client, elbv2Client services.ELBV2, ec2Client services.EC2,
podInfoRepo k8s.PodInfoRepo, sgManager networking.SecurityGroupManager, sgReconciler networking.SecurityGroupReconciler,
- vpcInfoProvider networking.VPCInfoProvider,
+ vpcInfoProvider networking.VPCInfoProvider, multiClusterManager MultiClusterManager,
vpcID string, clusterName string, failOpenEnabled bool, endpointSliceEnabled bool, disabledRestrictedSGRulesFlag bool,
endpointSGTags map[string]string,
eventRecorder record.EventRecorder, logger logr.Logger) *defaultResourceManager {
@@ -50,15 +50,16 @@ func NewDefaultResourceManager(k8sClient client.Client, elbv2Client services.ELB
networkingManager := NewDefaultNetworkingManager(k8sClient, podENIResolver, nodeENIResolver, sgManager, sgReconciler, vpcID, clusterName, endpointSGTags, logger, disabledRestrictedSGRulesFlag)
return &defaultResourceManager{
- k8sClient: k8sClient,
- targetsManager: targetsManager,
- endpointResolver: endpointResolver,
- networkingManager: networkingManager,
- eventRecorder: eventRecorder,
- logger: logger,
- vpcID: vpcID,
- vpcInfoProvider: vpcInfoProvider,
- podInfoRepo: podInfoRepo,
+ k8sClient: k8sClient,
+ targetsManager: targetsManager,
+ endpointResolver: endpointResolver,
+ networkingManager: networkingManager,
+ eventRecorder: eventRecorder,
+ logger: logger,
+ vpcID: vpcID,
+ vpcInfoProvider: vpcInfoProvider,
+ podInfoRepo: podInfoRepo,
+ multiClusterManager: multiClusterManager,
requeueDuration: defaultRequeueDuration,
}
@@ -68,15 +69,16 @@ var _ ResourceManager = &defaultResourceManager{}
// default implementation for ResourceManager.
type defaultResourceManager struct {
- k8sClient client.Client
- targetsManager TargetsManager
- endpointResolver backend.EndpointResolver
- networkingManager NetworkingManager
- eventRecorder record.EventRecorder
- logger logr.Logger
- vpcInfoProvider networking.VPCInfoProvider
- podInfoRepo k8s.PodInfoRepo
- vpcID string
+ k8sClient client.Client
+ targetsManager TargetsManager
+ endpointResolver backend.EndpointResolver
+ networkingManager NetworkingManager
+ eventRecorder record.EventRecorder
+ logger logr.Logger
+ vpcInfoProvider networking.VPCInfoProvider
+ podInfoRepo k8s.PodInfoRepo
+ multiClusterManager MultiClusterManager
+ vpcID string
requeueDuration time.Duration
}
@@ -112,12 +114,18 @@ func (m *defaultResourceManager) Cleanup(ctx context.Context, tgb *elbv2api.Targ
if err := m.cleanupTargets(ctx, tgb); err != nil {
return err
}
+
+ if err := m.multiClusterManager.CleanUp(ctx, tgb); err != nil {
+ return err
+ }
+
if err := m.networkingManager.Cleanup(ctx, tgb); err != nil {
return err
}
if err := m.updatePodAsHealthyForDeletedTGB(ctx, tgb); err != nil {
return err
}
+
return nil
}
@@ -163,6 +171,7 @@ func (m *defaultResourceManager) reconcileWithIPTargetType(ctx context.Context,
if err != nil {
return "", "", false, err
}
+
notDrainingTargets, _ := partitionTargetsByDrainingStatus(targets)
matchedEndpointAndTargets, unmatchedEndpoints, unmatchedTargets := matchPodEndpointWithTargets(endpoints, notDrainingTargets)
@@ -197,17 +206,40 @@ func (m *defaultResourceManager) reconcileWithIPTargetType(ctx context.Context,
}
}
+ updateTrackedTargets := false
if len(unmatchedTargets) > 0 {
- if err := m.deregisterTargets(ctx, tgARN, unmatchedTargets); err != nil {
+ updateTrackedTargets, err = m.deregisterTargets(ctx, tgb, tgARN, unmatchedTargets)
+ if err != nil {
return "", "", false, err
}
}
+
if len(unmatchedEndpoints) > 0 {
+ // To support multicluster TargetGroupBindings, we have to write the endpoint map _before_ calling register.
+ // If we only wrote the map after registerPodEndpoints() completed, we could leak targets when
+ // registerPodEndpoints() returns an error even though the registration actually happened. For example:
+ // the ELB API succeeds in registering the targets, but the response never reaches us
+ // (perhaps the network dropped it). If that happens and the pod is terminated before
+ // the next reconcile, we would leak the target because it would not exist in our endpoint map.
+
+ // We also don't want to duplicate write calls, so when registration and deregistration happen in the
+ // same reconcile loop we de-dupe these tracking calls. Since the tracked targets are only used
+ // for deregistration, it is safe to update the map here: all deregister calls have already completed.
+ updateTrackedTargets = false
+
+ if err := m.multiClusterManager.UpdateTrackedIPTargets(ctx, true, endpoints, tgb); err != nil {
+ return "", "", false, err
+ }
+
if err := m.registerPodEndpoints(ctx, tgARN, vpcID, unmatchedEndpoints); err != nil {
return "", "", false, err
}
}
+ if err := m.multiClusterManager.UpdateTrackedIPTargets(ctx, updateTrackedTargets, endpoints, tgb); err != nil {
+ return "", "", false, err
+ }
+
anyPodNeedFurtherProbe, err := m.updateTargetHealthPodCondition(ctx, targetHealthCondType, matchedEndpointAndTargets, unmatchedEndpoints)
if err != nil {
return "", "", false, err
@@ -268,7 +300,9 @@ func (m *defaultResourceManager) reconcileWithInstanceTargetType(ctx context.Con
if err != nil {
return "", "", false, err
}
+
notDrainingTargets, _ := partitionTargetsByDrainingStatus(targets)
+
_, unmatchedEndpoints, unmatchedTargets := matchNodePortEndpointWithTargets(endpoints, notDrainingTargets)
if err := m.networkingManager.ReconcileForNodePortEndpoints(ctx, tgb, endpoints); err != nil {
@@ -285,16 +319,30 @@ func (m *defaultResourceManager) reconcileWithInstanceTargetType(ctx context.Con
}
}
+ updateTrackedTargets := false
+
if len(unmatchedTargets) > 0 {
- if err := m.deregisterTargets(ctx, tgARN, unmatchedTargets); err != nil {
+ updateTrackedTargets, err = m.deregisterTargets(ctx, tgb, tgARN, unmatchedTargets)
+ if err != nil {
return "", "", false, err
}
}
+
if len(unmatchedEndpoints) > 0 {
+ updateTrackedTargets = false
+ if err := m.multiClusterManager.UpdateTrackedInstanceTargets(ctx, true, endpoints, tgb); err != nil {
+ return "", "", false, err
+ }
+
if err := m.registerNodePortEndpoints(ctx, tgARN, unmatchedEndpoints); err != nil {
return "", "", false, err
}
}
+
+ if err := m.multiClusterManager.UpdateTrackedInstanceTargets(ctx, updateTrackedTargets, endpoints, tgb); err != nil {
+ return "", "", false, err
+ }
+
tgbScopedLogger.Info("Successful reconcile", "checkpoint", newCheckPoint)
return newCheckPoint, oldCheckPoint, false, nil
}
@@ -309,7 +357,10 @@ func (m *defaultResourceManager) cleanupTargets(ctx context.Context, tgb *elbv2a
}
return err
}
- if err := m.deregisterTargets(ctx, tgb.Spec.TargetGroupARN, targets); err != nil {
+
+ _, err = m.deregisterTargets(ctx, tgb, tgb.Spec.TargetGroupARN, targets)
+
+ if err != nil {
if isELBV2TargetGroupNotFoundError(err) {
return nil
} else if isELBV2TargetGroupARNInvalidError(err) {
@@ -467,12 +518,21 @@ func (m *defaultResourceManager) updatePodAsHealthyForDeletedTGB(ctx context.Con
return nil
}
-func (m *defaultResourceManager) deregisterTargets(ctx context.Context, tgARN string, targets []TargetInfo) error {
+func (m *defaultResourceManager) deregisterTargets(ctx context.Context, tgb *elbv2api.TargetGroupBinding, tgARN string, targets []TargetInfo) (bool, error) {
+ filteredTargets, updateTrackedTargets, err := m.multiClusterManager.FilterTargetsForDeregistration(ctx, tgb, targets)
+ if err != nil {
+ return false, err
+ }
+
+ if len(filteredTargets) == 0 {
+ return updateTrackedTargets, nil
+ }
+
sdkTargets := make([]elbv2types.TargetDescription, 0, len(targets))
- for _, target := range targets {
+ for _, target := range filteredTargets {
sdkTargets = append(sdkTargets, target.Target)
}
- return m.targetsManager.DeregisterTargets(ctx, tgARN, sdkTargets)
+ return true, m.targetsManager.DeregisterTargets(ctx, tgARN, sdkTargets)
}
func (m *defaultResourceManager) registerPodEndpoints(ctx context.Context, tgARN, tgVpcID string, endpoints []backend.PodEndpoint) error {
diff --git a/pkg/targetgroupbinding/targets_manager_types.go b/pkg/targetgroupbinding/targets_manager_types.go
index 66d363988d..6b29dd88e0 100644
--- a/pkg/targetgroupbinding/targets_manager_types.go
+++ b/pkg/targetgroupbinding/targets_manager_types.go
@@ -16,6 +16,11 @@ type TargetInfo struct {
TargetHealth *elbv2types.TargetHealth
}
+// GetIdentifier returns the target's identifier; the format must match backend.Endpoint.GetIdentifier().
+func (t *TargetInfo) GetIdentifier() string {
+ return fmt.Sprintf("%s:%d", *t.Target.Id, *t.Target.Port)
+}
+
// IsHealthy returns whether target is healthy.
func (t *TargetInfo) IsHealthy() bool {
if t.TargetHealth == nil {
diff --git a/pkg/targetgroupbinding/targets_manager_types_test.go b/pkg/targetgroupbinding/targets_manager_types_test.go
index dd20335dd5..08583630f4 100644
--- a/pkg/targetgroupbinding/targets_manager_types_test.go
+++ b/pkg/targetgroupbinding/targets_manager_types_test.go
@@ -4,6 +4,7 @@ import (
awssdk "github.com/aws/aws-sdk-go-v2/aws"
elbv2types "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2/types"
"github.com/stretchr/testify/assert"
+ "sigs.k8s.io/aws-load-balancer-controller/pkg/backend"
"testing"
)
@@ -255,3 +256,48 @@ func TestUniqueIDForTargetDescription(t *testing.T) {
})
}
}
+
+func TestGetIdentifier(t *testing.T) {
+ tests := []struct {
+ name string
+ endpoint backend.Endpoint
+ targetInfo TargetInfo
+ want string
+ }{
+ {
+ name: "instance",
+ endpoint: backend.NodePortEndpoint{
+ InstanceID: "i-12345",
+ Port: 80,
+ },
+ targetInfo: TargetInfo{
+ Target: elbv2types.TargetDescription{
+ Id: awssdk.String("i-12345"),
+ Port: awssdk.Int32(80),
+ },
+ },
+ want: "i-12345:80",
+ },
+ {
+ name: "ip",
+ endpoint: backend.PodEndpoint{
+ IP: "127.0.0.1",
+ Port: 80,
+ },
+ targetInfo: TargetInfo{
+ Target: elbv2types.TargetDescription{
+ Id: awssdk.String("127.0.0.1"),
+ Port: awssdk.Int32(80),
+ },
+ },
+ want: "127.0.0.1:80",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ assert.Equal(t, tt.want, tt.targetInfo.GetIdentifier())
+ assert.Equal(t, tt.want, tt.endpoint.GetIdentifier(false))
+ })
+ }
+}
diff --git a/pkg/targetgroupbinding/utils.go b/pkg/targetgroupbinding/utils.go
index 05bd3746c0..9b9cddabe9 100644
--- a/pkg/targetgroupbinding/utils.go
+++ b/pkg/targetgroupbinding/utils.go
@@ -43,7 +43,7 @@ func calculateTGBReconcileCheckpoint[V backend.Endpoint](endpoints []V, tgb *elb
endpointStrings := make([]string, 0, len(endpoints))
for _, ep := range endpoints {
- endpointStrings = append(endpointStrings, ep.GetIdentifier())
+ endpointStrings = append(endpointStrings, ep.GetIdentifier(true))
}
slices.Sort(endpointStrings)
diff --git a/test/e2e/service/nlb_ip_target_test.go b/test/e2e/service/nlb_ip_target_test.go
index 540bb37fd0..e7a8d15455 100644
--- a/test/e2e/service/nlb_ip_target_test.go
+++ b/test/e2e/service/nlb_ip_target_test.go
@@ -2,6 +2,9 @@ package service
import (
"context"
+ awssdk "github.com/aws/aws-sdk-go-v2/aws"
+ elbv2types "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2/types"
+ "time"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -186,6 +189,104 @@ var _ = Describe("k8s service reconciled by the aws load balancer", func() {
})
Expect(err).ToNot(HaveOccurred())
})
+ By("Enabling Multi-cluster mode", func() {
+ // Enable multicluster mode
+ err := stack.UpdateServiceAnnotations(ctx, tf, map[string]string{
+ "service.beta.kubernetes.io/aws-load-balancer-multi-cluster-target-group": "true",
+ })
+ Expect(err).ToNot(HaveOccurred())
+
+ // Wait for the change to be applied.
+ time.Sleep(60 * time.Second)
+
+ // Register a new target that exists outside the cluster.
+ targetGroups, err := tf.TGManager.GetTargetGroupsForLoadBalancer(ctx, lbARN)
+
+ Expect(err).ToNot(HaveOccurred())
+
+ Expect(targetGroups).To(HaveLen(1))
+
+ tgArn := *targetGroups[0].TargetGroupArn
+ targets, err := tf.TGManager.GetCurrentTargets(ctx, tgArn)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(targets).ShouldNot(HaveLen(0))
+
+ err = tf.TGManager.RegisterTargets(ctx, tgArn, []elbv2types.TargetDescription{
+ {
+ Id: targets[0].Target.Id,
+ Port: awssdk.Int32(*targets[0].Target.Port + 1),
+ },
+ })
+
+ Expect(err).ToNot(HaveOccurred())
+
+ // Change the check point annotation to trigger a reconcile.
+ err = stack.UpdateServiceAnnotations(ctx, tf, map[string]string{
+ "elbv2.k8s.aws/checkpoint": "baz",
+ })
+
+ Expect(err).ToNot(HaveOccurred())
+
+ // Wait for the change to be applied.
+ time.Sleep(120 * time.Second)
+
+ // We should see the targets registered from within the cluster plus the extra target registered under a different port.
+ err = verifyAWSLoadBalancerResources(ctx, tf, lbARN, LoadBalancerExpectation{
+ Type: "network",
+ Scheme: "internet-facing",
+ TargetType: "ip",
+ Listeners: map[string]string{
+ "80": "TCP",
+ },
+ TargetGroups: map[string]string{
+ "80": "TCP",
+ },
+ NumTargets: int(numReplicas) + 1,
+ TargetGroupHC: &TargetGroupHC{
+ Protocol: "HTTP",
+ Port: "80",
+ Path: "/healthz",
+ Interval: 30,
+ Timeout: 6,
+ HealthyThreshold: 2,
+ UnhealthyThreshold: 2,
+ },
+ })
+ Expect(err).ToNot(HaveOccurred())
+
+ // Disable multicluster mode
+ err = stack.UpdateServiceAnnotations(ctx, tf, map[string]string{
+ "service.beta.kubernetes.io/aws-load-balancer-multi-cluster-target-group": "false",
+ })
+ Expect(err).ToNot(HaveOccurred())
+
+ // Wait for the change to be applied.
+ time.Sleep(120 * time.Second)
+
+ // Only the replicas in the cluster should exist in the target group again.
+ err = verifyAWSLoadBalancerResources(ctx, tf, lbARN, LoadBalancerExpectation{
+ Type: "network",
+ Scheme: "internet-facing",
+ TargetType: "ip",
+ Listeners: map[string]string{
+ "80": "TCP",
+ },
+ TargetGroups: map[string]string{
+ "80": "TCP",
+ },
+ NumTargets: int(numReplicas),
+ TargetGroupHC: &TargetGroupHC{
+ Protocol: "HTTP",
+ Port: "80",
+ Path: "/healthz",
+ Interval: 30,
+ Timeout: 6,
+ HealthyThreshold: 2,
+ UnhealthyThreshold: 2,
+ },
+ })
+ Expect(err).ToNot(HaveOccurred())
+ })
})
})
diff --git a/test/framework/resources/aws/target_group.go b/test/framework/resources/aws/target_group.go
index d766aea023..4c11b5e252 100644
--- a/test/framework/resources/aws/target_group.go
+++ b/test/framework/resources/aws/target_group.go
@@ -15,6 +15,8 @@ type TargetGroupManager interface {
CheckTargetGroupHealthy(ctx context.Context, tgARN string, expectedTargetCount int) (bool, error)
GetCurrentTargetCount(ctx context.Context, tgARN string) (int, error)
GetTargetGroupAttributes(ctx context.Context, tgARN string) ([]elbv2types.TargetGroupAttribute, error)
+ GetCurrentTargets(ctx context.Context, tgARN string) ([]elbv2types.TargetHealthDescription, error)
+ RegisterTargets(ctx context.Context, tgARN string, targets []elbv2types.TargetDescription) error
}
// NewDefaultTargetGroupManager constructs new defaultTargetGroupManager.
@@ -44,16 +46,24 @@ func (m *defaultTargetGroupManager) GetTargetGroupsForLoadBalancer(ctx context.C
return targetGroups.TargetGroups, nil
}
-// GetCurrentTargetCount returns the count of all the targets in the target group that are currently in initial, healthy or unhealthy state
-func (m *defaultTargetGroupManager) GetCurrentTargetCount(ctx context.Context, tgARN string) (int, error) {
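+// GetCurrentTargets returns the health descriptions of all targets currently registered with the target group.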
+func (m *defaultTargetGroupManager) GetCurrentTargets(ctx context.Context, tgARN string) ([]elbv2types.TargetHealthDescription, error) {
resp, err := m.elbv2Client.DescribeTargetHealthWithContext(ctx, &elbv2sdk.DescribeTargetHealthInput{
TargetGroupArn: awssdk.String(tgARN),
})
+ if err != nil {
+ return nil, err
+ }
+ return resp.TargetHealthDescriptions, nil
+}
+
+// GetCurrentTargetCount returns the count of all the targets in the target group that are currently in initial, healthy or unhealthy state
+func (m *defaultTargetGroupManager) GetCurrentTargetCount(ctx context.Context, tgARN string) (int, error) {
+ targets, err := m.GetCurrentTargets(ctx, tgARN)
if err != nil {
return 0, err
}
count := 0
- for _, thd := range resp.TargetHealthDescriptions {
+ for _, thd := range targets {
state := string(thd.TargetHealth.State)
if elbv2types.TargetHealthStateEnum(state) == elbv2types.TargetHealthStateEnumHealthy || elbv2types.TargetHealthStateEnum(state) == elbv2types.TargetHealthStateEnumInitial ||
elbv2types.TargetHealthStateEnum(state) == elbv2types.TargetHealthStateEnumUnhealthy {
@@ -92,3 +102,13 @@ func (m *defaultTargetGroupManager) CheckTargetGroupHealthy(ctx context.Context,
}
return true, nil
}
+
+// RegisterTargets registers targets with the target group.
+func (m *defaultTargetGroupManager) RegisterTargets(ctx context.Context, tgARN string, targets []elbv2types.TargetDescription) error {
+ _, err := m.elbv2Client.RegisterTargetsWithContext(ctx, &elbv2sdk.RegisterTargetsInput{
+ TargetGroupArn: awssdk.String(tgARN),
+ Targets: targets,
+ })
+
+ return err
+}