diff --git a/charts/aws-vpc-cni/templates/clusterrole.yaml b/charts/aws-vpc-cni/templates/clusterrole.yaml
index 24b91556fb9..d2e9fd2d915 100644
--- a/charts/aws-vpc-cni/templates/clusterrole.yaml
+++ b/charts/aws-vpc-cni/templates/clusterrole.yaml
@@ -33,3 +33,8 @@ rules:
     resources:
       - events
     verbs: ["create", "patch", "list"]
+  - apiGroups:
+      - vpcresources.k8s.aws
+    resources:
+      - cninodes
+    verbs: ["get", "list", "patch"]
diff --git a/config/master/aws-k8s-cni-cn.yaml b/config/master/aws-k8s-cni-cn.yaml
index b4d16151e67..869ca457e70 100644
--- a/config/master/aws-k8s-cni-cn.yaml
+++ b/config/master/aws-k8s-cni-cn.yaml
@@ -66,6 +66,11 @@ rules:
     resources:
       - events
     verbs: ["create", "patch", "list"]
+  - apiGroups:
+      - vpcresources.k8s.aws
+    resources:
+      - cninodes
+    verbs: ["get", "list", "patch"]
 ---
 # Source: aws-vpc-cni/templates/clusterrolebinding.yaml
 apiVersion: rbac.authorization.k8s.io/v1
diff --git a/config/master/aws-k8s-cni-us-gov-east-1.yaml b/config/master/aws-k8s-cni-us-gov-east-1.yaml
index b82cdbbd213..28d1ddd4ca0 100644
--- a/config/master/aws-k8s-cni-us-gov-east-1.yaml
+++ b/config/master/aws-k8s-cni-us-gov-east-1.yaml
@@ -66,6 +66,11 @@ rules:
     resources:
       - events
     verbs: ["create", "patch", "list"]
+  - apiGroups:
+      - vpcresources.k8s.aws
+    resources:
+      - cninodes
+    verbs: ["get", "list", "patch"]
 ---
 # Source: aws-vpc-cni/templates/clusterrolebinding.yaml
 apiVersion: rbac.authorization.k8s.io/v1
diff --git a/config/master/aws-k8s-cni-us-gov-west-1.yaml b/config/master/aws-k8s-cni-us-gov-west-1.yaml
index 11903040293..36ad982c642 100644
--- a/config/master/aws-k8s-cni-us-gov-west-1.yaml
+++ b/config/master/aws-k8s-cni-us-gov-west-1.yaml
@@ -66,6 +66,11 @@ rules:
     resources:
       - events
     verbs: ["create", "patch", "list"]
+  - apiGroups:
+      - vpcresources.k8s.aws
+    resources:
+      - cninodes
+    verbs: ["get", "list", "patch"]
 ---
 # Source: aws-vpc-cni/templates/clusterrolebinding.yaml
 apiVersion: rbac.authorization.k8s.io/v1
diff --git a/config/master/aws-k8s-cni.yaml b/config/master/aws-k8s-cni.yaml
index 5f52b1b8d9f..bbcace5f4ed 100644
--- a/config/master/aws-k8s-cni.yaml
+++ b/config/master/aws-k8s-cni.yaml
@@ -61,11 +61,16 @@ rules:
   - apiGroups: [""]
     resources:
      - nodes
-    verbs: ["list", "watch", "get", "update"]
+    verbs: ["list", "watch", "get"]
   - apiGroups: ["", "events.k8s.io"]
     resources:
      - events
     verbs: ["create", "patch", "list"]
+  - apiGroups:
+      - vpcresources.k8s.aws
+    resources:
+      - cninodes
+    verbs: ["get", "list", "patch"]
 ---
 # Source: aws-vpc-cni/templates/clusterrolebinding.yaml
 apiVersion: rbac.authorization.k8s.io/v1
diff --git a/go.mod b/go.mod
index 9d03a5740e8..cf8f3cc9b6f 100644
--- a/go.mod
+++ b/go.mod
@@ -5,7 +5,7 @@ go 1.20
 require (
 	github.com/apparentlymart/go-cidr v1.0.1
 	github.com/aws/amazon-vpc-cni-k8s/test/agent v0.0.0-20230614172555-5788cde362fa
-	github.com/aws/amazon-vpc-resource-controller-k8s v1.1.5
+	github.com/aws/amazon-vpc-resource-controller-k8s v1.1.8
 	github.com/aws/aws-sdk-go v1.43.29
 	github.com/containernetworking/cni v1.1.1
 	github.com/containernetworking/plugins v1.1.1
@@ -123,6 +123,7 @@ require (
 	github.com/rubenv/sql-migrate v1.2.0 // indirect
 	github.com/russross/blackfriday/v2 v2.1.0 // indirect
 	github.com/safchain/ethtool v0.0.0-20210803160452-9aa261dae9b1 // indirect
+	github.com/samber/lo v1.38.1
 	github.com/shopspring/decimal v1.2.0 // indirect
 	github.com/spf13/cast v1.4.1 // indirect
 	github.com/spf13/cobra v1.6.1 // indirect
@@ -135,13 +136,14 @@ require (
 	go.uber.org/atomic v1.7.0 // indirect
 	go.uber.org/multierr v1.6.0 // indirect
 	golang.org/x/crypto v0.5.0 // indirect
+	golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 // indirect
 	golang.org/x/oauth2 v0.4.0 // indirect
 	golang.org/x/sync v0.1.0 // indirect
 	golang.org/x/term v0.6.0 // indirect
 	golang.org/x/text v0.8.0 // indirect
 	golang.org/x/time v0.3.0 // indirect
 	golang.org/x/tools v0.6.0 // indirect
-	gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect
+	gomodules.xyz/jsonpatch/v2 v2.3.0 // indirect
 	google.golang.org/appengine v1.6.7 // indirect
 	google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
diff --git a/go.sum b/go.sum
index 4b49a5dd762..134546d4b96 100644
--- a/go.sum
+++ b/go.sum
@@ -77,8 +77,8 @@ github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 h1:4daAzAu0
 github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg=
 github.com/aws/amazon-vpc-cni-k8s/test/agent v0.0.0-20230614172555-5788cde362fa h1:92EaVRXJVzV5mu0D9W4MTAdulShMZ7rFZIFVVO8u2XA=
 github.com/aws/amazon-vpc-cni-k8s/test/agent v0.0.0-20230614172555-5788cde362fa/go.mod h1:nPrcILbew8ccLKKYwzyHJ8uZvE0C7ubmDmV6M4fYnp0=
-github.com/aws/amazon-vpc-resource-controller-k8s v1.1.5 h1:tyZBu4++W5SvSIxlVIh51cT84weqq0H+CmrsHtIgeys=
-github.com/aws/amazon-vpc-resource-controller-k8s v1.1.5/go.mod h1:W2c9R0NFy+HkO50f/ZHge0Be6QZpol6B+SegWwOCk3c=
+github.com/aws/amazon-vpc-resource-controller-k8s v1.1.8 h1:1YPnp2ltCTJVYeiPhNmQJaefIOVLGPtZJqG/NwyKVyE=
+github.com/aws/amazon-vpc-resource-controller-k8s v1.1.8/go.mod h1:HwoojB30a88mOfShN5qkciEIdyevfs5Y/I28ru9m1s8=
 github.com/aws/aws-sdk-go v1.43.29 h1:P6tBpMLwVLS/QwPkaBxfDIF3SmPouoacIk+/7NKnDxY=
 github.com/aws/aws-sdk-go v1.43.29/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
 github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
@@ -171,7 +171,6 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.m
 github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
 github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ=
 github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U=
 github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
 github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww=
@@ -543,6 +542,8 @@ github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb
 github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4=
 github.com/safchain/ethtool v0.0.0-20210803160452-9aa261dae9b1 h1:ZFfeKAhIQiiOrQaI3/znw0gOmYpO28Tcu1YaqMa/jtQ=
 github.com/safchain/ethtool v0.0.0-20210803160452-9aa261dae9b1/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4=
+github.com/samber/lo v1.38.1 h1:j2XEAqXKb09Am4ebOg31SpvzUTTs6EN3VfgeLUhPdXM=
+github.com/samber/lo v1.38.1/go.mod h1:+m/ZKRl6ClXCE2Lgf3MsQlWfh4bn1bz6CXEOxnEXnEA=
 github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
 github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0=
 github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
@@ -647,6 +648,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
 golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
 golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
+golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 h1:k/i9J1pBpvlfR+9QsetwPyERsqu1GIbi967PQMq3Ivc=
+golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w=
 golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
 golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
 golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -897,8 +900,8 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-gomodules.xyz/jsonpatch/v2 v2.2.0 h1:4pT439QV83L+G9FkcCriY6EkpcK6r6bK+A5FBUMI7qY=
-gomodules.xyz/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY=
+gomodules.xyz/jsonpatch/v2 v2.3.0 h1:8NFhfS6gzxNqjLIYnZxg319wZ5Qjnx4m/CcX+Klzazc=
+gomodules.xyz/jsonpatch/v2 v2.3.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY=
 google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
 google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
 google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
diff --git a/pkg/ipamd/ipamd.go b/pkg/ipamd/ipamd.go
index ed16d9ae944..c5c35e51d83 100644
--- a/pkg/ipamd/ipamd.go
+++ b/pkg/ipamd/ipamd.go
@@ -31,6 +31,7 @@ import (
 	"github.com/aws/aws-sdk-go/service/ec2"
 	"github.com/pkg/errors"
 	"github.com/prometheus/client_golang/prometheus"
+	"github.com/samber/lo"
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/sets"
@@ -44,6 +45,7 @@ import (
 	"github.com/aws/amazon-vpc-cni-k8s/pkg/networkutils"
 	"github.com/aws/amazon-vpc-cni-k8s/pkg/utils/eventrecorder"
 	"github.com/aws/amazon-vpc-cni-k8s/pkg/utils/logger"
+	rcv1alpha1 "github.com/aws/amazon-vpc-resource-controller-k8s/apis/vpcresources/v1alpha1"
 )
 
 // The package ipamd is a long running daemon which manages a warm pool of available IP addresses.
@@ -135,12 +137,6 @@ const (
 	// envNodeName will be used to store Node name
 	envNodeName = "MY_NODE_NAME"
 
-	// vpcENIConfigLabel is used by the VPC resource controller to pick the right ENI config.
-	vpcENIConfigLabel = "vpc.amazonaws.com/eniConfig"
-
-	// trunkInterfaceLabel is used by CNI to check if correct label was added by the VPC resource controller
-	trunkInterfaceLabel = "vpc.amazonaws.com/has-trunk-attached"
-
 	//envEnableIpv4PrefixDelegation is used to allocate /28 prefix instead of secondary IP for an ENI.
 	envEnableIpv4PrefixDelegation = "ENABLE_PREFIX_DELEGATION"
 
@@ -575,34 +571,19 @@ func (c *IPAMContext) nodeInit() error {
 
 	eniConfigName, err := eniconfig.GetNodeSpecificENIConfigName(node)
 	if err == nil && c.useCustomNetworking && eniConfigName != "default" {
-		// Signal to VPC Resource Controller that the node is using custom networking
-		err := c.SetNodeLabel(ctx, vpcENIConfigLabel, eniConfigName)
+		// Add the feature name to this node's CNINode
+		err := c.AddFeatureToCNINode(ctx, rcv1alpha1.CustomNetworking, eniConfigName)
 		if err != nil {
-			log.Errorf("Failed to set eniConfig node label", err)
-			podENIErrInc("nodeInit")
-			return err
-		}
-	} else {
-		// Remove the custom networking label
-		err := c.SetNodeLabel(ctx, vpcENIConfigLabel, "")
-		if err != nil {
-			log.Errorf("Failed to delete eniConfig node label", err)
+			log.Errorf("Failed to add the custom networking feature to CNINode: %v", err)
 			podENIErrInc("nodeInit")
 			return err
 		}
+		log.Infof("Enabled feature %s in CNINode for node %s if it was not already present", rcv1alpha1.CustomNetworking, c.myNodeName)
 	}
 
-	if metadataResult.TrunkENI != "" {
-		// Signal to VPC Resource Controller that the node has a trunk already
-		err := c.SetNodeLabel(ctx, trunkInterfaceLabel, "true")
-		if err != nil {
-			log.Errorf("Failed to set node label", err)
-			podENIErrInc("nodeInit")
-			// If this fails, we probably can't talk to the API server. Let the pod restart
-			return err
-		}
-	} else {
-		// Check if we want to label node for trunk interface during node init
+	if c.enablePodENI {
+		// Check if we want to add the trunk interface feature to CNINode during node init.
+		// We don't check whether the node already has a trunk attached during initialization.
 		c.askForTrunkENIIfNeeded(ctx)
 	}
 
@@ -701,7 +682,9 @@ func (c *IPAMContext) StartNodeIPPoolManager() {
 }
 
 func (c *IPAMContext) updateIPPoolIfRequired(ctx context.Context) {
-	c.askForTrunkENIIfNeeded(ctx)
+	if c.enablePodENI && c.dataStore.GetTrunkENI() == "" {
+		c.askForTrunkENIIfNeeded(ctx)
+	}
 
 	if c.isDatastorePoolTooLow() {
 		c.increaseDatastorePool(ctx)
@@ -1283,27 +1266,18 @@ func (c *IPAMContext) logPoolStats(dataStoreStats *datastore.DataStoreStats) {
 }
 
 func (c *IPAMContext) askForTrunkENIIfNeeded(ctx context.Context) {
-	if c.enablePodENI && c.dataStore.GetTrunkENI() == "" {
-		// Check that there is room for a trunk ENI to be attached:
-		if c.dataStore.GetENIs() >= (c.maxENI - c.unmanagedENI) {
-			log.Debug("No slot available for a trunk ENI to be attached. Not labeling the node")
-			return
-		}
-		// We need to signal that VPC Resource Controller needs to attach a trunk ENI
-		err := c.SetNodeLabel(ctx, trunkInterfaceLabel, "false")
-		if err != nil {
-			podENIErrInc("askForTrunkENIIfNeeded")
-			log.Errorf("Failed to set node label", err)
-		}
+	// Check that there is room for a trunk ENI to be attached:
+	if c.dataStore.GetENIs() >= (c.maxENI - c.unmanagedENI) {
+		log.Error("No slot available for a trunk ENI to be attached. Not adding the trunk ENI feature to CNINode")
Not init the node") + return } -} - -func (c *IPAMContext) getNodeLabels(ctx context.Context) (map[string]string, error) { - var node corev1.Node - if err := c.cachedK8SClient.Get(ctx, types.NamespacedName{Name: c.myNodeName}, &node); err != nil { - return nil, err + // We need to signal that VPC Resource Controller needs to attach a trunk ENI + err := c.AddFeatureToCNINode(ctx, rcv1alpha1.SecurityGroupsForPods, "") + if err != nil { + podENIErrInc("askForTrunkENIIfNeeded") + log.Errorf("Failed to add SGP feature into the CNINode", err) } else { - return node.GetLabels(), err + log.Infof("Successfully added feature %s to CNINode if not existing", rcv1alpha1.SecurityGroupsForPods) } } @@ -1417,13 +1391,6 @@ func (c *IPAMContext) nodeIPPoolReconcile(ctx context.Context, interval time.Dur if c.enablePodENI && metadataResult.TrunkENI != "" { log.Debugf("Trunk interface (%s) has been added to the node already.", metadataResult.TrunkENI) - // Label the node that we have a trunk - err = c.SetNodeLabel(ctx, trunkInterfaceLabel, "true") - if err != nil { - podENIErrInc("askForTrunkENIIfNeeded") - log.Errorf("Failed to set node label for trunk. Aborting reconcile", err) - return - } } // Update trunk ENI trunkENI = metadataResult.TrunkENI @@ -1994,54 +1961,6 @@ func (c *IPAMContext) getTrunkLinkIndex() (int, error) { return -1, errors.New("no trunk found") } -// SetNodeLabel sets or deletes a node label -func (c *IPAMContext) SetNodeLabel(ctx context.Context, key, value string) error { - request := types.NamespacedName{ - Name: c.myNodeName, - } - - err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { - node := &corev1.Node{} - // Find my node - err := c.cachedK8SClient.Get(ctx, request, node) - if err != nil { - log.Errorf("Failed to get node: %v", err) - return err - } - log.Debugf("Node found %q - no of labels - %d", node.Name, len(node.Labels)) - - if labelValue, ok := node.Labels[key]; ok && labelValue == value { - log.Debugf("Node label %q is already %q", key, labelValue) - return nil - } - - // Make deep copy for modification - updateNode := node.DeepCopy() - - // in case for some reason node object doesn't have label map - if updateNode.Labels == nil { - updateNode.Labels = make(map[string]string) - } - - // Set node label - if value != "" { - updateNode.Labels[key] = value - } else { - // Empty value, delete the label - log.Debugf("Deleting label %q", key) - delete(updateNode.Labels, key) - } - - if err = c.cachedK8SClient.Update(ctx, updateNode); err != nil { - log.Errorf("Failed to patch node %s with label %q: %q, error: %v", c.myNodeName, key, value, err) - return err - } - log.Debugf("Updated node %s with label %q: %q", c.myNodeName, key, value) - return nil - }) - return err -} - // GetPod returns the pod matching the name and namespace func (c *IPAMContext) GetPod(podName, namespace string) (*corev1.Pod, error) { ctx := context.TODO() @@ -2371,3 +2290,43 @@ func (c *IPAMContext) isConfigValid() bool { return true } + +func (c *IPAMContext) AddFeatureToCNINode(ctx context.Context, featureName rcv1alpha1.FeatureName, featureValue string) error { + cniNode := &rcv1alpha1.CNINode{} + + if err := c.rawK8SClient.Get(ctx, types.NamespacedName{Name: c.myNodeName}, cniNode); err != nil { + return err + } + + if lo.ContainsBy(cniNode.Spec.Features, func(addedFeature rcv1alpha1.Feature) bool { + return featureName == addedFeature.Name && featureValue == addedFeature.Value + }) { + return nil + } + + newCNINode := cniNode.DeepCopy() + newFeature := rcv1alpha1.Feature{ + Name: 
+		Value: featureValue,
+	}
+
+	if lo.ContainsBy(cniNode.Spec.Features, func(addedFeature rcv1alpha1.Feature) bool {
+		return featureName == addedFeature.Name
+	}) {
+		// This can happen when the user updates the eniConfig name and aws-node restarts;
+		// the CNINode then needs to be updated if the node wasn't terminated.
+		// We need to groom the old features here.
+		newCNINode.Spec.Features = []rcv1alpha1.Feature{}
+		for _, oldFeature := range cniNode.Spec.Features {
+			if oldFeature.Name == featureName {
+				continue
+			} else {
+				newCNINode.Spec.Features = append(newCNINode.Spec.Features, oldFeature)
+			}
+		}
+	}
+
+	newCNINode.Spec.Features = append(newCNINode.Spec.Features, newFeature)
+
+	return c.rawK8SClient.Patch(ctx, newCNINode, client.MergeFromWithOptions(cniNode, client.MergeFromWithOptimisticLock{}))
+}
diff --git a/pkg/ipamd/ipamd_test.go b/pkg/ipamd/ipamd_test.go
index 906f7a391b3..c8f6effe92f 100644
--- a/pkg/ipamd/ipamd_test.go
+++ b/pkg/ipamd/ipamd_test.go
@@ -27,6 +27,7 @@ import (
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/service/ec2"
 	"github.com/golang/mock/gomock"
+	"github.com/samber/lo"
 	"github.com/stretchr/testify/assert"
 	"github.com/vishvananda/netlink"
 	corev1 "k8s.io/api/core/v1"
@@ -45,6 +46,7 @@ import (
 	mock_eniconfig "github.com/aws/amazon-vpc-cni-k8s/pkg/eniconfig/mocks"
 	"github.com/aws/amazon-vpc-cni-k8s/pkg/ipamd/datastore"
 	mock_networkutils "github.com/aws/amazon-vpc-cni-k8s/pkg/networkutils/mocks"
+	rcscheme "github.com/aws/amazon-vpc-resource-controller-k8s/apis/vpcresources/v1alpha1"
 )
 
 const (
@@ -93,6 +95,7 @@ func setup(t *testing.T) *testMocks {
 	k8sSchema := runtime.NewScheme()
 	clientgoscheme.AddToScheme(k8sSchema)
 	eniconfigscheme.AddToScheme(k8sSchema)
+	rcscheme.AddToScheme(k8sSchema)
 
 	return &testMocks{
 		ctrl: ctrl,
@@ -1945,171 +1948,71 @@ func TestIPAMContext_askForTrunkENIIfNeeded(t *testing.T) {
 	ctx := context.Background()
 
 	mockContext := &IPAMContext{
-		rawK8SClient:    m.rawK8SClient,
-		cachedK8SClient: m.cachedK8SClient,
-		dataStore:       datastore.NewDataStore(log, datastore.NewTestCheckpoint(datastore.CheckpointData{Version: datastore.CheckpointFormatVersion}), false),
-		awsClient:       m.awsutils,
-		networkClient:   m.network,
-		primaryIP:       make(map[string]string),
-		terminating:     int32(0),
-		maxENI:          1,
-		myNodeName:      myNodeName,
+		rawK8SClient:  m.rawK8SClient,
+		dataStore:     datastore.NewDataStore(log, datastore.NewTestCheckpoint(datastore.CheckpointData{Version: datastore.CheckpointFormatVersion}), false),
+		awsClient:     m.awsutils,
+		networkClient: m.network,
+		primaryIP:     make(map[string]string),
+		terminating:   int32(0),
+		maxENI:        1,
+		myNodeName:    myNodeName,
 	}
 
-	labels := map[string]string{
-		"testKey": "testValue",
-	}
 	fakeNode := v1.Node{
 		TypeMeta:   metav1.TypeMeta{Kind: "Node"},
-		ObjectMeta: metav1.ObjectMeta{Name: myNodeName, Labels: labels},
+		ObjectMeta: metav1.ObjectMeta{Name: myNodeName},
 		Spec:       v1.NodeSpec{},
 		Status:     v1.NodeStatus{},
 	}
-	_ = m.cachedK8SClient.Create(ctx, &fakeNode)
+	_ = m.rawK8SClient.Create(ctx, &fakeNode)
+
+	fakeCNINode := rcscheme.CNINode{
+		ObjectMeta: metav1.ObjectMeta{Name: fakeNode.Name},
+	}
+
+	err := m.rawK8SClient.Create(ctx, &fakeCNINode)
+	assert.NoError(t, err)
 
 	_ = mockContext.dataStore.AddENI("eni-1", 1, true, false, false)
 	// If ENABLE_POD_ENI is not set, nothing happens
 	mockContext.askForTrunkENIIfNeeded(ctx)
 
 	mockContext.enablePodENI = true
-	// Enabled, we should try to set the label if there is room
 	mockContext.askForTrunkENIIfNeeded(ctx)
 
 	var notUpdatedNode corev1.Node
-	var updatedNode corev1.Node
 	NodeKey := types.NamespacedName{
 		Namespace: "",
 		Name:      myNodeName,
 	}
-	err := m.cachedK8SClient.Get(ctx, NodeKey, &notUpdatedNode)
-	// Since there was no room, no label should be added
+	err = m.rawK8SClient.Get(ctx, NodeKey, &notUpdatedNode)
 	assert.NoError(t, err)
-	_, has := notUpdatedNode.Labels["vpc.amazonaws.com/has-trunk-attached"]
-	assert.False(t, has, "the node shouldn't be labelled for trunk when there is no room")
-	assert.Equal(t, 1, len(notUpdatedNode.Labels))
+	var cniNode rcscheme.CNINode
+
+	err = mockContext.rawK8SClient.Get(ctx, types.NamespacedName{
+		Name: fakeNode.Name,
+	}, &cniNode)
+	assert.NoError(t, err)
+
+	contained := lo.ContainsBy(cniNode.Spec.Features, func(addedFeature rcscheme.Feature) bool {
+		return rcscheme.SecurityGroupsForPods == addedFeature.Name && addedFeature.Value == ""
+	})
+	assert.False(t, contained, "the node's CNINode shouldn't be updated for trunk when there is no room")
+	assert.Equal(t, 0, len(cniNode.Spec.Features))
 
 	mockContext.maxENI = 4
 	// Now there is room!
 	mockContext.askForTrunkENIIfNeeded(ctx)
 
-	// Fetch the updated node and verify that the label is set
-	err = m.cachedK8SClient.Get(ctx, NodeKey, &updatedNode)
+	err = mockContext.rawK8SClient.Get(ctx, types.NamespacedName{
+		Name: fakeNode.Name,
+	}, &cniNode)
 	assert.NoError(t, err)
-	assert.Equal(t, "false", updatedNode.Labels["vpc.amazonaws.com/has-trunk-attached"])
-}
-
-func TestGetNodeLabels(t *testing.T) {
-	os.Setenv("MY_NODE_NAME", myNodeName)
-	var m *testMocks
-
-	eniConfigName := "testConfig"
-	tests := []struct {
-		node      v1.Node
-		hasError  bool
-		labelsLen int
-		hasLabel  bool
-		msg       string
-	}{
-		{
-			node: v1.Node{
-				TypeMeta: metav1.TypeMeta{Kind: "Node"},
-				ObjectMeta: metav1.ObjectMeta{
-					Name:   myNodeName,
-					Labels: map[string]string{vpcENIConfigLabel: eniConfigName},
-				},
-				Spec:   v1.NodeSpec{},
-				Status: v1.NodeStatus{},
-			},
-			hasError:  false,
-			hasLabel:  true,
-			labelsLen: 1,
-			msg:       "Having EniConfig Label on node",
-		},
-		{
-			node: v1.Node{
-				TypeMeta: metav1.TypeMeta{Kind: "Node"},
-				ObjectMeta: metav1.ObjectMeta{
-					Name:   myNodeName,
-					Labels: map[string]string{"key_1": "value_1", "key_2": "value_2"},
-				},
-				Spec:   v1.NodeSpec{},
-				Status: v1.NodeStatus{},
-			},
-			hasError:  false,
-			hasLabel:  true,
-			labelsLen: 2,
-			msg:       "Having irrelevant labels map",
-		},
-		{
-			node: v1.Node{
-				TypeMeta: metav1.TypeMeta{Kind: "Node"},
-				ObjectMeta: metav1.ObjectMeta{
-					Name:   myNodeName,
-					Labels: make(map[string]string),
-				},
-				Spec:   v1.NodeSpec{},
-				Status: v1.NodeStatus{},
-			},
-			hasError: false,
-			hasLabel: false,
-			msg:      "Having empty map",
-		},
-		{
-			node: v1.Node{
-				TypeMeta: metav1.TypeMeta{Kind: "Node"},
-				ObjectMeta: metav1.ObjectMeta{
-					Name:   myNodeName,
-					Labels: nil,
-				},
-				Spec:   v1.NodeSpec{},
-				Status: v1.NodeStatus{},
-			},
-			hasError: false,
-			hasLabel: false,
-			msg:      "Having nil map",
-		},
-		{
-			node: v1.Node{
-				TypeMeta: metav1.TypeMeta{Kind: "Node"},
-				ObjectMeta: metav1.ObjectMeta{
-					Name:   "foo",
-					Labels: map[string]string{vpcENIConfigLabel: eniConfigName},
-				},
-				Spec:   v1.NodeSpec{},
-				Status: v1.NodeStatus{},
-			},
-			hasError:  true,
-			hasLabel:  false,
-			labelsLen: 0,
-			msg:       "Wrong node name",
-		},
-	}
-
-	for _, test := range tests {
-		m = setup(t)
-		ctx := context.Background()
-		k8sSchema := runtime.NewScheme()
-		clientgoscheme.AddToScheme(k8sSchema)
-
-		mockContext := &IPAMContext{
-			rawK8SClient:    m.rawK8SClient,
-			cachedK8SClient: m.cachedK8SClient,
-			dataStore:       datastore.NewDataStore(log, datastore.NewTestCheckpoint(datastore.CheckpointData{Version: datastore.CheckpointFormatVersion}), false),
-			awsClient:       m.awsutils,
-			networkClient:   m.network,
-			primaryIP:       make(map[string]string),
-			terminating:     int32(0),
-			myNodeName:      myNodeName,
-		}
-
-		_ = m.cachedK8SClient.Create(ctx, &test.node)
-		labels, err := mockContext.getNodeLabels(ctx)
-		assert.Equal(t, test.hasError, err != nil, test.msg)
-		assert.Equal(t, test.hasLabel, labels != nil, test.msg)
-		assert.Equal(t, test.labelsLen, len(labels), test.msg)
-	}
-
-	m.ctrl.Finish()
+	contained = lo.ContainsBy(cniNode.Spec.Features, func(addedFeature rcscheme.Feature) bool {
+		return rcscheme.SecurityGroupsForPods == addedFeature.Name && addedFeature.Value == ""
+	})
+	assert.True(t, contained, "the node's CNINode should be updated for trunk when there is some room")
+	assert.Equal(t, 1, len(cniNode.Spec.Features))
 }
 
 func TestIsConfigValid(t *testing.T) {
@@ -2321,3 +2224,126 @@ func TestAnnotatePod(t *testing.T) {
 	assert.Error(t, err)
 	assert.Equal(t, fmt.Errorf("error while trying to retrieve pod info: pods \"no-exist-name\" not found"), err)
 }
+
+func TestAddFeatureToCNINode(t *testing.T) {
+	m := setup(t)
+	defer m.ctrl.Finish()
+	ctx := context.Background()
+
+	nodeName := "fake-node-name"
+
+	key := types.NamespacedName{
+		Name: nodeName,
+	}
+
+	tests := []struct {
+		testFeatures      []rcscheme.Feature
+		testFeatureLength int
+		sgp               bool
+		customNet         bool
+		msg               string
+	}{
+		{
+			testFeatures: []rcscheme.Feature{
+				{
+					Name:  rcscheme.SecurityGroupsForPods,
+					Value: "",
+				},
+			},
+			testFeatureLength: 1,
+			sgp:               true,
+			customNet:         false,
+			msg:               "test adding one new feature to CNINode",
+		},
+		{
+			testFeatures: []rcscheme.Feature{
+				{
+					Name:  rcscheme.SecurityGroupsForPods,
+					Value: "",
+				},
+				{
+					Name:  rcscheme.CustomNetworking,
+					Value: "default",
+				},
+			},
+			testFeatureLength: 2,
+			sgp:               true,
+			customNet:         true,
+			msg:               "test adding two new features to CNINode",
+		},
+		{
+			testFeatures: []rcscheme.Feature{
+				{
+					Name:  rcscheme.SecurityGroupsForPods,
+					Value: "",
+				},
+				{
+					Name:  rcscheme.CustomNetworking,
+					Value: "default",
+				},
+			},
+			testFeatureLength: 2,
+			sgp:               true,
+			customNet:         true,
+			msg:               "test adding duplicated features to CNINode",
+		},
+		{
+			testFeatures: []rcscheme.Feature{
+				{
+					Name:  rcscheme.SecurityGroupsForPods,
+					Value: "",
+				},
+				{
+					Name:  rcscheme.CustomNetworking,
+					Value: "update",
+				},
+			},
+			testFeatureLength: 2,
+			sgp:               true,
+			customNet:         true,
+			msg:               "test updating existing feature to CNINode",
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.msg, func(t *testing.T) {
+			mockContext := &IPAMContext{
+				awsClient:       m.awsutils,
+				rawK8SClient:    m.rawK8SClient,
+				cachedK8SClient: m.cachedK8SClient,
+			}
+
+			nodeName := "fake-node-name"
+			mockContext.myNodeName = nodeName
+			fakeCNINode := &rcscheme.CNINode{
+				ObjectMeta: metav1.ObjectMeta{Name: nodeName, Namespace: ""},
+			}
+			// don't check the error and let it fail open, since we need to create the CNINode in the test runner
+			mockContext.rawK8SClient.Create(ctx, fakeCNINode)
+
+			var sgpValue, cnValue string
+			var err error
+			for _, feature := range tt.testFeatures {
+				err = mockContext.AddFeatureToCNINode(ctx, feature.Name, feature.Value)
+				assert.NoError(t, err)
+				if feature.Name == rcscheme.SecurityGroupsForPods {
+					sgpValue = feature.Value
+				} else if feature.Name == rcscheme.CustomNetworking {
+					cnValue = feature.Value
+				}
+			}
+			var wantedCNINode rcscheme.CNINode
+			err = mockContext.rawK8SClient.Get(ctx, key, &wantedCNINode)
+			assert.NoError(t, err)
+			assert.True(t, len(wantedCNINode.Spec.Features) == tt.testFeatureLength)
+			containedSGP := lo.ContainsBy(wantedCNINode.Spec.Features, func(addedFeature rcscheme.Feature) bool {
+				return rcscheme.SecurityGroupsForPods == addedFeature.Name && addedFeature.Value == sgpValue
+			})
+			containedCN := lo.ContainsBy(wantedCNINode.Spec.Features, func(addedFeature rcscheme.Feature) bool {
+				return rcscheme.CustomNetworking == addedFeature.Name && addedFeature.Value == cnValue
+			})
+			assert.True(t, containedSGP == tt.sgp)
+			assert.True(t, containedCN == tt.customNet)
+		})
+	}
+}
diff --git a/pkg/k8sapi/k8sutils.go b/pkg/k8sapi/k8sutils.go
index 35b6d2092bf..141d8d13823 100644
--- a/pkg/k8sapi/k8sutils.go
+++ b/pkg/k8sapi/k8sutils.go
@@ -14,6 +14,7 @@ import (
 
 	eniconfigscheme "github.com/aws/amazon-vpc-cni-k8s/pkg/apis/crd/v1alpha1"
 	"github.com/aws/amazon-vpc-cni-k8s/pkg/utils/logger"
+	rcscheme "github.com/aws/amazon-vpc-resource-controller-k8s/apis/vpcresources/v1alpha1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/kubernetes"
@@ -47,6 +48,7 @@ func CreateKubeClient(mapper meta.RESTMapper) (client.Client, error) {
 	vpcCniScheme := runtime.NewScheme()
 	clientgoscheme.AddToScheme(vpcCniScheme)
 	eniconfigscheme.AddToScheme(vpcCniScheme)
+	rcscheme.AddToScheme(vpcCniScheme)
 
 	rawK8SClient, err := client.New(restCfg, client.Options{Scheme: vpcCniScheme, Mapper: mapper})
 
@@ -70,6 +72,7 @@ func CreateCachedKubeClient(rawK8SClient client.Client, mapper meta.RESTMapper)
 	eniconfigscheme.AddToScheme(vpcCniScheme)
 
 	stopChan := ctrl.SetupSignalHandler()
+
 	cache, err := cache.New(restCfg, cache.Options{Scheme: vpcCniScheme, Mapper: mapper})
 	if err != nil {
 		return nil, err
diff --git a/pkg/sgpp/utils.go b/pkg/sgpp/utils.go
index b76310a4188..d03361e9df3 100644
--- a/pkg/sgpp/utils.go
+++ b/pkg/sgpp/utils.go
@@ -1,6 +1,8 @@
 package sgpp
 
-import "os"
+import (
+	"os"
+)
 
 const vlanInterfacePrefix = "vlan"
 
diff --git a/pkg/sgpp/utils_test.go b/pkg/sgpp/utils_test.go
index 9ce01bb839e..8b150a7687b 100644
--- a/pkg/sgpp/utils_test.go
+++ b/pkg/sgpp/utils_test.go
@@ -4,9 +4,36 @@ import (
 	"os"
 	"testing"
 
+	"github.com/golang/mock/gomock"
 	"github.com/stretchr/testify/assert"
+	"k8s.io/apimachinery/pkg/runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	eniconfigscheme "github.com/aws/amazon-vpc-cni-k8s/pkg/apis/crd/v1alpha1"
+	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
+	testclient "sigs.k8s.io/controller-runtime/pkg/client/fake"
+
+	rcscheme "github.com/aws/amazon-vpc-resource-controller-k8s/apis/vpcresources/v1alpha1"
 )
 
+type testMocks struct {
+	ctrl            *gomock.Controller
+	cachedK8SClient client.Client
+}
+
+func setup(t *testing.T) *testMocks {
+	ctrl := gomock.NewController(t)
+	k8sSchema := runtime.NewScheme()
+	clientgoscheme.AddToScheme(k8sSchema)
+	eniconfigscheme.AddToScheme(k8sSchema)
+	rcscheme.AddToScheme(k8sSchema)
+
+	return &testMocks{
+		ctrl:            ctrl,
+		cachedK8SClient: testclient.NewClientBuilder().WithScheme(k8sSchema).Build(),
+	}
+}
+
 func TestBuildHostVethNamePrefix(t *testing.T) {
 	type args struct {
 		hostVethNamePrefix string