diff --git a/api/v1beta1/azurecluster_default.go b/api/v1beta1/azurecluster_default.go index bcbe32bbfd0..67fae98d523 100644 --- a/api/v1beta1/azurecluster_default.go +++ b/api/v1beta1/azurecluster_default.go @@ -20,6 +20,8 @@ import ( "fmt" "k8s.io/utils/ptr" + + "sigs.k8s.io/cluster-api-provider-azure/feature" ) const ( @@ -245,6 +247,29 @@ func (c *AzureCluster) setAPIServerLBDefaults() { }, } } + // If the API Server ILB feature is enabled, create a default internal LB IP or use the specified one + if feature.Gates.Enabled(feature.APIServerILB) { + privateIPFound := false + for i := range lb.FrontendIPs { + if lb.FrontendIPs[i].FrontendIPClass.PrivateIPAddress != "" { + if lb.FrontendIPs[i].Name == "" { + lb.FrontendIPs[i].Name = generatePrivateIPConfigName(lb.Name) + } + privateIPFound = true + break + } + } + // if no private IP is found, we should create a default internal LB IP + if !privateIPFound { + privateIP := FrontendIP{ + Name: generatePrivateIPConfigName(lb.Name), + FrontendIPClass: FrontendIPClass{ + PrivateIPAddress: DefaultInternalLBIPAddress, + }, + } + lb.FrontendIPs = append(lb.FrontendIPs, privateIP) + } + } } else if lb.Type == Internal { if lb.Name == "" { lb.Name = generateInternalLBName(c.ObjectMeta.Name) @@ -530,6 +555,11 @@ func generateFrontendIPConfigName(lbName string) string { return fmt.Sprintf("%s-%s", lbName, "frontEnd") } +// generatePrivateIPConfigName generates a load balancer frontend private IP config name. +func generatePrivateIPConfigName(lbName string) string { + return fmt.Sprintf("%s-%s", lbName, "frontEnd-internal-ip") +} + // generateNodeOutboundIPName generates a public IP name, based on the cluster name. 
func generateNodeOutboundIPName(clusterName string) string { return fmt.Sprintf("pip-%s-node-outbound", clusterName) diff --git a/api/v1beta1/azurecluster_default_test.go b/api/v1beta1/azurecluster_default_test.go index 507bb86f139..a9ec4e85dd7 100644 --- a/api/v1beta1/azurecluster_default_test.go +++ b/api/v1beta1/azurecluster_default_test.go @@ -23,7 +23,11 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/component-base/featuregate" + featuregatetesting "k8s.io/component-base/featuregate/testing" "k8s.io/utils/ptr" + + "sigs.k8s.io/cluster-api-provider-azure/feature" ) func TestResourceGroupDefault(t *testing.T) { @@ -1236,9 +1240,10 @@ func TestVnetPeeringDefaults(t *testing.T) { func TestAPIServerLBDefaults(t *testing.T) { cases := []struct { - name string - cluster *AzureCluster - output *AzureCluster + name string + featureGate featuregate.Feature + cluster *AzureCluster + output *AzureCluster }{ { name: "no lb", @@ -1282,6 +1287,55 @@ func TestAPIServerLBDefaults(t *testing.T) { }, }, }, + { + name: "no lb with APIServerILB feature gate enabled", + featureGate: feature.APIServerILB, + cluster: &AzureCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster-test", + }, + Spec: AzureClusterSpec{ + ControlPlaneEnabled: true, + NetworkSpec: NetworkSpec{}, + }, + }, + output: &AzureCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster-test", + }, + Spec: AzureClusterSpec{ + ControlPlaneEnabled: true, + NetworkSpec: NetworkSpec{ + APIServerLB: &LoadBalancerSpec{ + Name: "cluster-test-public-lb", + FrontendIPs: []FrontendIP{ + { + Name: "cluster-test-public-lb-frontEnd", + PublicIP: &PublicIPSpec{ + Name: "pip-cluster-test-apiserver", + DNSName: "", + }, + }, + { + Name: "cluster-test-public-lb-frontEnd-internal-ip", + FrontendIPClass: FrontendIPClass{ + PrivateIPAddress: DefaultInternalLBIPAddress, + }, + }, + }, + BackendPool: BackendPool{ + Name: "cluster-test-public-lb-backendPool", + }, + 
LoadBalancerClassSpec: LoadBalancerClassSpec{ + SKU: SKUStandard, + Type: Public, + IdleTimeoutInMinutes: ptr.To[int32](DefaultOutboundRuleIdleTimeoutInMinutes), + }, + }, + }, + }, + }, + }, { name: "internal lb", cluster: &AzureCluster{ @@ -1327,6 +1381,52 @@ func TestAPIServerLBDefaults(t *testing.T) { }, }, }, + { + name: "internal lb with feature gate API Server ILB enabled", + featureGate: feature.APIServerILB, + cluster: &AzureCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster-test", + }, + Spec: AzureClusterSpec{ + NetworkSpec: NetworkSpec{ + APIServerLB: &LoadBalancerSpec{ + LoadBalancerClassSpec: LoadBalancerClassSpec{ + Type: Internal, + }, + }, + }, + }, + }, + output: &AzureCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster-test", + }, + Spec: AzureClusterSpec{ + NetworkSpec: NetworkSpec{ + APIServerLB: &LoadBalancerSpec{ + FrontendIPs: []FrontendIP{ + { + Name: "cluster-test-internal-lb-frontEnd", + FrontendIPClass: FrontendIPClass{ + PrivateIPAddress: DefaultInternalLBIPAddress, + }, + }, + }, + BackendPool: BackendPool{ + Name: "cluster-test-internal-lb-backendPool", + }, + LoadBalancerClassSpec: LoadBalancerClassSpec{ + SKU: SKUStandard, + Type: Internal, + IdleTimeoutInMinutes: ptr.To[int32](DefaultOutboundRuleIdleTimeoutInMinutes), + }, + Name: "cluster-test-internal-lb", + }, + }, + }, + }, + }, { name: "with custom backend pool name", cluster: &AzureCluster{ @@ -1375,12 +1475,135 @@ func TestAPIServerLBDefaults(t *testing.T) { }, }, }, + { + name: "with custom backend pool name with feature gate API Server ILB enabled", + featureGate: feature.APIServerILB, + cluster: &AzureCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster-test", + }, + Spec: AzureClusterSpec{ + NetworkSpec: NetworkSpec{ + APIServerLB: &LoadBalancerSpec{ + LoadBalancerClassSpec: LoadBalancerClassSpec{ + Type: Internal, + }, + BackendPool: BackendPool{ + Name: "custom-backend-pool", + }, + }, + }, + }, + }, + output: &AzureCluster{ + ObjectMeta: 
metav1.ObjectMeta{ + Name: "cluster-test", + }, + Spec: AzureClusterSpec{ + NetworkSpec: NetworkSpec{ + APIServerLB: &LoadBalancerSpec{ + FrontendIPs: []FrontendIP{ + { + Name: "cluster-test-internal-lb-frontEnd", + FrontendIPClass: FrontendIPClass{ + PrivateIPAddress: DefaultInternalLBIPAddress, + }, + }, + }, + BackendPool: BackendPool{ + Name: "custom-backend-pool", + }, + LoadBalancerClassSpec: LoadBalancerClassSpec{ + SKU: SKUStandard, + Type: Internal, + IdleTimeoutInMinutes: ptr.To[int32](DefaultOutboundRuleIdleTimeoutInMinutes), + }, + Name: "cluster-test-internal-lb", + }, + }, + }, + }, + }, + { + name: "public lb with APIServerILB feature gate enabled and custom private IP belonging to default control plane CIDR", + featureGate: feature.APIServerILB, + cluster: &AzureCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster-test", + }, + Spec: AzureClusterSpec{ + ControlPlaneEnabled: true, + NetworkSpec: NetworkSpec{ + APIServerLB: &LoadBalancerSpec{ + Name: "cluster-test-public-lb", + FrontendIPs: []FrontendIP{ + { + Name: "cluster-test-public-lb-frontEnd", + PublicIP: &PublicIPSpec{ + Name: "pip-cluster-test-apiserver", + DNSName: "", + }, + }, + { + Name: "my-internal-ip", + FrontendIPClass: FrontendIPClass{ + PrivateIPAddress: "10.0.0.111", + }, + }, + }, + LoadBalancerClassSpec: LoadBalancerClassSpec{ + Type: Public, + SKU: SKUStandard, + }, + }, + }, + }, + }, + output: &AzureCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster-test", + }, + Spec: AzureClusterSpec{ + ControlPlaneEnabled: true, + NetworkSpec: NetworkSpec{ + APIServerLB: &LoadBalancerSpec{ + Name: "cluster-test-public-lb", + FrontendIPs: []FrontendIP{ + { + Name: "cluster-test-public-lb-frontEnd", + PublicIP: &PublicIPSpec{ + Name: "pip-cluster-test-apiserver", + DNSName: "", + }, + }, + { + Name: "my-internal-ip", + FrontendIPClass: FrontendIPClass{ + PrivateIPAddress: "10.0.0.111", + }, + }, + }, + BackendPool: BackendPool{ + Name: "cluster-test-public-lb-backendPool", + }, 
+ LoadBalancerClassSpec: LoadBalancerClassSpec{ + SKU: SKUStandard, + Type: Public, + IdleTimeoutInMinutes: ptr.To[int32](DefaultOutboundRuleIdleTimeoutInMinutes), + }, + }, + }, + }, + }, + }, } for _, c := range cases { tc := c t.Run(tc.name, func(t *testing.T) { - t.Parallel() + if tc.featureGate != "" { + defer featuregatetesting.SetFeatureGateDuringTest(t, feature.Gates, tc.featureGate, true)() + } tc.cluster.setAPIServerLBDefaults() if !reflect.DeepEqual(tc.cluster, tc.output) { expected, _ := json.MarshalIndent(tc.output, "", "\t") diff --git a/api/v1beta1/azurecluster_validation.go b/api/v1beta1/azurecluster_validation.go index 667065c3a34..cab56b710c2 100644 --- a/api/v1beta1/azurecluster_validation.go +++ b/api/v1beta1/azurecluster_validation.go @@ -412,37 +412,57 @@ func validateAPIServerLB(lb *LoadBalancerSpec, old *LoadBalancerSpec, cidrs []st allErrs = append(allErrs, field.Forbidden(fldPath.Child("name"), "API Server load balancer name should not be modified after AzureCluster creation.")) } - // There should only be one IP config. - if len(lb.FrontendIPs) != 1 || ptr.Deref[int32](lb.FrontendIPsCount, 1) != 1 { - allErrs = append(allErrs, field.Invalid(fldPath.Child("frontendIPConfigs"), lb.FrontendIPs, - "API Server Load balancer should have 1 Frontend IP")) - } else { - // if Internal, IP config should not have a public IP. 
- if lb.Type == Internal { - if lb.FrontendIPs[0].PublicIP != nil { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("frontendIPConfigs").Index(0).Child("publicIP"), - "Internal Load Balancers cannot have a Public IP")) - } - if lb.FrontendIPs[0].PrivateIPAddress != "" { - if err := validateInternalLBIPAddress(lb.FrontendIPs[0].PrivateIPAddress, cidrs, - fldPath.Child("frontendIPConfigs").Index(0).Child("privateIP")); err != nil { - allErrs = append(allErrs, err) - } - if len(old.FrontendIPs) != 0 && old.FrontendIPs[0].PrivateIPAddress != lb.FrontendIPs[0].PrivateIPAddress { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("name"), "API Server load balancer private IP should not be modified after AzureCluster creation.")) - } + publicIPCount, privateIPCount := 0, 0 + privateIP := "" + for i := range lb.FrontendIPs { + if lb.FrontendIPs[i].PublicIP != nil { + publicIPCount++ + } + if lb.FrontendIPs[i].PrivateIPAddress != "" { + privateIPCount++ + privateIP = lb.FrontendIPs[i].PrivateIPAddress + } + } + if lb.Type == Public { + // there should be one public IP for public LB. + if publicIPCount != 1 || ptr.Deref[int32](lb.FrontendIPsCount, 1) != 1 { + // Note: FrontendIPsCount creates public IPs when set. Therefore, we check for both publicIPCount and FrontendIPsCount to be 1. + allErrs = append(allErrs, field.Invalid(fldPath.Child("frontendIPConfigs"), lb.FrontendIPs, + "API Server Load balancer should have 1 Frontend IP")) + } + if feature.Gates.Enabled(feature.APIServerILB) { + if err := validateInternalLBIPAddress(privateIP, cidrs, fldPath.Child("frontendIPConfigs").Index(0).Child("privateIP")); err != nil { + allErrs = append(allErrs, err) } - } - - // if Public, IP config should not have a private IP. - if lb.Type == Public { - if lb.FrontendIPs[0].PrivateIPAddress != "" { + } else { + // API Server LB should not have a Private IP if APIServerILB feature is disabled. 
+ if privateIPCount > 0 { allErrs = append(allErrs, field.Forbidden(fldPath.Child("frontendIPConfigs").Index(0).Child("privateIP"), "Public Load Balancers cannot have a Private IP")) } } } + // internal LB should not have a public IP. + if lb.Type == Internal { + if publicIPCount != 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("frontendIPConfigs").Index(0).Child("publicIP"), + "Internal Load Balancers cannot have a Public IP")) + } + if privateIPCount != 1 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("frontendIPConfigs"), lb.FrontendIPs, + "API Server Load balancer of type private should have 1 frontend private IP")) + } else { + if err := validateInternalLBIPAddress(lb.FrontendIPs[0].PrivateIPAddress, cidrs, + fldPath.Child("frontendIPConfigs").Index(0).Child("privateIP")); err != nil { + allErrs = append(allErrs, err) + } + + if len(old.FrontendIPs) != 0 && old.FrontendIPs[0].PrivateIPAddress != lb.FrontendIPs[0].PrivateIPAddress { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("name"), "API Server load balancer private IP should not be modified after AzureCluster creation.")) + } + } + } return allErrs } diff --git a/api/v1beta1/azurecluster_validation_test.go b/api/v1beta1/azurecluster_validation_test.go index f9a169e83e0..2a83c976774 100644 --- a/api/v1beta1/azurecluster_validation_test.go +++ b/api/v1beta1/azurecluster_validation_test.go @@ -23,7 +23,11 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/component-base/featuregate" + featuregatetesting "k8s.io/component-base/featuregate/testing" "k8s.io/utils/ptr" + + "sigs.k8s.io/cluster-api-provider-azure/feature" ) func TestClusterNameValidation(t *testing.T) { @@ -888,6 +892,7 @@ func TestValidateSecurityRule(t *testing.T) { func TestValidateAPIServerLB(t *testing.T) { testcases := []struct { name string + featureGate featuregate.Feature lb LoadBalancerSpec old 
LoadBalancerSpec cpCIDRS []string @@ -947,6 +952,40 @@ func TestValidateAPIServerLB(t *testing.T) { { name: "too many IP configs", lb: LoadBalancerSpec{ + LoadBalancerClassSpec: LoadBalancerClassSpec{ + Type: Public, + }, + FrontendIPs: []FrontendIP{ + { + Name: "ip-1", + }, + { + Name: "ip-2", + }, + }, + }, + wantErr: true, + expectedErr: field.Error{ + Type: "FieldValueInvalid", + Field: "apiServerLB.frontendIPConfigs", + BadValue: []FrontendIP{ + { + Name: "ip-1", + }, + { + Name: "ip-2", + }, + }, + Detail: "API Server Load balancer should have 1 Frontend IP", + }, + }, + { + name: "too many IP configs with feature flag APIServerILB enabled", + featureGate: feature.APIServerILB, + lb: LoadBalancerSpec{ + LoadBalancerClassSpec: LoadBalancerClassSpec{ + Type: Public, + }, FrontendIPs: []FrontendIP{ { Name: "ip-1", @@ -993,6 +1032,34 @@ func TestValidateAPIServerLB(t *testing.T) { Detail: "Public Load Balancers cannot have a Private IP", }, }, + { + name: "public LB with private IP with feature flag APIServerILB enabled", + featureGate: feature.APIServerILB, + lb: LoadBalancerSpec{ + Name: "my-awesome-lb", + FrontendIPs: []FrontendIP{ + { + Name: "ip-1", + PublicIP: &PublicIPSpec{ + Name: "my-valid-frontend-ip", + DNSName: "my-valid-frontend-ip", + }, + }, + { + Name: "ip-2", + FrontendIPClass: FrontendIPClass{ + PrivateIPAddress: "10.0.0.111", + }, + }, + }, + LoadBalancerClassSpec: LoadBalancerClassSpec{ + Type: Public, + SKU: SKUStandard, + }, + }, + cpCIDRS: []string{"10.0.0.0/24"}, + wantErr: false, + }, { name: "internal LB with public IP", lb: LoadBalancerSpec{ @@ -1015,6 +1082,29 @@ func TestValidateAPIServerLB(t *testing.T) { Detail: "Internal Load Balancers cannot have a Public IP", }, }, + { + name: "internal LB with public IP with feature flag APIServerILB enabled", + featureGate: feature.APIServerILB, + lb: LoadBalancerSpec{ + FrontendIPs: []FrontendIP{ + { + Name: "ip-1", + PublicIP: &PublicIPSpec{ + Name: "my-invalid-ip", + }, + }, + }, + 
LoadBalancerClassSpec: LoadBalancerClassSpec{ + Type: Internal, + }, + }, + wantErr: true, + expectedErr: field.Error{ + Type: "FieldValueForbidden", + Field: "apiServerLB.frontendIPConfigs[0].publicIP", + Detail: "Internal Load Balancers cannot have a Public IP", + }, + }, { name: "internal LB with invalid private IP", lb: LoadBalancerSpec{ @@ -1038,6 +1128,30 @@ func TestValidateAPIServerLB(t *testing.T) { Detail: "Internal LB IP address isn't a valid IPv4 or IPv6 address", }, }, + { + name: "internal LB with invalid private IP with feature flag APIServerILB enabled", + featureGate: feature.APIServerILB, + lb: LoadBalancerSpec{ + FrontendIPs: []FrontendIP{ + { + Name: "ip-1", + FrontendIPClass: FrontendIPClass{ + PrivateIPAddress: "NAIP", + }, + }, + }, + LoadBalancerClassSpec: LoadBalancerClassSpec{ + Type: Internal, + }, + }, + wantErr: true, + expectedErr: field.Error{ + Type: "FieldValueInvalid", + Field: "apiServerLB.frontendIPConfigs[0].privateIP", + BadValue: "NAIP", + Detail: "Internal LB IP address isn't a valid IPv4 or IPv6 address", + }, + }, { name: "internal LB with out of range private IP", lb: LoadBalancerSpec{ @@ -1062,6 +1176,31 @@ func TestValidateAPIServerLB(t *testing.T) { Detail: "Internal LB IP address needs to be in control plane subnet range ([10.0.0.0/24 10.1.0.0/24])", }, }, + { + name: "internal LB with out of range private IP with feature flag APIServerILB enabled", + featureGate: feature.APIServerILB, + lb: LoadBalancerSpec{ + FrontendIPs: []FrontendIP{ + { + Name: "ip-1", + FrontendIPClass: FrontendIPClass{ + PrivateIPAddress: "20.1.2.3", + }, + }, + }, + LoadBalancerClassSpec: LoadBalancerClassSpec{ + Type: Internal, + }, + }, + cpCIDRS: []string{"10.0.0.0/24", "10.1.0.0/24"}, + wantErr: true, + expectedErr: field.Error{ + Type: "FieldValueInvalid", + Field: "apiServerLB.frontendIPConfigs[0].privateIP", + BadValue: "20.1.2.3", + Detail: "Internal LB IP address needs to be in control plane subnet range ([10.0.0.0/24 10.1.0.0/24])", + 
}, + }, { name: "internal LB with in range private IP", lb: LoadBalancerSpec{ @@ -1082,12 +1221,67 @@ func TestValidateAPIServerLB(t *testing.T) { cpCIDRS: []string{"10.0.0.0/24", "10.1.0.0/24"}, wantErr: false, }, + { + name: "public LB with in-range private IP with feature flag APIServerILB enabled", + featureGate: feature.APIServerILB, + lb: LoadBalancerSpec{ + FrontendIPs: []FrontendIP{ + { + Name: "ip-1", + FrontendIPClass: FrontendIPClass{ + PrivateIPAddress: "10.0.0.123", + }, + }, + { + Name: "ip-2", + PublicIP: &PublicIPSpec{ + Name: "my-valid-ip", + DNSName: "my-valid-ip", + }, + }, + }, + LoadBalancerClassSpec: LoadBalancerClassSpec{ + Type: Public, + SKU: SKUStandard, + }, + Name: "my-private-lb", + }, + cpCIDRS: []string{"10.0.0.0/24"}, + wantErr: false, + }, + { + name: "public LB with out of range private IP with feature flag APIServerILB enabled", + featureGate: feature.APIServerILB, + lb: LoadBalancerSpec{ + FrontendIPs: []FrontendIP{ + { + Name: "ip-1", + FrontendIPClass: FrontendIPClass{ + PrivateIPAddress: "20.1.2.3", + }, + }, + }, + LoadBalancerClassSpec: LoadBalancerClassSpec{ + Type: Public, + }, + }, + cpCIDRS: []string{"10.0.0.0/24", "10.1.0.0/24"}, + wantErr: true, + expectedErr: field.Error{ + Type: "FieldValueInvalid", + Field: "apiServerLB.frontendIPConfigs[0].privateIP", + BadValue: "20.1.2.3", + Detail: "Internal LB IP address needs to be in control plane subnet range ([10.0.0.0/24 10.1.0.0/24])", + }, + }, } for _, test := range testcases { t.Run(test.name, func(t *testing.T) { - t.Parallel() g := NewWithT(t) + if test.featureGate == feature.APIServerILB { + defer featuregatetesting.SetFeatureGateDuringTest(t, feature.Gates, test.featureGate, true)() + } err := validateAPIServerLB(&test.lb, &test.old, test.cpCIDRS, field.NewPath("apiServerLB")) if test.wantErr { g.Expect(err).To(ContainElement(MatchError(test.expectedErr.Error()))) diff --git a/azure/scope/cluster.go b/azure/scope/cluster.go index 8f197f0dc1f..8148fc0469a 100644 --- 
a/azure/scope/cluster.go +++ b/azure/scope/cluster.go @@ -248,48 +248,50 @@ func (s *ClusterScope) PublicIPSpecs() []azure.ResourceSpecGetter { func (s *ClusterScope) LBSpecs() []azure.ResourceSpecGetter { var specs []azure.ResourceSpecGetter if s.ControlPlaneEnabled() { - specs = []azure.ResourceSpecGetter{ - &loadbalancers.LBSpec{ - // API Server LB - Name: s.APIServerLB().Name, - ResourceGroup: s.ResourceGroup(), - SubscriptionID: s.SubscriptionID(), - ClusterName: s.ClusterName(), - Location: s.Location(), - ExtendedLocation: s.ExtendedLocation(), - VNetName: s.Vnet().Name, - VNetResourceGroup: s.Vnet().ResourceGroup, - SubnetName: s.ControlPlaneSubnet().Name, - FrontendIPConfigs: s.APIServerLB().FrontendIPs, - APIServerPort: s.APIServerPort(), - Type: s.APIServerLB().Type, - SKU: s.APIServerLB().SKU, - Role: infrav1.APIServerRole, - BackendPoolName: s.APIServerLB().BackendPool.Name, - IdleTimeoutInMinutes: s.APIServerLB().IdleTimeoutInMinutes, - AdditionalTags: s.AdditionalTags(), - }, + frontendLB := &loadbalancers.LBSpec{ + // API Server LB + Name: s.APIServerLB().Name, + ResourceGroup: s.ResourceGroup(), + SubscriptionID: s.SubscriptionID(), + ClusterName: s.ClusterName(), + Location: s.Location(), + ExtendedLocation: s.ExtendedLocation(), + VNetName: s.Vnet().Name, + VNetResourceGroup: s.Vnet().ResourceGroup, + SubnetName: s.ControlPlaneSubnet().Name, + APIServerPort: s.APIServerPort(), + Type: s.APIServerLB().Type, + SKU: s.APIServerLB().SKU, + Role: infrav1.APIServerRole, + BackendPoolName: s.APIServerLB().BackendPool.Name, + IdleTimeoutInMinutes: s.APIServerLB().IdleTimeoutInMinutes, + AdditionalTags: s.AdditionalTags(), } + + if s.APIServerLB().FrontendIPs != nil { + for _, frontendIP := range s.APIServerLB().FrontendIPs { + // save the public IP for the frontend LB + // or if the LB is of the type internal, save the only IP allowed for the frontend LB + if frontendIP.PublicIP != nil || frontendLB.Type == infrav1.Internal { + 
frontendLB.FrontendIPConfigs = []infrav1.FrontendIP{frontendIP} + break + } + } + } + specs = append(specs, frontendLB) } + if s.APIServerLB().Type != infrav1.Internal && feature.Gates.Enabled(feature.APIServerILB) { - specs = append(specs, &loadbalancers.LBSpec{ - Name: s.APIServerLB().Name + "-internal", - ResourceGroup: s.ResourceGroup(), - SubscriptionID: s.SubscriptionID(), - ClusterName: s.ClusterName(), - Location: s.Location(), - ExtendedLocation: s.ExtendedLocation(), - VNetName: s.Vnet().Name, - VNetResourceGroup: s.Vnet().ResourceGroup, - SubnetName: s.ControlPlaneSubnet().Name, - FrontendIPConfigs: []infrav1.FrontendIP{ - { - Name: s.APIServerLB().Name + "-internal-frontEnd", // TODO: improve this name. - FrontendIPClass: infrav1.FrontendIPClass{ - PrivateIPAddress: infrav1.DefaultInternalLBIPAddress, - }, - }, - }, + internalLB := &loadbalancers.LBSpec{ + Name: s.APIServerLB().Name + "-internal", + ResourceGroup: s.ResourceGroup(), + SubscriptionID: s.SubscriptionID(), + ClusterName: s.ClusterName(), + Location: s.Location(), + ExtendedLocation: s.ExtendedLocation(), + VNetName: s.Vnet().Name, + VNetResourceGroup: s.Vnet().ResourceGroup, + SubnetName: s.ControlPlaneSubnet().Name, APIServerPort: s.APIServerPort(), Type: infrav1.Internal, SKU: s.APIServerLB().SKU, @@ -297,7 +299,33 @@ func (s *ClusterScope) LBSpecs() []azure.ResourceSpecGetter { BackendPoolName: s.APIServerLB().BackendPool.Name + "-internal", IdleTimeoutInMinutes: s.APIServerLB().IdleTimeoutInMinutes, AdditionalTags: s.AdditionalTags(), - }) + } + + privateIPFound := false + if s.APIServerLB().FrontendIPs != nil { + for _, frontendIP := range s.APIServerLB().FrontendIPs { + if frontendIP.PrivateIPAddress != "" { + internalLB.FrontendIPConfigs = []infrav1.FrontendIP{frontendIP} + privateIPFound = true + break + } + } + } + + if !privateIPFound { + // If no private IP is found, use the default internal LB IP + // useful for scenarios where the user has not specified a private IP and is 
upgrading from a version that did not support it + // TODO: Update the underlying infra prekubeadm command with the new internal IP and trigger a reconcile. https://github.com/kubernetes-sigs/cluster-api-provider-azure/issues/5334 + internalLB.FrontendIPConfigs = []infrav1.FrontendIP{ + { + Name: s.APIServerLB().Name + "-internal-ip", + FrontendIPClass: infrav1.FrontendIPClass{ + PrivateIPAddress: infrav1.DefaultInternalLBIPAddress, + }, + }, + } + } + specs = append(specs, internalLB) } // Node outbound LB diff --git a/azure/scope/cluster_test.go b/azure/scope/cluster_test.go index c2636b654e3..b1dac336a16 100644 --- a/azure/scope/cluster_test.go +++ b/azure/scope/cluster_test.go @@ -2956,7 +2956,7 @@ func TestClusterScope_LBSpecs(t *testing.T) { SubnetName: "cp-subnet", FrontendIPConfigs: []infrav1.FrontendIP{ { - Name: "api-server-lb-internal-frontEnd", + Name: "api-server-lb-internal-ip", FrontendIPClass: infrav1.FrontendIPClass{ PrivateIPAddress: infrav1.DefaultInternalLBIPAddress, }, diff --git a/templates/cluster-template-apiserver-ilb.yaml b/templates/cluster-template-apiserver-ilb.yaml index a18b181f968..b0a5519f307 100644 --- a/templates/cluster-template-apiserver-ilb.yaml +++ b/templates/cluster-template-apiserver-ilb.yaml @@ -35,12 +35,20 @@ spec: publicIP: dnsName: ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com name: ${CLUSTER_NAME}-api-lb + - name: ${CLUSTER_NAME}-internal-lb-private-ip + privateIP: 30.0.0.100 subnets: - - name: control-plane-subnet + - cidrBlocks: + - 30.0.0.0/16 + name: control-plane-subnet role: control-plane - - name: node-subnet + - cidrBlocks: + - 30.1.0.0/16 + name: node-subnet role: node vnet: + cidrBlocks: + - 30.0.0.0/8 name: ${AZURE_VNET_NAME:=${CLUSTER_NAME}-vnet} resourceGroup: ${AZURE_RESOURCE_GROUP:=${CLUSTER_NAME}} subscriptionID: ${AZURE_SUBSCRIPTION_ID} @@ -196,7 +204,7 @@ spec: cloud-provider: external name: '{{ ds.meta_data["local_hostname"] }}' preKubeadmCommands: - - echo 
'10.0.0.100 ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com' + - echo '30.0.0.100 ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com' >> /etc/hosts --- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 diff --git a/templates/cluster-template-windows-apiserver-ilb.yaml b/templates/cluster-template-windows-apiserver-ilb.yaml index ecee4ae0eaf..fff149a4e53 100644 --- a/templates/cluster-template-windows-apiserver-ilb.yaml +++ b/templates/cluster-template-windows-apiserver-ilb.yaml @@ -39,12 +39,20 @@ spec: publicIP: dnsName: ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com name: ${CLUSTER_NAME}-api-lb + - name: ${CLUSTER_NAME}-internal-lb-private-ip + privateIP: 40.0.11.100 subnets: - - name: control-plane-subnet + - cidrBlocks: + - 40.0.0.0/16 + name: control-plane-subnet role: control-plane - - name: node-subnet + - cidrBlocks: + - 40.1.0.0/16 + name: node-subnet role: node vnet: + cidrBlocks: + - 40.0.0.0/8 name: ${AZURE_VNET_NAME:=${CLUSTER_NAME}-vnet} resourceGroup: ${AZURE_RESOURCE_GROUP:=${CLUSTER_NAME}} subscriptionID: ${AZURE_SUBSCRIPTION_ID} @@ -200,7 +208,7 @@ spec: cloud-provider: external name: '{{ ds.meta_data["local_hostname"] }}' preKubeadmCommands: - - echo '10.0.0.100 ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com' + - echo '40.0.11.100 ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com' >> /etc/hosts --- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 @@ -298,7 +306,7 @@ spec: - powershell C:/defender-exclude-calico.ps1 preKubeadmCommands: - powershell -Command "Add-Content -Path 'C:\\Windows\\System32\\drivers\\etc\\hosts' - -Value '10.0.0.100 ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com'" + -Value '40.0.11.100 ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com'" users: - groups: Administrators name: capi diff --git 
a/templates/flavors/apiserver-ilb/patches/control-plane.yaml b/templates/flavors/apiserver-ilb/patches/control-plane.yaml index 45a584e4a02..3d954fc8584 100644 --- a/templates/flavors/apiserver-ilb/patches/control-plane.yaml +++ b/templates/flavors/apiserver-ilb/patches/control-plane.yaml @@ -12,3 +12,17 @@ spec: publicIP: name: ${CLUSTER_NAME}-api-lb dnsName: ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com + - name: ${CLUSTER_NAME}-internal-lb-private-ip + privateIP: 30.0.0.100 + vnet: + cidrBlocks: + - 30.0.0.0/8 + subnets: + - name: control-plane-subnet + role: control-plane + cidrBlocks: + - 30.0.0.0/16 + - name: node-subnet + role: node + cidrBlocks: + - 30.1.0.0/16 diff --git a/templates/flavors/apiserver-ilb/patches/kubeadm-config-template.yaml b/templates/flavors/apiserver-ilb/patches/kubeadm-config-template.yaml index 6f5dc9a772c..84c9cd4d07f 100644 --- a/templates/flavors/apiserver-ilb/patches/kubeadm-config-template.yaml +++ b/templates/flavors/apiserver-ilb/patches/kubeadm-config-template.yaml @@ -8,6 +8,5 @@ spec: # /etc/hosts file is updated with a pre-created DNS name of the API server and internal load-balancer's IP. # This custom DNS Resolution of the API server ensures that the worker nodes can reach the API server when # the public IP of the API server is not accessible. - # 10.0.0.100 is the default IP that gets assigned to an internal load balancer. 
preKubeadmCommands: - - echo '10.0.0.100 ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com' >> /etc/hosts + - echo '30.0.0.100 ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com' >> /etc/hosts diff --git a/templates/flavors/windows-apiserver-ilb/kustomization.yaml b/templates/flavors/windows-apiserver-ilb/kustomization.yaml index f75b802a39c..83f7ba8fe03 100644 --- a/templates/flavors/windows-apiserver-ilb/kustomization.yaml +++ b/templates/flavors/windows-apiserver-ilb/kustomization.yaml @@ -2,12 +2,51 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization namespace: default resources: -- ../apiserver-ilb -- machine-deployment-windows.yaml + - ../apiserver-ilb + - machine-deployment-windows.yaml patches: -- path: ../base-windows-containerd/cluster.yaml -- path: patches/kubeadm-config-template.yaml + - path: ../base-windows-containerd/cluster.yaml + - path: patches/kubeadm-config-template.yaml + - target: + kind: KubeadmConfigTemplate + name: .*-md-0 + patch: |- + - op: replace + path: /spec/template/spec/preKubeadmCommands/0 + value: echo '40.0.11.100 ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com' >> /etc/hosts + - target: + kind: AzureCluster + patch: |- + - op: replace + path: /spec/networkSpec/apiServerLB/frontendIPs/1/privateIP + value: 40.0.11.100 + - target: + kind: AzureCluster + patch: |- + - op: replace + path: /spec/networkSpec/vnet/cidrBlocks/0 + value: 40.0.0.0/8 + - target: + kind: AzureCluster + patch: |- + - op: replace + path: /spec/networkSpec/subnets/0/cidrBlocks/0 + value: 40.0.0.0/16 + - target: + kind: AzureCluster + patch: |- + - op: replace + path: /spec/networkSpec/subnets/1/cidrBlocks/0 + value: 40.1.0.0/16 + - target: + kind: KubeadmConfigTemplate + name: .*-md-win + patch: |- + - op: replace + path: /spec/template/spec/preKubeadmCommands/0 + value: + powershell -Command "Add-Content -Path 'C:\\Windows\\System32\\drivers\\etc\\hosts' 
-Value '40.0.11.100 ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com'" sortOptions: order: fifo diff --git a/templates/test/ci/cluster-template-prow-apiserver-ilb.yaml b/templates/test/ci/cluster-template-prow-apiserver-ilb.yaml new file mode 100644 index 00000000000..410c2fb96d9 --- /dev/null +++ b/templates/test/ci/cluster-template-prow-apiserver-ilb.yaml @@ -0,0 +1,334 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + labels: + cloud-provider: ${CLOUD_PROVIDER_AZURE_LABEL:=azure} + cni: calico + name: ${CLUSTER_NAME} + namespace: default +spec: + clusterNetwork: + pods: + cidrBlocks: + - 192.168.0.0/16 + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlane + name: ${CLUSTER_NAME}-control-plane + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureCluster + name: ${CLUSTER_NAME} +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: AzureCluster +metadata: + name: ${CLUSTER_NAME} + namespace: default +spec: + additionalTags: + buildProvenance: ${BUILD_PROVENANCE} + creationTimestamp: ${TIMESTAMP} + jobName: ${JOB_NAME} + identityRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureClusterIdentity + name: ${CLUSTER_IDENTITY_NAME} + location: ${AZURE_LOCATION} + networkSpec: + apiServerLB: + frontendIPs: + - name: ${CLUSTER_NAME}-api-lb + publicIP: + dnsName: ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com + name: ${CLUSTER_NAME}-api-lb + - name: ${CLUSTER_NAME}-internal-lb-private-ip + privateIP: ${AZURE_INTERNAL_LB_PRIVATE_IP} + subnets: + - cidrBlocks: + - ${AZURE_CP_SUBNET_CIDR} + name: control-plane-subnet + role: control-plane + - cidrBlocks: + - ${AZURE_NODE_SUBNET_CIDR} + name: node-subnet + role: node + vnet: + cidrBlocks: + - ${AZURE_VNET_CIDR} + name: ${AZURE_VNET_NAME:=${CLUSTER_NAME}-vnet} + resourceGroup: ${AZURE_RESOURCE_GROUP:=${CLUSTER_NAME}} + subscriptionID: 
${AZURE_SUBSCRIPTION_ID} +--- +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: KubeadmControlPlane +metadata: + name: ${CLUSTER_NAME}-control-plane + namespace: default +spec: + kubeadmConfigSpec: + clusterConfiguration: + apiServer: + extraArgs: + cloud-provider: external + timeoutForControlPlane: 20m + controllerManager: + extraArgs: + allocate-node-cidrs: "false" + cloud-provider: external + cluster-name: ${CLUSTER_NAME} + v: "4" + etcd: + local: + dataDir: /var/lib/etcddisk/etcd + extraArgs: + quota-backend-bytes: "8589934592" + diskSetup: + filesystems: + - device: /dev/disk/azure/scsi1/lun0 + extraOpts: + - -E + - lazy_itable_init=1,lazy_journal_init=1 + filesystem: ext4 + label: etcd_disk + - device: ephemeral0.1 + filesystem: ext4 + label: ephemeral0 + replaceFS: ntfs + partitions: + - device: /dev/disk/azure/scsi1/lun0 + layout: true + overwrite: false + tableType: gpt + files: + - contentFrom: + secret: + key: control-plane-azure.json + name: ${CLUSTER_NAME}-control-plane-azure-json + owner: root:root + path: /etc/kubernetes/azure.json + permissions: "0644" + initConfiguration: + nodeRegistration: + kubeletExtraArgs: + cloud-provider: external + name: '{{ ds.meta_data["local_hostname"] }}' + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + cloud-provider: external + name: '{{ ds.meta_data["local_hostname"] }}' + mounts: + - - LABEL=etcd_disk + - /var/lib/etcddisk + postKubeadmCommands: [] + preKubeadmCommands: [] + machineTemplate: + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureMachineTemplate + name: ${CLUSTER_NAME}-control-plane + replicas: ${CONTROL_PLANE_MACHINE_COUNT:=1} + version: ${KUBERNETES_VERSION} +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: AzureMachineTemplate +metadata: + name: ${CLUSTER_NAME}-control-plane + namespace: default +spec: + template: + spec: + dataDisks: + - diskSizeGB: 256 + lun: 0 + nameSuffix: etcddisk + identity: UserAssigned + osDisk: + 
diskSizeGB: 128 + osType: Linux + sshPublicKey: ${AZURE_SSH_PUBLIC_KEY_B64:=""} + userAssignedIdentities: + - providerID: /subscriptions/${AZURE_SUBSCRIPTION_ID}/resourceGroups/${CI_RG:=capz-ci}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/${USER_IDENTITY:=cloud-provider-user-identity} + vmSize: ${AZURE_CONTROL_PLANE_MACHINE_TYPE} +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: MachineDeployment +metadata: + name: ${CLUSTER_NAME}-md-0 + namespace: default +spec: + clusterName: ${CLUSTER_NAME} + replicas: ${WORKER_MACHINE_COUNT:=2} + selector: + matchLabels: null + template: + spec: + bootstrap: + configRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + name: ${CLUSTER_NAME}-md-0 + clusterName: ${CLUSTER_NAME} + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureMachineTemplate + name: ${CLUSTER_NAME}-md-0 + version: ${KUBERNETES_VERSION} +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: AzureMachineTemplate +metadata: + name: ${CLUSTER_NAME}-md-0 + namespace: default +spec: + template: + spec: + identity: UserAssigned + osDisk: + diskSizeGB: 128 + osType: Linux + sshPublicKey: ${AZURE_SSH_PUBLIC_KEY_B64:=""} + userAssignedIdentities: + - providerID: /subscriptions/${AZURE_SUBSCRIPTION_ID}/resourceGroups/${CI_RG:=capz-ci}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/${USER_IDENTITY:=cloud-provider-user-identity} + vmSize: ${AZURE_NODE_MACHINE_TYPE} +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: KubeadmConfigTemplate +metadata: + name: ${CLUSTER_NAME}-md-0 + namespace: default +spec: + template: + spec: + files: + - contentFrom: + secret: + key: worker-node-azure.json + name: ${CLUSTER_NAME}-md-0-azure-json + owner: root:root + path: /etc/kubernetes/azure.json + permissions: "0644" + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + cloud-provider: external + name: '{{ ds.meta_data["local_hostname"] }}' + preKubeadmCommands: + - 
echo '${AZURE_INTERNAL_LB_PRIVATE_IP} ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com' + >> /etc/hosts +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: AzureClusterIdentity +metadata: + labels: + clusterctl.cluster.x-k8s.io/move-hierarchy: "true" + name: ${CLUSTER_IDENTITY_NAME} + namespace: default +spec: + allowedNamespaces: {} + clientID: ${AZURE_CLIENT_ID_USER_ASSIGNED_IDENTITY} + tenantID: ${AZURE_TENANT_ID} + type: ${CLUSTER_IDENTITY_TYPE:=WorkloadIdentity} +--- +apiVersion: addons.cluster.x-k8s.io/v1alpha1 +kind: HelmChartProxy +metadata: + name: calico + namespace: default +spec: + chartName: tigera-operator + clusterSelector: + matchLabels: + cni: calico + namespace: tigera-operator + releaseName: projectcalico + repoURL: https://docs.tigera.io/calico/charts + valuesTemplate: |- + installation: + cni: + type: Calico + calicoNetwork: + bgp: Disabled + mtu: 1350 + ipPools: + ipPools:{{range $i, $cidr := .Cluster.spec.clusterNetwork.pods.cidrBlocks }} + - cidr: {{ $cidr }} + encapsulation: VXLAN{{end}} + registry: mcr.microsoft.com/oss + # Image and registry configuration for the tigera/operator pod. 
+ tigeraOperator: + image: tigera/operator + registry: mcr.microsoft.com/oss + calicoctl: + image: mcr.microsoft.com/oss/calico/ctl + version: ${CALICO_VERSION} +--- +apiVersion: addons.cluster.x-k8s.io/v1alpha1 +kind: HelmChartProxy +metadata: + name: azuredisk-csi-driver-chart + namespace: default +spec: + chartName: azuredisk-csi-driver + clusterSelector: + matchLabels: + azuredisk-csi: "true" + namespace: kube-system + releaseName: azuredisk-csi-driver-oot + repoURL: https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/master/charts + valuesTemplate: |- + controller: + replicas: 1 + runOnControlPlane: true + windows: + useHostProcessContainers: {{ hasKey .Cluster.metadata.labels "cni-windows" }} +--- +apiVersion: addons.cluster.x-k8s.io/v1alpha1 +kind: HelmChartProxy +metadata: + name: cloud-provider-azure-chart + namespace: default +spec: + chartName: cloud-provider-azure + clusterSelector: + matchLabels: + cloud-provider: azure + releaseName: cloud-provider-azure-oot + repoURL: https://raw.githubusercontent.com/kubernetes-sigs/cloud-provider-azure/master/helm/repo + valuesTemplate: | + infra: + clusterName: {{ .Cluster.metadata.name }} + cloudControllerManager: + clusterCIDR: {{ .Cluster.spec.clusterNetwork.pods.cidrBlocks | join "," }} + logVerbosity: 4 +--- +apiVersion: addons.cluster.x-k8s.io/v1alpha1 +kind: HelmChartProxy +metadata: + name: cloud-provider-azure-chart-ci + namespace: default +spec: + chartName: cloud-provider-azure + clusterSelector: + matchLabels: + cloud-provider: azure-ci + releaseName: cloud-provider-azure-oot + repoURL: https://raw.githubusercontent.com/kubernetes-sigs/cloud-provider-azure/master/helm/repo + valuesTemplate: | + infra: + clusterName: {{ .Cluster.metadata.name }} + cloudControllerManager: + cloudConfig: ${CLOUD_CONFIG:-"/etc/kubernetes/azure.json"} + cloudConfigSecretName: ${CONFIG_SECRET_NAME:-""} + clusterCIDR: {{ .Cluster.spec.clusterNetwork.pods.cidrBlocks | join "," }} + imageName: 
"${CCM_IMAGE_NAME:-""}" + imageRepository: "${IMAGE_REGISTRY:-""}" + imageTag: "${IMAGE_TAG_CCM:-""}" + logVerbosity: ${CCM_LOG_VERBOSITY:-4} + replicas: ${CCM_COUNT:-1} + enableDynamicReloading: ${ENABLE_DYNAMIC_RELOADING:-false} + cloudNodeManager: + imageName: "${CNM_IMAGE_NAME:-""}" + imageRepository: "${IMAGE_REGISTRY:-""}" + imageTag: "${IMAGE_TAG_CNM:-""}" diff --git a/templates/test/ci/prow-apiserver-ilb/kustomization.yaml b/templates/test/ci/prow-apiserver-ilb/kustomization.yaml new file mode 100644 index 00000000000..8bdb920e38f --- /dev/null +++ b/templates/test/ci/prow-apiserver-ilb/kustomization.yaml @@ -0,0 +1,51 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: default +resources: + - ../../../flavors/apiserver-ilb + - ../../../addons/cluster-api-helm/calico.yaml + - ../../../addons/cluster-api-helm/azuredisk-csi-driver.yaml + - ../../../addons/cluster-api-helm/cloud-provider-azure.yaml + - ../../../addons/cluster-api-helm/cloud-provider-azure-ci.yaml + +patches: + - path: ../patches/tags.yaml + - path: ../patches/controller-manager.yaml + - path: ../patches/uami-md-0.yaml + - path: ../patches/uami-control-plane.yaml + - path: ../patches/cluster-label-calico.yaml + - path: ../patches/cluster-label-cloud-provider-azure.yaml + - target: + kind: KubeadmConfigTemplate + name: .*-md-0 + patch: |- + - op: replace + path: /spec/template/spec/preKubeadmCommands/0 + value: echo '${AZURE_INTERNAL_LB_PRIVATE_IP} ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com' >> /etc/hosts + - target: + kind: AzureCluster + patch: |- + - op: replace + path: /spec/networkSpec/apiServerLB/frontendIPs/1/privateIP + value: ${AZURE_INTERNAL_LB_PRIVATE_IP} + - target: + kind: AzureCluster + patch: |- + - op: replace + path: /spec/networkSpec/vnet/cidrBlocks/0 + value: ${AZURE_VNET_CIDR} + - target: + kind: AzureCluster + patch: |- + - op: replace + path: /spec/networkSpec/subnets/0/cidrBlocks/0 + value: 
${AZURE_CP_SUBNET_CIDR} + - target: + kind: AzureCluster + patch: |- + - op: replace + path: /spec/networkSpec/subnets/1/cidrBlocks/0 + value: ${AZURE_NODE_SUBNET_CIDR} + +sortOptions: + order: fifo diff --git a/test/e2e/azure_apiserver_ilb.go b/test/e2e/azure_apiserver_ilb.go new file mode 100644 index 00000000000..710b8224a5b --- /dev/null +++ b/test/e2e/azure_apiserver_ilb.go @@ -0,0 +1,464 @@ +//go:build e2e +// +build e2e + +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +/* + +--------------------------------+ + | Start Test | + +--------------------------------+ + | + v ++-------------------------------------------+--------------------------------------------+ +| Fetch Azure Credentials | Get Azure Load Balancer Client | ++-------------------------------------------+--------------------------------------------+ + | + v ++-------------------------------------------+--------------------------------------------+ +| Verify Azure Internal Load Balancer | - Check Load Balancer Name | +| - Verify Provisioning State | - Confirm Succeeded State | ++-------------------------------------------+--------------------------------------------+ + | + v ++-------------------------------------------+--------------------------------------------+ +| Create Dynamic Client for Management | Get Azure Cluster Resource | +| Cluster | - Extract Control Plane Endpoint | +| | - Extract API Server ILB Private IP | ++-------------------------------------------+--------------------------------------------+ + | + v ++-------------------------------------------+--------------------------------------------+ +| Create Kubernetes Client Set for | Create Workload Cluster Proxy | +| Workload Cluster | | ++-------------------------------------------+--------------------------------------------+ + | + v ++-------------------------------------------+--------------------------------------------+ +| Deploy Node-Debug DaemonSet | - Add to Default Namespace | +| - Mount Host /etc/hosts | - Configure Privileged Container | ++-------------------------------------------+--------------------------------------------+ + | + v ++-------------------------------------------+--------------------------------------------+ +| List and Verify Worker Nodes | - Ensure Expected Number of Nodes | +| - Identify Worker Nodes | - Confirm Node Names Match Cluster | ++-------------------------------------------+--------------------------------------------+ + | + v 
++-------------------------------------------+--------------------------------------------+ +| List Node-Debug Pods | - Verify Pods on Worker Nodes | +| - Check Pod Status | - Ensure Running Phase | ++-------------------------------------------+--------------------------------------------+ + | + v ++-------------------------------------------+--------------------------------------------+ +| Execute Test Commands on Each Pod | 1. Verify "Hello from node-debug pod" | +| - Use Remote Exec | 2. Check /host/etc contents | +| - Stream stdout/stderr | 3. Validate Against Expected Outputs | ++-------------------------------------------+--------------------------------------------+ + | + v ++-------------------------------------------+--------------------------------------------+ +| Validate Test Results | - Check All Pods Pass Tests | +| - Retry if Any Pod Fails | - Ensure Consistent Results | ++-------------------------------------------+--------------------------------------------+ + | + v + +--------------------------------+ + | Test Complete | + +--------------------------------+ +*/ + +package e2e + +import ( + "bytes" + "context" + "fmt" + "os" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + v1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/client-go/tools/remotecommand" + "k8s.io/kubectl/pkg/scheme" + "k8s.io/utils/ptr" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/cluster-api/test/framework" + + infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" +) + +// AzureAPIServerILBSpecInput is the input for AzureAPIServerILBSpec. 
+type AzureAPIServerILBSpecInput struct { + BootstrapClusterProxy framework.ClusterProxy + Cluster *clusterv1.Cluster + Namespace *corev1.Namespace + ClusterName string + ExpectedWorkerNodes int32 + WaitIntervals []interface{} + TemplateHasPrivateIPCustomDNSResolution bool +} + +// AzureAPIServerILBSpec implements a test that verifies the Azure API server ILB is created. +func AzureAPIServerILBSpec(ctx context.Context, inputGetter func() AzureAPIServerILBSpecInput) { + var ( + specName = "azure-apiserver-ilb" + input AzureAPIServerILBSpecInput + ) + + input = inputGetter() + Expect(input.Namespace).NotTo(BeNil(), "Invalid argument. input.Namespace can't be nil when calling %s spec", specName) + Expect(input.ClusterName).NotTo(BeEmpty(), "Invalid argument. input.ClusterName can't be empty when calling %s spec", specName) + + By("Fetching new Azure Credentials") + cred, err := azidentity.NewDefaultAzureCredential(nil) + Expect(err).NotTo(HaveOccurred()) + + By("Getting azureLoadBalancerClient") + azureLoadBalancerClient, err := armnetwork.NewLoadBalancersClient(getSubscriptionID(Default), cred, nil) + Expect(err).NotTo(HaveOccurred()) + + By("Verifying the Azure API Server Internal Load Balancer is created") + groupName := os.Getenv(AzureResourceGroup) + Logf("Azure Resource Group: %s\n", groupName) + internalLoadbalancerName := fmt.Sprintf("%s-%s", input.ClusterName, "public-lb-internal") + + backoff := wait.Backoff{ + Duration: 300 * time.Second, + Factor: 0.5, + Jitter: 0.5, + Steps: 7, + } + retryFn := func(ctx context.Context) (bool, error) { + defer GinkgoRecover() + resp, err := azureLoadBalancerClient.Get(ctx, groupName, internalLoadbalancerName, nil) + if err != nil { + return false, err + } + + By("Verifying the Azure API Server Internal Load Balancer is the right one created") + internalLoadbalancer := resp.LoadBalancer + Expect(ptr.Deref(internalLoadbalancer.Name, "")).To(Equal(internalLoadbalancerName)) + + By("Verifying the Azure API Server Internal 
Load Balancer is in a succeeded state") + switch ptr.Deref(internalLoadbalancer.Properties.ProvisioningState, "") { + case armnetwork.ProvisioningStateSucceeded: + return true, nil + case armnetwork.ProvisioningStateUpdating: + // Wait for operation to complete. + return false, nil + default: + return false, fmt.Errorf("azure internal loadbalancer provisioning failed with state: %q", ptr.Deref(internalLoadbalancer.Properties.ProvisioningState, "(nil)")) + } + } + err = wait.ExponentialBackoffWithContext(ctx, backoff, retryFn) + Expect(err).NotTo(HaveOccurred()) + + // ------------------------ // + By("Creating a dynamic client for accessing custom resources in the management cluster") + mgmtRestConfig := input.BootstrapClusterProxy.GetRESTConfig() + mgmtDynamicClientSet, err := dynamic.NewForConfig(mgmtRestConfig) + Expect(err).NotTo(HaveOccurred()) + Expect(mgmtDynamicClientSet).NotTo(BeNil()) + + By("Getting the AzureCluster using the dynamic client set") + azureClusterGVR := schema.GroupVersionResource{ + Group: "infrastructure.cluster.x-k8s.io", + Version: "v1beta1", + Resource: "azureclusters", + } + + azureCluster, err := mgmtDynamicClientSet.Resource(azureClusterGVR). + Namespace(input.Namespace.Name). 
+ Get(ctx, input.ClusterName, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + deployedAzureCluster := &infrav1.AzureCluster{} + err = runtime.DefaultUnstructuredConverter.FromUnstructured( + azureCluster.UnstructuredContent(), + deployedAzureCluster, + ) + Expect(err).NotTo(HaveOccurred()) + + By("Getting the controlplane endpoint name") + controlPlaneEndpointDNSName, apiServerILBPrivateIP := "", "" + for _, frontendIP := range deployedAzureCluster.Spec.NetworkSpec.APIServerLB.FrontendIPs { + if frontendIP.PublicIP != nil && frontendIP.PublicIP.DNSName != "" { + Logf("Control Plane Endpoint Name: %s\n", frontendIP.PublicIP.DNSName) + controlPlaneEndpointDNSName = frontendIP.PublicIP.DNSName + } else if frontendIP.PrivateIPAddress != "" { + Logf("API Server ILB Private IP: %s\n", frontendIP.PrivateIPAddress) + apiServerILBPrivateIP = frontendIP.PrivateIPAddress + } + } + + // skip checking the controlPlaneEndpointDNSName in case of default template + // TODO: remove this block when the underlying infra spec is also updated with private IP of the internal load balancer + Expect(controlPlaneEndpointDNSName).NotTo(BeEmpty(), "controlPlaneEndpointDNSName should be found at AzureCluster.Spec.NetworkSpec.APIServerLB.FrontendIPs with a valid DNS name") + if input.TemplateHasPrivateIPCustomDNSResolution { + Expect(controlPlaneEndpointDNSName).To(Equal(fmt.Sprintf("%s-%s.%s.cloudapp.azure.com", input.ClusterName, os.Getenv("APISERVER_LB_DNS_SUFFIX"), os.Getenv("AZURE_LOCATION")))) + } + Expect(apiServerILBPrivateIP).NotTo(BeEmpty(), "apiServerILBPrivateIP should be found at AzureCluster.Spec.NetworkSpec.APIServerLB.FrontendIPs when apiserver ilb feature flag is enabled") + // ------------------------ // + + By("Creating a Kubernetes client set to the workload cluster") + workloadClusterProxy := input.BootstrapClusterProxy.GetWorkloadCluster(ctx, input.Namespace.Name, input.ClusterName) + Expect(workloadClusterProxy).NotTo(BeNil()) + workloadClusterClient := 
workloadClusterProxy.GetClient() + Expect(workloadClusterClient).NotTo(BeNil()) + workloadClusterClientSet := workloadClusterProxy.GetClientSet() + Expect(workloadClusterClientSet).NotTo(BeNil()) + + // Deploy node-debug daemonset to workload cluster + By("Deploying node-debug daemonset to the workload cluster") + nodeDebugDS := &v1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node-debug", + Namespace: "default", + }, + Spec: v1.DaemonSetSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "node-debug", + }, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app": "node-debug", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "node-debug", + Image: "docker.io/library/busybox:latest", + SecurityContext: &corev1.SecurityContext{ + Privileged: ptr.To(true), + }, + Command: []string{ + "sh", + "-c", + "tail -f /dev/null", + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "etc-hosts", + MountPath: "/host/etc", + ReadOnly: true, + }, + }, + ReadinessProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + Exec: &corev1.ExecAction{ + Command: []string{"ls"}, + }, + }, + InitialDelaySeconds: 0, + PeriodSeconds: 1, + TimeoutSeconds: 60, + }, + }, + }, + Volumes: []corev1.Volume{ + { + Name: "etc-hosts", + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ + Path: "/etc/hosts", + Type: ptr.To(corev1.HostPathFile), + }, + }, + }, + }, + }, + }, + }, + } + err = workloadClusterClient.Create(ctx, nodeDebugDS) + Expect(err).NotTo(HaveOccurred()) + + backoff = wait.Backoff{ + Duration: 100 * time.Second, + Factor: 0.5, + Jitter: 0.5, + Steps: 5, + } + retryDSFn := func(ctx context.Context) (bool, error) { + defer GinkgoRecover() + + By("Saving all the nodes") + allNodes := &corev1.NodeList{} + err = workloadClusterClient.List(ctx, allNodes) + if err != nil { + return false, fmt.Errorf("failed to list nodes in the workload 
cluster: %v", err) + } + + if len(allNodes.Items) == 0 { + return false, fmt.Errorf("no nodes found in the workload cluster") + } + + By("Saving all the worker nodes") + workerNodes := make(map[string]corev1.Node, 0) + for i, node := range allNodes.Items { + if strings.Contains(node.Name, input.ClusterName+"-md-0") { + workerNodes[node.Name] = allNodes.Items[i] + } + } + if len(workerNodes) != int(input.ExpectedWorkerNodes) { + return false, fmt.Errorf("expected number of worker nodes: %d, got: %d", input.ExpectedWorkerNodes, len(workerNodes)) + } + + By("Saving all the node-debug pods running on the worker nodes") + allNodeDebugPods, err := workloadClusterClientSet.CoreV1().Pods("default").List(ctx, metav1.ListOptions{ + LabelSelector: "app=node-debug", + }) + if err != nil { + return false, fmt.Errorf("failed to list node-debug pods in the workload cluster: %v", err) + } + + workerDSPods := make(map[string]corev1.Pod, 0) + workerDSPodsTestResult := make(map[string]bool, 0) + for _, daemonsetPod := range allNodeDebugPods.Items { + if _, ok := workerNodes[daemonsetPod.Spec.NodeName]; ok { + workerDSPods[daemonsetPod.Name] = daemonsetPod + workerDSPodsTestResult[daemonsetPod.Name] = false + } + } + if len(workerDSPods) != int(input.ExpectedWorkerNodes) { + return false, fmt.Errorf("expected number of worker node-debug daemonset pods: %d, got: %d", input.ExpectedWorkerNodes, len(workerDSPods)) + } + + By("Getting the kubeconfig path for the workload cluster") + workloadClusterKubeConfigPath := workloadClusterProxy.GetKubeconfigPath() + workloadClusterKubeConfig, err := clientcmd.BuildConfigFromFlags("", workloadClusterKubeConfigPath) + + if err != nil { + return false, fmt.Errorf("failed to build workload cluster kubeconfig from flags: %v", err) + } + + Logf("Number of node debug pods deployed on worker nodes: %v\n", len(workerDSPods)) + for _, nodeDebugPod := range workerDSPods { + Logf("node-debug pod %v is deployed on node %v\n", nodeDebugPod.Name, 
nodeDebugPod.Spec.NodeName) + + By("Checking the status of the node-debug pod") + switch nodeDebugPod.Status.Phase { + case corev1.PodPending: + Logf("Pod %s is in Pending phase. Retrying\n", nodeDebugPod.Name) + return false /* retry */, nil + case corev1.PodRunning: + Logf("Pod %s is in Running phase. Proceeding\n", nodeDebugPod.Name) + default: + return false, fmt.Errorf("node-debug pod %s is in an unexpected phase: %v", nodeDebugPod.Name, nodeDebugPod.Status.Phase) + } + + helloFromTheNodeDebugPod := "Hello from node-debug pod" + listOfCommands := map[string][]string{ + helloFromTheNodeDebugPod: {"sh", "-c", "echo \"Hello from node-debug pod\""}, + apiServerILBPrivateIP: {"sh", "-c", "test -f /host/etc && cat /host/etc || echo 'File not found'"}, // /etc/host is mounted as /host/etc/hosts in the node-debug pod + } + testResult := map[string]bool{ + helloFromTheNodeDebugPod: false, + apiServerILBPrivateIP: false, + } + for expectedCmdOutput, execCommand := range listOfCommands { + Logf("Trying to exec into the pod %s at namespace %s and running the command %s\n", nodeDebugPod.Name, nodeDebugPod.Namespace, strings.Join(execCommand, " ")) + execRequest := workloadClusterClientSet.CoreV1().RESTClient().Post().Resource("pods").Name(nodeDebugPod.Name). + Namespace(nodeDebugPod.Namespace). 
+ SubResource("exec") + + option := &corev1.PodExecOptions{ + Command: execCommand, + Stdin: false, + Stdout: true, + Stderr: true, + TTY: false, + } + + execRequest.VersionedParams( + option, + scheme.ParameterCodec, + ) + + Logf("Creating executor for the pod %s using the URL %v\n", nodeDebugPod.Name, execRequest.URL()) + exec, err := remotecommand.NewSPDYExecutor(workloadClusterKubeConfig, "POST", execRequest.URL()) + if err != nil { + return false, fmt.Errorf("failed to create executor: %v", err) + } + + By("Streaming stdout/err from the daemonset") + var stdout, stderr bytes.Buffer + err = exec.StreamWithContext(context.Background(), remotecommand.StreamOptions{ + Stdin: nil, + Stdout: &stdout, + Stderr: &stderr, + Tty: false, + }) + if err != nil { + return false, fmt.Errorf("failed to stream stdout/err from the daemonset: %v", err) + } + output := stdout.String() + Logf("Captured output:\n%s\n", output) + + if strings.Contains(output, expectedCmdOutput) { + testResult[expectedCmdOutput] = true + } + + // TODO: remove this below block when the underlying infra spec is also updated with + // private IP of the internal load balancer + // For now, a default template will not have the private IP of the internal load balancer in its /etc/hosts + // So, we will skip the test for default templates. 
+ if !input.TemplateHasPrivateIPCustomDNSResolution { + testResult[expectedCmdOutput] = true + } + } + + if testResult[apiServerILBPrivateIP] && testResult[helloFromTheNodeDebugPod] { + Logf("Tests passed for the pod %s\n", nodeDebugPod.Name) + workerDSPodsTestResult[nodeDebugPod.Name] = true + } else { + Logf("Tests did not pass for the pod %s\n", nodeDebugPod.Name) + Logf("Tests update: %v, %v\n", testResult[helloFromTheNodeDebugPod], testResult[apiServerILBPrivateIP]) + return false /* retry */, nil + } + } + + checkTestOutputForAllWorkerPods := true + for podName, testResult := range workerDSPodsTestResult { + Logf("Test result for pod %s: %v\n", podName, testResult) + checkTestOutputForAllWorkerPods = checkTestOutputForAllWorkerPods && testResult + } + + if checkTestOutputForAllWorkerPods { + return true, nil + } + return false /* retry */, nil + } + err = wait.ExponentialBackoffWithContext(ctx, backoff, retryDSFn) + Expect(err).NotTo(HaveOccurred()) +} diff --git a/test/e2e/azure_test.go b/test/e2e/azure_test.go index cc501675ce1..1e62b568ff4 100644 --- a/test/e2e/azure_test.go +++ b/test/e2e/azure_test.go @@ -1153,4 +1153,103 @@ var _ = Describe("Workload cluster creation", func() { By("PASSED!") }) }) + + Context("Creating a self-managed VM based cluster using API Server ILB feature gate using default template [OPTIONAL][API-Server-ILB]", func() { + It("with three controlplane node and three worker nodes", func() { + clusterName = getClusterName(clusterNamePrefix, "apiserver-ilb") + + // Enable the API Server ILB feature gate + Expect(os.Setenv("EXP_APISERVER_ILB", "true")).To(Succeed()) + + clusterctl.ApplyClusterTemplateAndWait(ctx, createApplyClusterTemplateInput( + specName, + withNamespace(namespace.Name), + withClusterName(clusterName), + withControlPlaneMachineCount(3), + withWorkerMachineCount(2), + withControlPlaneInterval(specName, "wait-control-plane-ha"), + withControlPlaneWaiters(clusterctl.ControlPlaneWaiters{ + WaitForControlPlaneInitialized: 
EnsureControlPlaneInitializedNoAddons, + }), + withPostMachinesProvisioned(func() { + EnsureDaemonsets(ctx, func() DaemonsetsSpecInput { + return DaemonsetsSpecInput{ + BootstrapClusterProxy: bootstrapClusterProxy, + Namespace: namespace, + ClusterName: clusterName, + } + }) + }), + ), result) + + By("Probing workload cluster with APIServerILB feature gate", func() { + AzureAPIServerILBSpec(ctx, func() AzureAPIServerILBSpecInput { + return AzureAPIServerILBSpecInput{ + BootstrapClusterProxy: bootstrapClusterProxy, + Cluster: result.Cluster, + Namespace: namespace, + ClusterName: clusterName, + ExpectedWorkerNodes: result.ExpectedWorkerNodes(), + WaitIntervals: e2eConfig.GetIntervals(specName, "wait-worker-nodes"), + TemplateHasPrivateIPCustomDNSResolution: false, + } + }) + }) + + By("PASSED!") + }) + }) + + Context("Creating a self-managed VM based cluster using API Server ILB feature gate and fully spec-ed out APIServer ILB template [OPTIONAL][API-Server-ILB]", func() { + It("with three controlplane node and three worker nodes", func() { + clusterName = getClusterName(clusterNamePrefix, "apiserver-ilb") + + // Set the environment variables required for the API Server ILB feature gate + Expect(os.Setenv("EXP_APISERVER_ILB", "true")).To(Succeed()) + Expect(os.Setenv("AZURE_INTERNAL_LB_PRIVATE_IP", "40.0.0.100")).To(Succeed()) + Expect(os.Setenv("AZURE_VNET_CIDR", "40.0.0.0/8")).To(Succeed()) + Expect(os.Setenv("AZURE_CP_SUBNET_CIDR", "40.0.0.0/16")).To(Succeed()) + Expect(os.Setenv("AZURE_NODE_SUBNET_CIDR", "40.1.0.0/16")).To(Succeed()) + + clusterctl.ApplyClusterTemplateAndWait(ctx, createApplyClusterTemplateInput( + specName, + withFlavor("apiserver-ilb"), + withNamespace(namespace.Name), + withClusterName(clusterName), + withControlPlaneMachineCount(3), + withWorkerMachineCount(2), + withControlPlaneInterval(specName, "wait-control-plane-ha"), + withControlPlaneWaiters(clusterctl.ControlPlaneWaiters{ + WaitForControlPlaneInitialized: 
EnsureControlPlaneInitializedNoAddons, + }), + withPostMachinesProvisioned(func() { + EnsureDaemonsets(ctx, func() DaemonsetsSpecInput { + return DaemonsetsSpecInput{ + BootstrapClusterProxy: bootstrapClusterProxy, + Namespace: namespace, + ClusterName: clusterName, + } + }) + }), + ), result) + + By("Probing workload cluster with APIServerILB feature gate", func() { + AzureAPIServerILBSpec(ctx, func() AzureAPIServerILBSpecInput { + return AzureAPIServerILBSpecInput{ + BootstrapClusterProxy: bootstrapClusterProxy, + Cluster: result.Cluster, + Namespace: namespace, + ClusterName: clusterName, + ExpectedWorkerNodes: result.ExpectedWorkerNodes(), + WaitIntervals: e2eConfig.GetIntervals(specName, "wait-worker-nodes"), + TemplateHasPrivateIPCustomDNSResolution: true, + } + }) + }) + + By("PASSED!") + }) + }) + + // TODO: add a same test as above for a windows cluster }) diff --git a/test/e2e/config/azure-dev.yaml b/test/e2e/config/azure-dev.yaml index b9d23ffe4e0..b8dafc2b604 100644 --- a/test/e2e/config/azure-dev.yaml +++ b/test/e2e/config/azure-dev.yaml @@ -172,6 +172,8 @@ providers: targetName: "cluster-template-azure-cni-v1.yaml" - sourcePath: "${PWD}/templates/test/ci/cluster-template-prow-spot.yaml" targetName: "cluster-template-spot.yaml" + - sourcePath: "${PWD}/templates/test/ci/cluster-template-prow-apiserver-ilb.yaml" + targetName: "cluster-template-apiserver-ilb.yaml" replacements: - old: "--v=0" new: "--v=2" @@ -240,6 +242,7 @@ variables: LATEST_CAAPH_UPGRADE_VERSION: "v0.2.5" CI_RG: capz-ci USER_IDENTITY: cloud-provider-user-identity + EXP_APISERVER_ILB: "true" intervals: default/wait-controllers: ["3m", "10s"]