diff --git a/templates/cluster-template-aks-aso-clusterclass.yaml b/templates/cluster-template-aks-aso-clusterclass.yaml
index 172be02416a..27aa069c625 100644
--- a/templates/cluster-template-aks-aso-clusterclass.yaml
+++ b/templates/cluster-template-aks-aso-clusterclass.yaml
@@ -59,6 +59,8 @@ spec:
             type: SystemAssigned
           servicePrincipalProfile:
             clientId: msi
+          networkProfile:
+            networkPlugin: azure
           operatorSpec:
             secrets:
               adminCredentials:
diff --git a/templates/cluster-template-aks-aso.yaml b/templates/cluster-template-aks-aso.yaml
index 702994a4868..e8c53069469 100644
--- a/templates/cluster-template-aks-aso.yaml
+++ b/templates/cluster-template-aks-aso.yaml
@@ -36,6 +36,8 @@ spec:
       identity:
         type: SystemAssigned
       location: ${AZURE_LOCATION}
+      networkProfile:
+        networkPlugin: azure
       operatorSpec:
         secrets:
           adminCredentials:
diff --git a/templates/flavors/aks-aso-clusterclass/clusterclass.yaml b/templates/flavors/aks-aso-clusterclass/clusterclass.yaml
index fade32af556..0062ed9730f 100644
--- a/templates/flavors/aks-aso-clusterclass/clusterclass.yaml
+++ b/templates/flavors/aks-aso-clusterclass/clusterclass.yaml
@@ -94,6 +94,8 @@ spec:
             type: SystemAssigned
           servicePrincipalProfile:
             clientId: msi
+          networkProfile:
+            networkPlugin: azure
           operatorSpec:
             secrets:
               adminCredentials:
diff --git a/templates/flavors/aks-aso/cluster-template.yaml b/templates/flavors/aks-aso/cluster-template.yaml
index ed1bced1880..f8d72bd7bf3 100644
--- a/templates/flavors/aks-aso/cluster-template.yaml
+++ b/templates/flavors/aks-aso/cluster-template.yaml
@@ -36,6 +36,8 @@ spec:
         type: SystemAssigned
       servicePrincipalProfile:
         clientId: msi
+      networkProfile:
+        networkPlugin: azure
       operatorSpec:
         secrets:
           adminCredentials:
diff --git a/templates/test/ci/cluster-template-prow-aks-aso.yaml b/templates/test/ci/cluster-template-prow-aks-aso.yaml
new file mode 100644
index 00000000000..45a7134b752
--- /dev/null
+++ b/templates/test/ci/cluster-template-prow-aks-aso.yaml
@@ -0,0 +1,197 @@
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: Cluster
+metadata:
+  name: ${CLUSTER_NAME}
+  namespace: default
+spec:
+  controlPlaneRef:
+    apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
+    kind: AzureASOManagedControlPlane
+    name: ${CLUSTER_NAME}
+  infrastructureRef:
+    apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
+    kind: AzureASOManagedCluster
+    name: ${CLUSTER_NAME}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
+kind: AzureASOManagedControlPlane
+metadata:
+  name: ${CLUSTER_NAME}
+  namespace: default
+spec:
+  resources:
+  - apiVersion: containerservice.azure.com/v1api20231001
+    kind: ManagedCluster
+    metadata:
+      annotations:
+        serviceoperator.azure.com/credential-from: ${ASO_CREDENTIAL_SECRET_NAME}
+      name: ${CLUSTER_NAME}
+    spec:
+      agentPoolProfiles:
+      - count: 1
+        mode: System
+        name: stub
+        vmSize: ${AZURE_NODE_MACHINE_TYPE}
+      dnsPrefix: ${CLUSTER_NAME}
+      identity:
+        type: SystemAssigned
+      location: ${AZURE_LOCATION}
+      networkProfile:
+        networkPlugin: azure
+      operatorSpec:
+        secrets:
+          adminCredentials:
+            key: value
+            name: ${CLUSTER_NAME}-kubeconfig
+      owner:
+        name: ${CLUSTER_NAME}
+      servicePrincipalProfile:
+        clientId: msi
+      version: ${KUBERNETES_VERSION}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
+kind: AzureASOManagedCluster
+metadata:
+  name: ${CLUSTER_NAME}
+  namespace: default
+spec:
+  resources:
+  - apiVersion: resources.azure.com/v1api20200601
+    kind: ResourceGroup
+    metadata:
+      annotations:
+        serviceoperator.azure.com/credential-from: ${ASO_CREDENTIAL_SECRET_NAME}
+      name: ${CLUSTER_NAME}
+    spec:
+      location: ${AZURE_LOCATION}
+      tags:
+        buildProvenance: ${BUILD_PROVENANCE}
+        creationTimestamp: ${TIMESTAMP}
+        jobName: ${JOB_NAME}
+---
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: MachinePool
+metadata:
+  name: ${CLUSTER_NAME}-pool0
+  namespace: default
+spec:
+  clusterName: ${CLUSTER_NAME}
+  replicas: ${WORKER_MACHINE_COUNT:=2}
+  template:
+    metadata: {}
+    spec:
+      bootstrap:
+        dataSecretName: ""
+      clusterName: ${CLUSTER_NAME}
+      infrastructureRef:
+        apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
+        kind: AzureASOManagedMachinePool
+        name: ${CLUSTER_NAME}-pool0
+      version: ${KUBERNETES_VERSION}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
+kind: AzureASOManagedMachinePool
+metadata:
+  name: ${CLUSTER_NAME}-pool0
+  namespace: default
+spec:
+  resources:
+  - apiVersion: containerservice.azure.com/v1api20231001
+    kind: ManagedClustersAgentPool
+    metadata:
+      annotations:
+        serviceoperator.azure.com/credential-from: ${ASO_CREDENTIAL_SECRET_NAME}
+      name: ${CLUSTER_NAME}-pool0
+    spec:
+      azureName: pool0
+      count: ${WORKER_MACHINE_COUNT:=2}
+      mode: System
+      owner:
+        name: ${CLUSTER_NAME}
+      type: VirtualMachineScaleSets
+      vmSize: ${AZURE_AKS_NODE_MACHINE_TYPE:=Standard_D2s_v3}
+---
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: MachinePool
+metadata:
+  name: ${CLUSTER_NAME}-pool1
+  namespace: default
+spec:
+  clusterName: ${CLUSTER_NAME}
+  replicas: ${WORKER_MACHINE_COUNT:=2}
+  template:
+    metadata: {}
+    spec:
+      bootstrap:
+        dataSecretName: ""
+      clusterName: ${CLUSTER_NAME}
+      infrastructureRef:
+        apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
+        kind: AzureASOManagedMachinePool
+        name: ${CLUSTER_NAME}-pool1
+      version: ${KUBERNETES_VERSION}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
+kind: AzureASOManagedMachinePool
+metadata:
+  name: ${CLUSTER_NAME}-pool1
+  namespace: default
+spec:
+  resources:
+  - apiVersion: containerservice.azure.com/v1api20231001
+    kind: ManagedClustersAgentPool
+    metadata:
+      annotations:
+        serviceoperator.azure.com/credential-from: ${ASO_CREDENTIAL_SECRET_NAME}
+      name: ${CLUSTER_NAME}-pool1
+    spec:
+      azureName: pool1
+      count: ${WORKER_MACHINE_COUNT:=2}
+      mode: User
+      owner:
+        name: ${CLUSTER_NAME}
+      type: VirtualMachineScaleSets
+      vmSize: ${AZURE_AKS_NODE_MACHINE_TYPE:=Standard_D2s_v3}
+---
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: MachinePool
+metadata:
+  name: ${CLUSTER_NAME}-pool2
+  namespace: default
+spec:
+  clusterName: ${CLUSTER_NAME}
+  replicas: 1
+  template:
+    metadata: {}
+    spec:
+      bootstrap:
+        dataSecretName: ""
+      clusterName: ${CLUSTER_NAME}
+      infrastructureRef:
+        apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
+        kind: AzureASOManagedMachinePool
+        name: ${CLUSTER_NAME}-pool2
+      version: ${KUBERNETES_VERSION}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
+kind: AzureASOManagedMachinePool
+metadata:
+  name: ${CLUSTER_NAME}-pool2
+  namespace: default
+spec:
+  resources:
+  - apiVersion: containerservice.azure.com/v1api20231001
+    kind: ManagedClustersAgentPool
+    metadata:
+      annotations:
+        serviceoperator.azure.com/credential-from: ${ASO_CREDENTIAL_SECRET_NAME}
+      name: ${CLUSTER_NAME}-pool2
+    spec:
+      azureName: pool2
+      count: 1
+      mode: User
+      osType: Windows
+      owner:
+        name: ${CLUSTER_NAME}
+      type: VirtualMachineScaleSets
+      vmSize: ${AZURE_AKS_NODE_MACHINE_TYPE:=Standard_D2s_v3}
diff --git a/templates/test/ci/prow-aks-aso/kustomization.yaml b/templates/test/ci/prow-aks-aso/kustomization.yaml
new file mode 100644
index 00000000000..db7b880c771
--- /dev/null
+++ b/templates/test/ci/prow-aks-aso/kustomization.yaml
@@ -0,0 +1,29 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+namespace: default
+resources:
+  - ../../../flavors/aks-aso
+  - patches/aks-pool2.yaml
+
+patches:
+  - patch: |-
+      - op: test
+        path: /spec/resources/0/kind
+        value: ResourceGroup
+      - op: replace
+        path: /spec/resources/0/spec/tags
+        value:
+          jobName: ${JOB_NAME}
+          creationTimestamp: ${TIMESTAMP}
+          buildProvenance: ${BUILD_PROVENANCE}
+    target:
+      kind: AzureASOManagedCluster
+  - patch: |-
+      - op: test
+        path: /spec/resources/0/kind
+        value: ManagedClustersAgentPool
+      - op: replace
+        path: /spec/resources/0/spec/vmSize
+        value: "${AZURE_AKS_NODE_MACHINE_TYPE:=Standard_D2s_v3}"
+    target:
+      kind: AzureASOManagedMachinePool
diff --git a/templates/test/ci/prow-aks-aso/patches/aks-pool2.yaml b/templates/test/ci/prow-aks-aso/patches/aks-pool2.yaml
new file mode 100644
index 00000000000..6fdfcd1ee9b
--- /dev/null
+++ b/templates/test/ci/prow-aks-aso/patches/aks-pool2.yaml
@@ -0,0 +1,40 @@
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: MachinePool
+metadata:
+  name: "${CLUSTER_NAME}-pool2"
+spec:
+  clusterName: "${CLUSTER_NAME}"
+  replicas: 1
+  template:
+    metadata: {}
+    spec:
+      bootstrap:
+        dataSecretName: ""
+      clusterName: "${CLUSTER_NAME}"
+      infrastructureRef:
+        apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
+        kind: AzureASOManagedMachinePool
+        name: "${CLUSTER_NAME}-pool2"
+      version: "${KUBERNETES_VERSION}"
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
+kind: AzureASOManagedMachinePool
+metadata:
+  name: "${CLUSTER_NAME}-pool2"
+spec:
+  resources:
+  - apiVersion: "containerservice.azure.com/v1api20231001"
+    kind: ManagedClustersAgentPool
+    metadata:
+      name: ${CLUSTER_NAME}-pool2
+      annotations:
+        serviceoperator.azure.com/credential-from: ${ASO_CREDENTIAL_SECRET_NAME}
+    spec:
+      azureName: pool2
+      owner:
+        name: ${CLUSTER_NAME}
+      mode: User
+      type: VirtualMachineScaleSets
+      vmSize: "${AZURE_AKS_NODE_MACHINE_TYPE:=Standard_D2s_v3}"
+      osType: Windows
+      count: 1
diff --git a/test/e2e/aks.go b/test/e2e/aks.go
index 310092bdeb0..4b6885aa2c9 100644
--- a/test/e2e/aks.go
+++ b/test/e2e/aks.go
@@ -22,10 +22,14 @@ package e2e
 import (
 	"context"
 
+	asocontainerservicev1 "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20231001"
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/utils/ptr"
 	infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1"
+	infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha1"
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
 	"sigs.k8s.io/cluster-api/test/framework"
@@ -78,20 +82,15 @@ func DiscoverAndWaitForAKSControlPlaneInitialized(ctx context.Context, input Dis
 	Expect(input.Lister).NotTo(BeNil(), "Invalid argument. input.Lister can't be nil when calling DiscoverAndWaitForAKSControlPlaneInitialized")
 	Expect(input.Cluster).NotTo(BeNil(), "Invalid argument. input.Cluster can't be nil when calling DiscoverAndWaitForAKSControlPlaneInitialized")
 
-	controlPlane := GetAzureManagedControlPlaneByCluster(ctx, GetAzureManagedControlPlaneByClusterInput{
+	controlPlaneNamespace := input.Cluster.Spec.ControlPlaneRef.Namespace
+	controlPlaneName := input.Cluster.Spec.ControlPlaneRef.Name
+
+	Logf("Waiting for the first AKS machine in the %s/%s 'system' node pool to exist", controlPlaneNamespace, controlPlaneName)
+	WaitForAtLeastOneSystemNodePoolMachineToExist(ctx, WaitForControlPlaneAndMachinesReadyInput{
 		Lister:      input.Lister,
+		Getter:      input.Getter,
 		ClusterName: input.Cluster.Name,
 		Namespace:   input.Cluster.Namespace,
-	})
-	Expect(controlPlane).NotTo(BeNil())
-
-	Logf("Waiting for the first AKS machine in the %s/%s 'system' node pool to exist", controlPlane.Namespace, controlPlane.Name)
-	WaitForAtLeastOneSystemNodePoolMachineToExist(ctx, WaitForControlPlaneAndMachinesReadyInput{
-		Lister:       input.Lister,
-		Getter:       input.Getter,
-		ControlPlane: controlPlane,
-		ClusterName:  input.Cluster.Name,
-		Namespace:    input.Cluster.Namespace,
 	}, intervals...)
 }
 
@@ -102,52 +101,26 @@ func DiscoverAndWaitForAKSControlPlaneReady(ctx context.Context, input DiscoverA
 	Expect(input.Lister).NotTo(BeNil(), "Invalid argument. input.Lister can't be nil when calling DiscoverAndWaitForAKSControlPlaneReady")
 	Expect(input.Cluster).NotTo(BeNil(), "Invalid argument. input.Cluster can't be nil when calling DiscoverAndWaitForAKSControlPlaneReady")
 
-	controlPlane := GetAzureManagedControlPlaneByCluster(ctx, GetAzureManagedControlPlaneByClusterInput{
+	controlPlaneNamespace := input.Cluster.Spec.ControlPlaneRef.Namespace
+	controlPlaneName := input.Cluster.Spec.ControlPlaneRef.Name
+
+	Logf("Waiting for all AKS machines in the %s/%s 'system' node pool to exist", controlPlaneNamespace, controlPlaneName)
+	WaitForAllControlPlaneAndMachinesToExist(ctx, WaitForControlPlaneAndMachinesReadyInput{
 		Lister:      input.Lister,
+		Getter:      input.Getter,
 		ClusterName: input.Cluster.Name,
 		Namespace:   input.Cluster.Namespace,
-	})
-	Expect(controlPlane).NotTo(BeNil())
-
-	Logf("Waiting for all AKS machines in the %s/%s 'system' node pool to exist", controlPlane.Namespace, controlPlane.Name)
-	WaitForAllControlPlaneAndMachinesToExist(ctx, WaitForControlPlaneAndMachinesReadyInput{
-		Lister:       input.Lister,
-		Getter:       input.Getter,
-		ControlPlane: controlPlane,
-		ClusterName:  input.Cluster.Name,
-		Namespace:    input.Cluster.Namespace,
 	}, intervals...)
 }
 
-// GetAzureManagedControlPlaneByClusterInput contains the fields the required for fetching the azure managed control plane.
-type GetAzureManagedControlPlaneByClusterInput struct {
+// WaitForControlPlaneAndMachinesReadyInput contains the fields required for checking the status of azure managed control plane machines.
+type WaitForControlPlaneAndMachinesReadyInput struct {
 	Lister      framework.Lister
+	Getter      framework.Getter
 	ClusterName string
 	Namespace   string
 }
 
-// GetAzureManagedControlPlaneByCluster returns the AzureManagedControlPlane object for a cluster.
-// Important! this method relies on labels that are created by the CAPI controllers during the first reconciliation, so
-// it is necessary to ensure this is already happened before calling it.
-func GetAzureManagedControlPlaneByCluster(ctx context.Context, input GetAzureManagedControlPlaneByClusterInput) *infrav1.AzureManagedControlPlane {
-	controlPlaneList := &infrav1.AzureManagedControlPlaneList{}
-	Expect(input.Lister.List(ctx, controlPlaneList, byClusterOptions(input.ClusterName, input.Namespace)...)).To(Succeed(), "Failed to list AzureManagedControlPlane object for Cluster %s/%s", input.Namespace, input.ClusterName)
-	Expect(len(controlPlaneList.Items)).NotTo(BeNumerically(">", 1), "Cluster %s/%s should not have more than 1 AzureManagedControlPlane object", input.Namespace, input.ClusterName)
-	if len(controlPlaneList.Items) == 1 {
-		return &controlPlaneList.Items[0]
-	}
-	return nil
-}
-
-// WaitForControlPlaneAndMachinesReadyInput contains the fields required for checking the status of azure managed control plane machines.
-type WaitForControlPlaneAndMachinesReadyInput struct {
-	Lister       framework.Lister
-	Getter       framework.Getter
-	ControlPlane *infrav1.AzureManagedControlPlane
-	ClusterName  string
-	Namespace    string
-}
-
 // WaitForAtLeastOneSystemNodePoolMachineToExist waits for at least one machine in the "system" node pool to exist.
 func WaitForAtLeastOneSystemNodePoolMachineToExist(ctx context.Context, input WaitForControlPlaneAndMachinesReadyInput, intervals ...interface{}) {
 	By("Waiting for at least one node to exist in the 'system' node pool")
@@ -184,20 +157,56 @@ func WaitForAKSSystemNodePoolMachinesToExist(ctx context.Context, input WaitForC
 	Eventually(func() bool {
 		opt1 := client.InNamespace(input.Namespace)
 		opt2 := client.MatchingLabels(map[string]string{
-			infrav1.LabelAgentPoolMode: string(infrav1.NodePoolModeSystem),
 			clusterv1.ClusterNameLabel: input.ClusterName,
 		})
+		opt3 := client.MatchingLabels(map[string]string{
+			infrav1.LabelAgentPoolMode: string(infrav1.NodePoolModeSystem),
+		})
+
+		var capzMPs []client.Object
 		ammpList := &infrav1.AzureManagedMachinePoolList{}
+		asommpList := &infrav1exp.AzureASOManagedMachinePoolList{}
+
+		if err := input.Lister.List(ctx, ammpList, opt1, opt2, opt3); err != nil {
+			LogWarningf("Failed to list AzureManagedMachinePools: %+v", err)
+			return false
+		}
+		for _, ammp := range ammpList.Items {
+			capzMPs = append(capzMPs, ptr.To(ammp))
+		}
 
-		if err := input.Lister.List(ctx, ammpList, opt1, opt2); err != nil {
-			LogWarningf("Failed to get machinePool: %+v", err)
+		if err := input.Lister.List(ctx, asommpList, opt1, opt2); err != nil {
+			LogWarningf("Failed to list AzureASOManagedMachinePools: %+v", err)
 			return false
 		}
+		for _, asommp := range asommpList.Items {
+			var resources []*unstructured.Unstructured
+			for _, resource := range asommp.Spec.Resources {
+				u := &unstructured.Unstructured{}
+				Expect(u.UnmarshalJSON(resource.Raw)).To(Succeed())
+				resources = append(resources, u)
+			}
+			for _, resource := range resources {
+				if resource.GroupVersionKind().Group != asocontainerservicev1.GroupVersion.Group ||
+					resource.GroupVersionKind().Kind != "ManagedClustersAgentPool" {
+					continue
+				}
+				mode, _, err := unstructured.NestedString(resource.UnstructuredContent(), "spec", "mode")
+				if err != nil {
+					LogWarningf("Failed to get spec.mode for AzureASOManagedMachinePools %s/%s: %v", asommp.Namespace, asommp.Name, err)
+					continue
+				}
+				if mode == string(asocontainerservicev1.AgentPoolMode_System) {
+					capzMPs = append(capzMPs, ptr.To(asommp))
+				}
+				break
+			}
+		}
 
-		for _, pool := range ammpList.Items {
+		for _, pool := range capzMPs {
 			// Fetch the owning MachinePool.
-			for _, ref := range pool.OwnerReferences {
+			for _, ref := range pool.GetOwnerReferences() {
 				if ref.Kind != "MachinePool" {
 					continue
 				}
diff --git a/test/e2e/aks_versions.go b/test/e2e/aks_versions.go
index ff721e8ab51..562d6f888c7 100644
--- a/test/e2e/aks_versions.go
+++ b/test/e2e/aks_versions.go
@@ -29,9 +29,7 @@ import (
 	"github.com/pkg/errors"
 	"golang.org/x/mod/semver"
 	"k8s.io/utils/ptr"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 	"sigs.k8s.io/cluster-api/test/framework/clusterctl"
-	"sigs.k8s.io/controller-runtime/pkg/client"
 )
 
 // GetAKSKubernetesVersion gets the kubernetes version for AKS clusters as specified by the environment variable defined by versionVar.
@@ -57,16 +55,6 @@ func GetAKSKubernetesVersion(ctx context.Context, e2eConfig *clusterctl.E2EConfi
 	return maxVersion, nil
 }
 
-// byClusterOptions returns a set of ListOptions that will identify all the objects belonging to a Cluster.
-func byClusterOptions(name, namespace string) []client.ListOption {
-	return []client.ListOption{
-		client.InNamespace(namespace),
-		client.MatchingLabels{
-			clusterv1.ClusterNameLabel: name,
-		},
-	}
-}
-
 // GetWorkingAKSKubernetesVersion returns an available Kubernetes version of AKS given a desired semver version, if possible.
 // If the desired version is available, we return it.
 // If the desired version is not available, we check for any available patch version using desired version's Major.Minor semver release.
diff --git a/test/e2e/azure_clusterproxy.go b/test/e2e/azure_clusterproxy.go
index a8c48f37a7e..f07ae7cfedd 100644
--- a/test/e2e/azure_clusterproxy.go
+++ b/test/e2e/azure_clusterproxy.go
@@ -36,18 +36,22 @@ import (
 	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/monitor/armmonitor"
 	asocontainerservicev1preview "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20230202preview"
 	asocontainerservicev1 "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20231001"
+	asoresourcesv1 "github.com/Azure/azure-service-operator/v2/api/resources/v1api20200601"
 	. "github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/kubectl/pkg/describe" "k8s.io/utils/ptr" infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" + infrav1expalpha "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha1" infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1" expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" "sigs.k8s.io/cluster-api/test/framework" + "sigs.k8s.io/controller-runtime/pkg/client" ) type ( @@ -71,7 +75,9 @@ func initScheme() *runtime.Scheme { framework.TryAddDefaultSchemes(scheme) Expect(infrav1.AddToScheme(scheme)).To(Succeed()) Expect(infrav1exp.AddToScheme(scheme)).To(Succeed()) + Expect(infrav1expalpha.AddToScheme(scheme)).To(Succeed()) Expect(expv1.AddToScheme(scheme)).To(Succeed()) + Expect(asoresourcesv1.AddToScheme(scheme)).To(Succeed()) Expect(asocontainerservicev1.AddToScheme(scheme)).To(Succeed()) Expect(asocontainerservicev1preview.AddToScheme(scheme)).To(Succeed()) return scheme @@ -223,12 +229,33 @@ func (acp *AzureClusterProxy) collectActivityLogs(ctx context.Context, namespace workloadCluster, err := getAzureCluster(timeoutctx, clusterClient, namespace, name) if apierrors.IsNotFound(err) { controlPlane, err := getAzureManagedControlPlane(timeoutctx, clusterClient, namespace, name) - if err != nil { - // Failing to fetch logs should not cause the test to fail - Logf("Error fetching activity logs for cluster %s in namespace %s. Not able to find the AzureManagedControlPlane on the management cluster: %v", name, namespace, err) - return + if apierrors.IsNotFound(err) { + asoCluster, err := getAzureASOManagedCluster(timeoutctx, clusterClient, namespace, name) + if err != nil { + // Failing to fetch logs should not cause the test to fail + Logf("Error fetching activity logs for cluster %s in namespace %s. Not able to find the AzureASOManagedCluster on the management cluster: %v", name, namespace, err) + return + } + for _, resource := range asoCluster.Spec.Resources { + u := &unstructured.Unstructured{} + Expect(u.UnmarshalJSON(resource.Raw)).To(Succeed()) + if u.GroupVersionKind().Kind != "ResourceGroup" { + continue + } + // AzureName might not be specified in the CAPZ resource. GET the rg to make sure we have it. + rg := &asoresourcesv1.ResourceGroup{} + Expect(clusterClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: u.GetName()}, rg)).To(Succeed()) + groupName = rg.AzureName() + break + } + } else { + if err != nil { + // Failing to fetch logs should not cause the test to fail + Logf("Error fetching activity logs for cluster %s in namespace %s. 
+				Logf("Error fetching activity logs for cluster %s in namespace %s. Not able to find the AzureManagedControlPlane on the management cluster: %v", name, namespace, err)
+				return
+			}
+			groupName = controlPlane.Spec.ResourceGroupName
 		}
-		groupName = controlPlane.Spec.ResourceGroupName
 	} else {
 		if err != nil {
 			// Failing to fetch logs should not cause the test to fail
diff --git a/test/e2e/azure_logcollector.go b/test/e2e/azure_logcollector.go
index 316a4b0ff05..0f7fdc69198 100644
--- a/test/e2e/azure_logcollector.go
+++ b/test/e2e/azure_logcollector.go
@@ -34,6 +34,7 @@ import (
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1"
 	"sigs.k8s.io/cluster-api-provider-azure/azure"
+	infrav1expalpha "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha1"
 	infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1"
 	azureutil "sigs.k8s.io/cluster-api-provider-azure/util/azure"
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
@@ -94,6 +95,10 @@ func (k AzureLogCollector) CollectMachinePoolLog(ctx context.Context, management
 		// Machine pool can be an AzureManagedMachinePool for AKS clusters.
 		_, err = getAzureManagedMachinePool(ctx, managementClusterClient, mp)
 		if err != nil {
+			if !apierrors.IsNotFound(err) {
+				return err
+			}
+			_, err = getAzureASOManagedMachinePool(ctx, managementClusterClient, mp)
 			return err
 		}
 	} else {
@@ -205,6 +210,17 @@ func getAzureManagedControlPlane(ctx context.Context, managementClusterClient cl
 	return azManagedControlPlane, err
 }
 
+func getAzureASOManagedCluster(ctx context.Context, managementClusterClient client.Client, namespace, name string) (*infrav1expalpha.AzureASOManagedCluster, error) {
+	key := client.ObjectKey{
+		Namespace: namespace,
+		Name:      name,
+	}
+
+	azManagedCluster := &infrav1expalpha.AzureASOManagedCluster{}
+	err := managementClusterClient.Get(ctx, key, azManagedCluster)
+	return azManagedCluster, err
+}
+
 func getAzureMachine(ctx context.Context, managementClusterClient client.Client, m *clusterv1.Machine) (*infrav1.AzureMachine, error) {
 	key := client.ObjectKey{
 		Namespace: m.Spec.InfrastructureRef.Namespace,
@@ -238,6 +254,17 @@ func getAzureManagedMachinePool(ctx context.Context, managementClusterClient cli
 	return azManagedMachinePool, err
 }
 
+func getAzureASOManagedMachinePool(ctx context.Context, managementClusterClient client.Client, mp *expv1.MachinePool) (*infrav1expalpha.AzureASOManagedMachinePool, error) {
+	key := client.ObjectKey{
+		Namespace: mp.Spec.Template.Spec.InfrastructureRef.Namespace,
+		Name:      mp.Spec.Template.Spec.InfrastructureRef.Name,
+	}
+
+	azManagedMachinePool := &infrav1expalpha.AzureASOManagedMachinePool{}
+	err := managementClusterClient.Get(ctx, key, azManagedMachinePool)
+	return azManagedMachinePool, err
+}
+
 func linuxLogs(execToPathFn func(outputFileName string, command string, args ...string) func() error) []func() error {
 	return []func() error{
 		execToPathFn(
diff --git a/test/e2e/azure_test.go b/test/e2e/azure_test.go
index 29ae75dd50f..bdf292c46a6 100644
--- a/test/e2e/azure_test.go
+++ b/test/e2e/azure_test.go
@@ -25,6 +25,7 @@ import (
 	"os"
 	"time"
 
+	"github.com/Azure/azure-service-operator/v2/pkg/common/config"
 	. "github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" @@ -35,6 +36,7 @@ import ( capi_e2e "sigs.k8s.io/cluster-api/test/e2e" "sigs.k8s.io/cluster-api/test/framework/clusterctl" "sigs.k8s.io/cluster-api/util" + "sigs.k8s.io/controller-runtime/pkg/client" ) var _ = Describe("Workload cluster creation", func() { @@ -104,6 +106,22 @@ var _ = Describe("Workload cluster creation", func() { Logf("Using existing cluster identity secret") } + asoSecretName := e2eConfig.GetVariable("ASO_CREDENTIAL_SECRET_NAME") + asoSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace.Name, + Name: asoSecretName, + }, + StringData: map[string]string{ + config.AzureSubscriptionID: e2eConfig.GetVariable(AzureSubscriptionID), + config.AzureTenantID: e2eConfig.GetVariable(AzureTenantID), + config.AzureClientID: e2eConfig.GetVariable(AzureClientID), + config.AuthMode: e2eConfig.GetVariable("ASO_CREDENTIAL_SECRET_MODE"), + }, + } + err = bootstrapClusterProxy.GetClient().Create(ctx, asoSecret) + Expect(client.IgnoreAlreadyExists(err)).NotTo(HaveOccurred()) + identityName := e2eConfig.GetVariable(ClusterIdentityName) Expect(os.Setenv(ClusterIdentityName, identityName)).To(Succeed()) Expect(os.Setenv(ClusterIdentityNamespace, defaultNamespace)).To(Succeed()) @@ -937,6 +955,28 @@ var _ = Describe("Workload cluster creation", func() { }) }) + Context("Creating an AKS cluster with the ASO API [Managed Kubernetes]", func() { + It("with a single control plane node and 1 node", func() { + clusterName = getClusterName(clusterNamePrefix, "asoapi") + kubernetesVersion, err := GetAKSKubernetesVersion(ctx, e2eConfig, AKSKubernetesVersion) + Expect(err).NotTo(HaveOccurred()) + + clusterctl.ApplyClusterTemplateAndWait(ctx, createApplyClusterTemplateInput( + specName, + withFlavor("aks-aso"), + withNamespace(namespace.Name), + withClusterName(clusterName), + withKubernetesVersion(kubernetesVersion), + withWorkerMachineCount(1), + withMachinePoolInterval(specName, "wait-worker-nodes"), + withControlPlaneWaiters(clusterctl.ControlPlaneWaiters{ + WaitForControlPlaneInitialized: WaitForAKSControlPlaneInitialized, + WaitForControlPlaneMachinesReady: WaitForAKSControlPlaneReady, + }), + ), result) + }) + }) + // ci-e2e.sh and Prow CI skip this test by default. To include this test, set `GINKGO_SKIP=""`. // This spec expects a user-assigned identity named "cloud-provider-user-identity" in a "capz-ci" // resource group. Override these defaults by setting the USER_IDENTITY and CI_RG environment variables. 
diff --git a/test/e2e/common.go b/test/e2e/common.go
index f41eb76dd98..5806e82536a 100644
--- a/test/e2e/common.go
+++ b/test/e2e/common.go
@@ -73,6 +73,7 @@ const (
 	AzureClientSecret          = "AZURE_CLIENT_SECRET" //nolint:gosec // Not a secret itself, just its name
 	AzureClientID              = "AZURE_CLIENT_ID"
 	AzureSubscriptionID        = "AZURE_SUBSCRIPTION_ID"
+	AzureTenantID              = "AZURE_TENANT_ID"
 	AzureUserIdentity          = "USER_IDENTITY"
 	AzureIdentityResourceGroup = "CI_RG"
 	JobName                    = "JOB_NAME"
diff --git a/test/e2e/config/azure-dev.yaml b/test/e2e/config/azure-dev.yaml
index eea57a1cbb4..9a441ddbbdd 100644
--- a/test/e2e/config/azure-dev.yaml
+++ b/test/e2e/config/azure-dev.yaml
@@ -143,6 +143,8 @@ providers:
       targetName: "cluster-template-aks.yaml"
     - sourcePath: "${PWD}/templates/test/ci/cluster-template-prow-aks-clusterclass.yaml"
       targetName: "cluster-template-aks-clusterclass.yaml"
+    - sourcePath: "${PWD}/templates/test/ci/cluster-template-prow-aks-aso.yaml"
+      targetName: "cluster-template-aks-aso.yaml"
     - sourcePath: "${PWD}/templates/test/ci/cluster-template-prow-custom-vnet.yaml"
       targetName: "cluster-template-custom-vnet.yaml"
    - sourcePath: "${PWD}/templates/test/ci/cluster-template-prow-dual-stack.yaml"
@@ -204,6 +206,7 @@ variables:
   EXP_MACHINE_POOL: "true"
   EXP_CLUSTER_RESOURCE_SET: "true"
   EXP_EDGEZONE: "true"
+  EXP_ASO_API: "true"
   CLUSTER_TOPOLOGY: "true"
   EXP_KUBEADM_BOOTSTRAP_FORMAT_IGNITION: "true"
   AZURE_EXTENDEDLOCATION_TYPE: "${AZURE_EXTENDEDLOCATION_TYPE:-EdgeZone}"
@@ -214,6 +217,8 @@ variables:
   CONFORMANCE_NODES: "${CONFORMANCE_NODES:-1}"
   IP_FAMILY: "IPv4"
   CLUSTER_IDENTITY_NAME: "cluster-identity-sp"
+  ASO_CREDENTIAL_SECRET_NAME: "aso-credentials"
+  ASO_CREDENTIAL_SECRET_MODE: workloadidentity
   NODE_DRAIN_TIMEOUT: "60s"
   CI_VERSION: ""
   KUBETEST_CONFIGURATION: "./data/kubetest/conformance.yaml"