Commit

Add E2E test

willie-yao committed Dec 2, 2023
1 parent 0a9ec1a commit d688d75
Showing 4 changed files with 229 additions and 100 deletions.
1 change: 1 addition & 0 deletions go.mod
@@ -62,6 +62,7 @@ require (

require (
github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservicefleet/armcontainerservicefleet v1.1.0
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
github.com/Azure/go-autorest/autorest/adal v0.9.23 // indirect
github.com/Azure/go-autorest/autorest/azure/cli v0.4.5 // indirect
2 changes: 2 additions & 0 deletions go.sum
@@ -59,6 +59,8 @@ github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.3
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice v1.0.0 h1:figxyQZXzZQIcP3njhC68bYUiTw45J8/SsHaLW8Ax0M=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4 v4.4.0 h1:GYbAJIzQQBmtCx19HQur/hBT8YZxx8l6kyxcQFYMXHc=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4 v4.4.0/go.mod h1:su7G1Z0RoXhEJB4P35m34hDFNMEGik0sAUETEUuBeUA=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservicefleet/armcontainerservicefleet v1.1.0 h1:cv2cjvdh2V6AQynbMGUFMSivtqM04zH+PL8A9iK9IWk=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservicefleet/armcontainerservicefleet v1.1.0/go.mod h1:F2Ad7qGpAbSYv2AoupVMoc+JBpd5gMMY9V/NAIv4+48=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/cosmos/armcosmos v1.0.0 h1:Fv8iibGn1eSw0lt2V3cTsuokBEnOP+M//n8OiMcCgTM=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal v1.1.2 h1:mLY+pNLjCUeKhgnAJWAKhEUQM+RJQo2H1fuGSw1Ky1E=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0 h1:PTFGRSlMKCQelWwxUyYVEUqseBJVemLyqWJjvMyt0do=
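The new require and checksum entries pull in the Container Service Fleet management-plane SDK (armcontainerservicefleet), which the new E2E spec below uses alongside azidentity. As a minimal, self-contained sketch (not part of this commit), client construction typically looks like the following; the subscription ID is a placeholder:

```go
package main

import (
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservicefleet/armcontainerservicefleet"
)

func main() {
	// DefaultAzureCredential chains environment, workload/managed identity, and Azure CLI auth.
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatalf("failed to obtain a credential: %v", err)
	}

	subscriptionID := "00000000-0000-0000-0000-000000000000" // placeholder

	// Management-plane clients for fleet managers and fleet members,
	// the same constructors the new E2E spec calls.
	fleetsClient, err := armcontainerservicefleet.NewFleetsClient(subscriptionID, cred, nil)
	if err != nil {
		log.Fatalf("failed to create fleets client: %v", err)
	}
	fleetMembersClient, err := armcontainerservicefleet.NewFleetMembersClient(subscriptionID, cred, nil)
	if err != nil {
		log.Fatalf("failed to create fleet members client: %v", err)
	}

	_, _ = fleetsClient, fleetMembersClient // exercised by the spec's create/get calls
}
```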
117 changes: 117 additions & 0 deletions test/e2e/aks_fleets_member.go
@@ -0,0 +1,117 @@
//go:build e2e
// +build e2e

/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package e2e

import (
"context"
"os"

"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservicefleet/armcontainerservicefleet"
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"k8s.io/apimachinery/pkg/types"
"k8s.io/utils/ptr"
infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1"
"sigs.k8s.io/cluster-api-provider-azure/azure"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/controller-runtime/pkg/client"
)

type AKSFleetsMemberInput struct {
Cluster *clusterv1.Cluster
WaitIntervals []interface{}
}

const (
groupName = "capz-aks-fleets-member"
fleetName = "capz-aks-fleets-manager"
)

func AKSFleetsMemberSpec(ctx context.Context, inputGetter func() AKSFleetsMemberInput) {
input := inputGetter()

cred, err := azidentity.NewDefaultAzureCredential(nil)
Expect(err).NotTo(HaveOccurred())

mgmtClient := bootstrapClusterProxy.GetClient()
Expect(mgmtClient).NotTo(BeNil())

amcp := &infrav1.AzureManagedControlPlane{}
err = mgmtClient.Get(ctx, types.NamespacedName{
Namespace: input.Cluster.Spec.ControlPlaneRef.Namespace,
Name: input.Cluster.Spec.ControlPlaneRef.Name,
}, amcp)
Expect(err).NotTo(HaveOccurred())

groupClient, err := armresources.NewResourceGroupsClient(getSubscriptionID(Default), cred, nil)
Expect(err).NotTo(HaveOccurred())

By("creating a resource group")
_, err = groupClient.CreateOrUpdate(ctx, groupName, armresources.ResourceGroup{
Location: ptr.To(os.Getenv(AzureLocation)),
}, nil)
Expect(err).To(BeNil())

fleetClient, err := armcontainerservicefleet.NewFleetsClient(getSubscriptionID(Default), cred, nil)
Expect(err).NotTo(HaveOccurred())

fleetsMemberClient, err := armcontainerservicefleet.NewFleetMembersClient(getSubscriptionID(Default), cred, nil)
Expect(err).NotTo(HaveOccurred())

By("creating a fleet manager")
poller, err := fleetClient.BeginCreateOrUpdate(ctx, groupName, fleetName, armcontainerservicefleet.Fleet{
Location: ptr.To(os.Getenv(AzureLocation)),
}, nil)
Expect(err).To(BeNil())

Eventually(func(g Gomega) {
_, err := poller.PollUntilDone(ctx, nil)
g.Expect(err).NotTo(HaveOccurred())
}, input.WaitIntervals...).Should(Succeed(), "failed to create fleet manager")

By("Joining the cluster to the fleet hub")
var infraControlPlane = &infrav1.AzureManagedControlPlane{}
Eventually(func(g Gomega) {
err = mgmtClient.Get(ctx, client.ObjectKey{Namespace: input.Cluster.Spec.ControlPlaneRef.Namespace, Name: input.Cluster.Spec.ControlPlaneRef.Name}, infraControlPlane)
g.Expect(err).NotTo(HaveOccurred())
infraControlPlane.Spec.FleetsMember = &infrav1.FleetsMember{
Name: fleetName,
FleetsMemberClassSpec: infrav1.FleetsMemberClassSpec{
ManagerName: fleetName,
ManagerResourceGroup: groupName,
},
}
g.Expect(mgmtClient.Update(ctx, infraControlPlane)).To(Succeed())
}, input.WaitIntervals...).Should(Succeed())

By("Ensuring the fleet member is created and attached to the managed cluster")
Eventually(func(g Gomega) {
resp, err := fleetsMemberClient.Get(ctx, infraControlPlane.Spec.ResourceGroupName, fleetName, input.Cluster.Name, nil)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(string(ptr.Deref(resp.Properties.ProvisioningState, ""))).To(Equal("Succeeded"))
fleetsMember := resp.FleetMember
g.Expect(fleetsMember.Properties).NotTo(BeNil())
expectedID := azure.ManagedClusterID(getSubscriptionID(Default), infraControlPlane.Spec.ResourceGroupName, input.Cluster.Name)
g.Expect(ptr.Deref(fleetsMember.Properties.ClusterResourceID, "")).To(Equal(expectedID))
}, input.WaitIntervals...).Should(Succeed())

}
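The final assertion in the spec above compares the fleet member's ClusterResourceID with the value returned by azure.ManagedClusterID. Under the standard Microsoft.ContainerService ARM convention (an assumption about the helper's exact output, not confirmed by this commit), that ID has the following shape; a small sketch with a hypothetical stand-in function:

```go
package main

import "fmt"

// managedClusterID is a hypothetical stand-in for azure.ManagedClusterID,
// assuming it follows the standard ARM resource ID convention for AKS clusters.
func managedClusterID(subscriptionID, resourceGroup, clusterName string) string {
	return fmt.Sprintf(
		"/subscriptions/%s/resourceGroups/%s/providers/Microsoft.ContainerService/managedClusters/%s",
		subscriptionID, resourceGroup, clusterName,
	)
}

func main() {
	// Example output for placeholder values.
	fmt.Println(managedClusterID("00000000-0000-0000-0000-000000000000", "my-cluster-rg", "my-cluster"))
}
```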
209 changes: 109 additions & 100 deletions test/e2e/azure_test.go
@@ -740,107 +740,116 @@ var _ = Describe("Workload cluster creation", func() {
}),
), result)

By("Upgrading the Kubernetes version of the cluster", func() {
AKSUpgradeSpec(ctx, func() AKSUpgradeSpecInput {
return AKSUpgradeSpecInput{
Cluster: result.Cluster,
MachinePools: result.MachinePools,
KubernetesVersionUpgradeTo: kubernetesVersion,
WaitForControlPlane: e2eConfig.GetIntervals(specName, "wait-machine-upgrade"),
WaitForMachinePools: e2eConfig.GetIntervals(specName, "wait-machine-pool-upgrade"),
}
})
})

By("Exercising machine pools", func() {
AKSMachinePoolSpec(ctx, func() AKSMachinePoolSpecInput {
return AKSMachinePoolSpecInput{
Cluster: result.Cluster,
MachinePools: result.MachinePools,
WaitIntervals: e2eConfig.GetIntervals(specName, "wait-machine-pool-nodes"),
}
})
})

By("creating a machine pool with public IP addresses from a prefix", func() {
// This test is also currently serving as the canonical
// "create/delete node pool" test. Eventually, that should be
// made more distinct from this public IP prefix test.
AKSPublicIPPrefixSpec(ctx, func() AKSPublicIPPrefixSpecInput {
return AKSPublicIPPrefixSpecInput{
Cluster: result.Cluster,
KubernetesVersion: kubernetesVersion,
WaitIntervals: e2eConfig.GetIntervals(specName, "wait-worker-nodes"),
}
})
})

By("creating a machine pool with spot max price and scale down mode", func() {
AKSSpotSpec(ctx, func() AKSSpotSpecInput {
return AKSSpotSpecInput{
Cluster: result.Cluster,
KubernetesVersion: kubernetesVersion,
WaitIntervals: e2eConfig.GetIntervals(specName, "wait-worker-nodes"),
}
})
})

By("modifying nodepool autoscaling configuration", func() {
AKSAutoscaleSpec(ctx, func() AKSAutoscaleSpecInput {
return AKSAutoscaleSpecInput{
// By("Upgrading the Kubernetes version of the cluster", func() {
// AKSUpgradeSpec(ctx, func() AKSUpgradeSpecInput {
// return AKSUpgradeSpecInput{
// Cluster: result.Cluster,
// MachinePools: result.MachinePools,
// KubernetesVersionUpgradeTo: kubernetesVersion,
// WaitForControlPlane: e2eConfig.GetIntervals(specName, "wait-machine-upgrade"),
// WaitForMachinePools: e2eConfig.GetIntervals(specName, "wait-machine-pool-upgrade"),
// }
// })
// })

// By("Exercising machine pools", func() {
// AKSMachinePoolSpec(ctx, func() AKSMachinePoolSpecInput {
// return AKSMachinePoolSpecInput{
// Cluster: result.Cluster,
// MachinePools: result.MachinePools,
// WaitIntervals: e2eConfig.GetIntervals(specName, "wait-machine-pool-nodes"),
// }
// })
// })

// By("creating a machine pool with public IP addresses from a prefix", func() {
// // This test is also currently serving as the canonical
// // "create/delete node pool" test. Eventually, that should be
// // made more distinct from this public IP prefix test.
// AKSPublicIPPrefixSpec(ctx, func() AKSPublicIPPrefixSpecInput {
// return AKSPublicIPPrefixSpecInput{
// Cluster: result.Cluster,
// KubernetesVersion: kubernetesVersion,
// WaitIntervals: e2eConfig.GetIntervals(specName, "wait-worker-nodes"),
// }
// })
// })

// By("creating a machine pool with spot max price and scale down mode", func() {
// AKSSpotSpec(ctx, func() AKSSpotSpecInput {
// return AKSSpotSpecInput{
// Cluster: result.Cluster,
// KubernetesVersion: kubernetesVersion,
// WaitIntervals: e2eConfig.GetIntervals(specName, "wait-worker-nodes"),
// }
// })
// })

// By("modifying nodepool autoscaling configuration", func() {
// AKSAutoscaleSpec(ctx, func() AKSAutoscaleSpecInput {
// return AKSAutoscaleSpecInput{
// Cluster: result.Cluster,
// MachinePool: result.MachinePools[0],
// WaitIntervals: e2eConfig.GetIntervals(specName, "wait-machine-pool-nodes"),
// }
// })
// })

// By("modifying additionalTags configuration", func() {
// AKSAdditionalTagsSpec(ctx, func() AKSAdditionalTagsSpecInput {
// return AKSAdditionalTagsSpecInput{
// Cluster: result.Cluster,
// MachinePools: result.MachinePools,
// WaitForUpdate: e2eConfig.GetIntervals(specName, "wait-machine-pool-nodes"),
// }
// })
// })

// By("modifying the azure cluster-autoscaler settings", func() {
// AKSAzureClusterAutoscalerSettingsSpec(ctx, func() AKSAzureClusterAutoscalerSettingsSpecInput {
// return AKSAzureClusterAutoscalerSettingsSpecInput{
// Cluster: result.Cluster,
// WaitIntervals: e2eConfig.GetIntervals(specName, "wait-control-plane"),
// }
// })
// })

// By("modifying node labels configuration", func() {
// AKSNodeLabelsSpec(ctx, func() AKSNodeLabelsSpecInput {
// return AKSNodeLabelsSpecInput{
// Cluster: result.Cluster,
// MachinePools: result.MachinePools,
// WaitForUpdate: e2eConfig.GetIntervals(specName, "wait-machine-pool-nodes"),
// }
// })
// })

// By("modifying taints configuration", func() {
// AKSNodeTaintsSpec(ctx, func() AKSNodeTaintsSpecInput {
// return AKSNodeTaintsSpecInput{
// Cluster: result.Cluster,
// MachinePools: result.MachinePools,
// WaitForUpdate: e2eConfig.GetIntervals(specName, "wait-machine-pool-nodes"),
// }
// })
// })

// By("creating a byo nodepool", func() {
// AKSBYONodeSpec(ctx, func() AKSBYONodeSpecInput {
// return AKSBYONodeSpecInput{
// Cluster: result.Cluster,
// KubernetesVersion: kubernetesVersion,
// WaitIntervals: e2eConfig.GetIntervals(specName, "wait-worker-nodes"),
// ExpectedWorkerNodes: result.ExpectedWorkerNodes(),
// }
// })
// })

By("attaching the cluster to azure fleet", func() {
AKSFleetsMemberSpec(ctx, func() AKSFleetsMemberInput {
return AKSFleetsMemberInput{
Cluster: result.Cluster,
MachinePool: result.MachinePools[0],
WaitIntervals: e2eConfig.GetIntervals(specName, "wait-machine-pool-nodes"),
}
})
})

By("modifying additionalTags configuration", func() {
AKSAdditionalTagsSpec(ctx, func() AKSAdditionalTagsSpecInput {
return AKSAdditionalTagsSpecInput{
Cluster: result.Cluster,
MachinePools: result.MachinePools,
WaitForUpdate: e2eConfig.GetIntervals(specName, "wait-machine-pool-nodes"),
}
})
})

By("modifying the azure cluster-autoscaler settings", func() {
AKSAzureClusterAutoscalerSettingsSpec(ctx, func() AKSAzureClusterAutoscalerSettingsSpecInput {
return AKSAzureClusterAutoscalerSettingsSpecInput{
Cluster: result.Cluster,
WaitIntervals: e2eConfig.GetIntervals(specName, "wait-control-plane"),
}
})
})

By("modifying node labels configuration", func() {
AKSNodeLabelsSpec(ctx, func() AKSNodeLabelsSpecInput {
return AKSNodeLabelsSpecInput{
Cluster: result.Cluster,
MachinePools: result.MachinePools,
WaitForUpdate: e2eConfig.GetIntervals(specName, "wait-machine-pool-nodes"),
}
})
})

By("modifying taints configuration", func() {
AKSNodeTaintsSpec(ctx, func() AKSNodeTaintsSpecInput {
return AKSNodeTaintsSpecInput{
Cluster: result.Cluster,
MachinePools: result.MachinePools,
WaitForUpdate: e2eConfig.GetIntervals(specName, "wait-machine-pool-nodes"),
}
})
})

By("creating a byo nodepool", func() {
AKSBYONodeSpec(ctx, func() AKSBYONodeSpecInput {
return AKSBYONodeSpecInput{
Cluster: result.Cluster,
KubernetesVersion: kubernetesVersion,
WaitIntervals: e2eConfig.GetIntervals(specName, "wait-worker-nodes"),
ExpectedWorkerNodes: result.ExpectedWorkerNodes(),
}
})
})
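The spec as shown provisions a resource group (capz-aks-fleets-member) and a fleet manager (capz-aks-fleets-manager) but does not delete them. A hedged teardown sketch (not part of this commit) using the same SDK clients, assuming the standard generated BeginDelete long-running-operation methods:

```go
package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservicefleet/armcontainerservicefleet"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources"
)

func main() {
	ctx := context.Background()
	subscriptionID := "00000000-0000-0000-0000-000000000000" // placeholder
	groupName := "capz-aks-fleets-member"                    // matches the spec's constants
	fleetName := "capz-aks-fleets-manager"

	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}

	// Delete the fleet manager first; fleet members are child resources of the fleet.
	fleetsClient, err := armcontainerservicefleet.NewFleetsClient(subscriptionID, cred, nil)
	if err != nil {
		log.Fatal(err)
	}
	fleetPoller, err := fleetsClient.BeginDelete(ctx, groupName, fleetName, nil) // assumed standard generated LRO method
	if err != nil {
		log.Fatal(err)
	}
	if _, err := fleetPoller.PollUntilDone(ctx, nil); err != nil {
		log.Fatal(err)
	}

	// Then remove the resource group the spec created.
	groupsClient, err := armresources.NewResourceGroupsClient(subscriptionID, cred, nil)
	if err != nil {
		log.Fatal(err)
	}
	groupPoller, err := groupsClient.BeginDelete(ctx, groupName, nil)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := groupPoller.PollUntilDone(ctx, nil); err != nil {
		log.Fatal(err)
	}
}
```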
