Add in-place upgrade e2e tests #53

Merged Sep 12, 2024 (4 commits)
Changes from 2 commits
22 changes: 22 additions & 0 deletions bootstrap/config/rbac/role.yaml
@@ -47,11 +47,33 @@ rules:
resources:
- clusters
- clusters/status
verbs:
- get
- list
- watch
- apiGroups:
- cluster.x-k8s.io
resources:
- clusters
- clusters/status
- machines
- machines/status
verbs:
- get
- list
- watch
- apiGroups:
- cluster.x-k8s.io
resources:
- machines
- machines/status
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- exp.cluster.x-k8s.io
1 change: 1 addition & 0 deletions bootstrap/controllers/upgrade_controller.go
@@ -66,6 +66,7 @@ type UpgradeScope struct {
// +kubebuilder:rbac:groups=bootstrap.cluster.x-k8s.io,resources=ck8sconfigs,verbs=get;list;watch
// +kubebuilder:rbac:groups=bootstrap.cluster.x-k8s.io,resources=ck8sconfigs/status,verbs=get;list;watch
// +kubebuilder:rbac:groups="",resources=secrets;events;configmaps,verbs=get;list;watch

func (r *InPlaceUpgradeReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
log := r.Log.WithValues("namespace", req.Namespace, "machine", req.Name)

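The one added line in this hunk is elided above. Judging from the role.yaml changes, it is presumably a kubebuilder RBAC marker for machines, roughly along these lines (a sketch, not visible in this excerpt):

// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machines;machines/status,verbs=create;delete;get;list;patch;update;watch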
2 changes: 1 addition & 1 deletion hack/build-e2e-images.sh
@@ -9,6 +9,6 @@
DIR="$(realpath "$(dirname "${0}")")"

cd "${DIR}/../templates/docker"
sudo docker build . -t k8s-snap:dev-old --build-arg BRANCH=main --build-arg KUBERNETES_VERSION=v1.29.6
sudo docker build . -t k8s-snap:dev-old --build-arg BRANCH=main --build-arg KUBERNETES_VERSION=v1.29.6 --build-arg KUBERNETES_VERSION_UPGRADE_TO=v1.30.4
sudo docker build . -t k8s-snap:dev-new --build-arg BRANCH=main --build-arg KUBERNETES_VERSION=v1.30.4
cd -
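With this change, the dev-old image (Kubernetes v1.29.6) also carries the upgrade payload for v1.30.4, while dev-new is built directly at the target version. A quick way to sanity-check that the payload landed where the e2e helpers expect it (a sketch; the tag comes from the script above and the path from the Dockerfile change below):

sudo docker run --rm --entrypoint ls k8s-snap:dev-old /k8s/upgrade/bin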
11 changes: 11 additions & 0 deletions templates/docker/Dockerfile
@@ -37,6 +37,7 @@ ARG REPO=https://github.com/canonical/k8s-snap
ARG BRANCH=main

ARG KUBERNETES_VERSION=""
ARG KUBERNETES_VERSION_UPGRADE_TO=""

## NOTE(neoaggelos): install dependencies needed to build the tools
## !!!IMPORTANT!!! Keep up to date with "snapcraft.yaml:parts.build-deps.build-packages"
@@ -95,6 +96,15 @@ RUN if [ -n "$KUBERNETES_VERSION" ]; then \
fi
RUN /src/k8s-snap/build-scripts/build-component.sh kubernetes

## kubernetes upgrade version build
FROM builder AS build-kubernetes-upgrade-to
ENV KUBERNETES_VERSION_UPGRADE_TO=${KUBERNETES_VERSION_UPGRADE_TO}
RUN if [ -n "$KUBERNETES_VERSION_UPGRADE_TO" ]; then \
echo "Overwriting Kubernetes version with $KUBERNETES_VERSION_UPGRADE_TO"; \
echo "$KUBERNETES_VERSION_UPGRADE_TO" > /src/k8s-snap/build-scripts/components/kubernetes/version; \
fi
RUN /src/k8s-snap/build-scripts/build-component.sh kubernetes
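## NOTE: /out from this stage is copied to /k8s/upgrade in the final image
## (see the COPY below), which is the path the e2e in-place upgrade
## annotation ("localPath=/k8s/upgrade/bin/kubernetes") points at.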

## runc build
FROM builder AS build-runc
RUN /src/k8s-snap/build-scripts/build-component.sh runc
@@ -141,6 +151,7 @@ COPY --from=build-helm /out /snap/k8s/current
COPY --from=build-containerd /out /snap/k8s/current
COPY --from=build-cni /out /snap/k8s/current
COPY --from=build-kubernetes /out /snap/k8s/current
COPY --from=build-kubernetes-upgrade-to /out /k8s/upgrade
COPY --from=build-k8sd /out /snap/k8s/current
COPY --from=build-pebble /out /snap/k8s/current
COPY --from=build-preload-images /out/images /var/snap/k8s/common/images
118 changes: 118 additions & 0 deletions test/e2e/helpers.go
@@ -40,6 +40,7 @@ import (
"sigs.k8s.io/cluster-api/util/patch"
"sigs.k8s.io/controller-runtime/pkg/client"

bootstrapv1 "github.com/canonical/cluster-api-k8s/bootstrap/api/v1beta2"
controlplanev1 "github.com/canonical/cluster-api-k8s/controlplane/api/v1beta2"
)

@@ -552,6 +553,123 @@ func WaitForControlPlaneAndMachinesReady(ctx context.Context, input WaitForControlPlaneAndMachinesReadyInput) {
})
}

type ApplyInPlaceUpgradeAndWaitInput struct {
Getter framework.Getter
Machine *clusterv1.Machine
ClusterProxy framework.ClusterProxy
WaitForUpgradeIntervals []interface{}
}

func ApplyInPlaceUpgradeAndWait(ctx context.Context, input ApplyInPlaceUpgradeAndWaitInput) {
mgmtClient := input.ClusterProxy.GetClient()

patchHelper, err := patch.NewHelper(input.Machine, mgmtClient)
Expect(err).ToNot(HaveOccurred())
mAnnotations := input.Machine.GetAnnotations()

if mAnnotations == nil {
mAnnotations = map[string]string{}
}

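// The localPath points at the upgrade payload baked into the node image by
// the Dockerfile's build-kubernetes-upgrade-to stage (copied to /k8s/upgrade).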
mAnnotations[bootstrapv1.InPlaceUpgradeToAnnotation] = "localPath=/k8s/upgrade/bin/kubernetes"
input.Machine.SetAnnotations(mAnnotations)
err = patchHelper.Patch(ctx, input.Machine)
Expect(err).ToNot(HaveOccurred())

By("Checking for in-place upgrade status to be equal to done")

Eventually(func() (bool, error) {
um := &clusterv1.Machine{}
if err := input.Getter.Get(ctx, client.ObjectKey{Namespace: input.Machine.Namespace, Name: input.Machine.Name}, um); err != nil {
Byf("Failed to get the machine: %+v", err)
return false, err
}

mAnnotations := um.GetAnnotations()

status, ok := mAnnotations[bootstrapv1.InPlaceUpgradeStatusAnnotation]
if !ok {
return false, nil
}

return status == bootstrapv1.InPlaceUpgradeDoneStatus, nil
}, input.WaitForUpgradeIntervals...).Should(BeTrue(), "In-place upgrade failed for %s", input.Machine.Name)
}

type ApplyInPlaceUpgradeForControlPlaneInput struct {
Lister framework.Lister
Getter framework.Getter
ClusterProxy framework.ClusterProxy
Cluster *clusterv1.Cluster
WaitForUpgradeIntervals []interface{}
}

func ApplyInPlaceUpgradeForControlPlane(ctx context.Context, input ApplyInPlaceUpgradeForControlPlaneInput) {
Expect(ctx).NotTo(BeNil(), "ctx is required for ApplyInPlaceUpgradeForControlPlane")
Expect(input.ClusterProxy).ToNot(BeNil(), "Invalid argument. input.ClusterProxy can't be nil when calling ApplyInPlaceUpgradeForControlPlane")
Expect(input.Cluster).ToNot(BeNil(), "Invalid argument. input.Cluster can't be nil when calling ApplyInPlaceUpgradeForControlPlane")

// Look up all the control plane machines.
inClustersNamespaceListOption := client.InNamespace(input.Cluster.Namespace)
matchClusterListOption := client.MatchingLabels{
clusterv1.ClusterNameLabel: input.Cluster.Name,
clusterv1.MachineControlPlaneLabel: "",
}

machineList := &clusterv1.MachineList{}
Eventually(func() error {
return input.Lister.List(ctx, machineList, inClustersNamespaceListOption, matchClusterListOption)
}, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Couldn't list control-plane machines for the cluster %q", input.Cluster.Name)

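// Upgrade the control plane machines one at a time; each call below blocks
// until the machine's in-place upgrade status annotation reports done.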
for _, machine := range machineList.Items {
ApplyInPlaceUpgradeAndWait(ctx, ApplyInPlaceUpgradeAndWaitInput{
Getter: input.Getter,
Machine: &machine,
ClusterProxy: input.ClusterProxy,
WaitForUpgradeIntervals: input.WaitForUpgradeIntervals,
})
}
}

type ApplyInPlaceUpgradeForWorkerInput struct {
Lister framework.Lister
Getter framework.Getter
ClusterProxy framework.ClusterProxy
Cluster *clusterv1.Cluster
MachineDeployments []*clusterv1.MachineDeployment
WaitForUpgradeIntervals []interface{}
}

func ApplyInPlaceUpgradeForWorker(ctx context.Context, input ApplyInPlaceUpgradeForWorkerInput) {
Expect(ctx).NotTo(BeNil(), "ctx is required for ApplyInPlaceUpgradeForWorker")
Expect(input.ClusterProxy).ToNot(BeNil(), "Invalid argument. input.ClusterProxy can't be nil when calling ApplyInPlaceUpgradeForWorker")
Expect(input.Cluster).ToNot(BeNil(), "Invalid argument. input.Cluster can't be nil when calling ApplyInPlaceUpgradeForWorker")
Expect(input.MachineDeployments).ToNot(BeNil(), "Invalid argument. input.MachineDeployments can't be nil when calling ApplyInPlaceUpgradeForWorker")

for _, md := range input.MachineDeployments {
// Look up all the worker machines belonging to this machine deployment.
inClustersNamespaceListOption := client.InNamespace(input.Cluster.Namespace)
matchClusterListOption := client.MatchingLabels{
clusterv1.ClusterNameLabel: input.Cluster.Name,
clusterv1.MachineDeploymentNameLabel: md.Name,
}

machineList := &clusterv1.MachineList{}
Eventually(func() error {
return input.Lister.List(ctx, machineList, inClustersNamespaceListOption, matchClusterListOption)
}, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Couldn't list machines for the machine deployment %q", md.Name)

for _, machine := range machineList.Items {
ApplyInPlaceUpgradeAndWait(ctx, ApplyInPlaceUpgradeAndWaitInput{
Getter: input.Getter,
Machine: &machine,
ClusterProxy: input.ClusterProxy,
WaitForUpgradeIntervals: input.WaitForUpgradeIntervals,
})
}
}
}

// UpgradeControlPlaneAndWaitForUpgradeInput is the input type for UpgradeControlPlaneAndWaitForUpgrade.
type UpgradeControlPlaneAndWaitForUpgradeInput struct {
ClusterProxy framework.ClusterProxy
120 changes: 120 additions & 0 deletions test/e2e/in_place_upgrade_test.go
@@ -0,0 +1,120 @@
//go:build e2e
// +build e2e

/*
Copyright 2021 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package e2e

import (
"context"
"fmt"
"path/filepath"

. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
"k8s.io/utils/ptr"
"sigs.k8s.io/cluster-api/test/framework/clusterctl"
"sigs.k8s.io/cluster-api/util"
)

var _ = Describe("In place upgrade", func() {
var (
ctx = context.TODO()
specName = "workload-cluster-inplace"
namespace *corev1.Namespace
cancelWatches context.CancelFunc
result *ApplyClusterTemplateAndWaitResult
clusterName string
clusterctlLogFolder string
infrastructureProvider string
)

BeforeEach(func() {
Expect(e2eConfig.Variables).To(HaveKey(KubernetesVersion))

clusterName = fmt.Sprintf("capick8s-in-place-%s", util.RandomString(6))
infrastructureProvider = "docker"

// Set up a namespace to host this spec's objects and create a watcher for the namespace events.
namespace, cancelWatches = setupSpecNamespace(ctx, specName, bootstrapClusterProxy, artifactFolder)

result = new(ApplyClusterTemplateAndWaitResult)

clusterctlLogFolder = filepath.Join(artifactFolder, "clusters", bootstrapClusterProxy.GetName())
})

AfterEach(func() {
cleanInput := cleanupInput{
SpecName: specName,
Cluster: result.Cluster,
ClusterProxy: bootstrapClusterProxy,
Namespace: namespace,
CancelWatches: cancelWatches,
IntervalsGetter: e2eConfig.GetIntervals,
SkipCleanup: skipCleanup,
ArtifactFolder: artifactFolder,
}

dumpSpecResourcesAndCleanup(ctx, cleanInput)
})

Context("Performing in-place upgrades", func() {
It("Creating a workload cluster and applying in-place upgrade to control-plane and worker machines [PR-Blocking]", func() {
By("Creating a workload cluster of 1 control plane and 1 worker node")
ApplyClusterTemplateAndWait(ctx, ApplyClusterTemplateAndWaitInput{
ClusterProxy: bootstrapClusterProxy,
ConfigCluster: clusterctl.ConfigClusterInput{
LogFolder: clusterctlLogFolder,
ClusterctlConfigPath: clusterctlConfigPath,
KubeconfigPath: bootstrapClusterProxy.GetKubeconfigPath(),
InfrastructureProvider: infrastructureProvider,
Namespace: namespace.Name,
ClusterName: clusterName,
KubernetesVersion: e2eConfig.GetVariable(KubernetesVersion),
ControlPlaneMachineCount: ptr.To(int64(1)),
WorkerMachineCount: ptr.To(int64(1)),
},
WaitForClusterIntervals: e2eConfig.GetIntervals(specName, "wait-cluster"),
WaitForControlPlaneIntervals: e2eConfig.GetIntervals(specName, "wait-control-plane"),
WaitForMachineDeployments: e2eConfig.GetIntervals(specName, "wait-worker-nodes"),
}, result)

bootstrapProxyClient := bootstrapClusterProxy.GetClient()

By("Applying in place upgrade with local path for control plane nodes")
ApplyInPlaceUpgradeForControlPlane(ctx, ApplyInPlaceUpgradeForControlPlaneInput{
Lister: bootstrapProxyClient,
Getter: bootstrapProxyClient,
ClusterProxy: bootstrapClusterProxy,
Cluster: result.Cluster,
WaitForUpgradeIntervals: e2eConfig.GetIntervals(specName, "wait-machine-upgrade"),
})

By("Applying in place upgrade with local path for worker nodes")
ApplyInPlaceUpgradeForWorker(ctx, ApplyInPlaceUpgradeForWorkerInput{
Lister: bootstrapProxyClient,
Getter: bootstrapProxyClient,
ClusterProxy: bootstrapClusterProxy,
Cluster: result.Cluster,
WaitForUpgradeIntervals: e2eConfig.GetIntervals(specName, "wait-machine-upgrade"),
MachineDeployments: result.MachineDeployments,
})
})
})

})
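To run only this spec locally, something along these lines should work (a sketch: it assumes ginkgo v2, the image build script above, and that the suite's required e2e flags, such as the config path and artifact folder, are supplied by your environment):

./hack/build-e2e-images.sh
ginkgo --tags e2e --focus "In place upgrade" ./test/e2e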