
Commit

complete
jmdeal committed Dec 10, 2024
1 parent 6df1a3c commit 0fdc2a2
Showing 26 changed files with 1,942 additions and 1,398 deletions.
8 changes: 6 additions & 2 deletions pkg/controllers/controllers.go
@@ -38,8 +38,10 @@ import (
metricspod "sigs.k8s.io/karpenter/pkg/controllers/metrics/pod"
"sigs.k8s.io/karpenter/pkg/controllers/node/health"
nodehydration "sigs.k8s.io/karpenter/pkg/controllers/node/hydration"
"sigs.k8s.io/karpenter/pkg/controllers/node/termination"
"sigs.k8s.io/karpenter/pkg/controllers/node/termination/drain"
"sigs.k8s.io/karpenter/pkg/controllers/node/termination/eviction"
"sigs.k8s.io/karpenter/pkg/controllers/node/termination/instancetermination"
"sigs.k8s.io/karpenter/pkg/controllers/node/termination/volumedetachment"
nodeclaimconsistency "sigs.k8s.io/karpenter/pkg/controllers/nodeclaim/consistency"
nodeclaimdisruption "sigs.k8s.io/karpenter/pkg/controllers/nodeclaim/disruption"
"sigs.k8s.io/karpenter/pkg/controllers/nodeclaim/expiration"
@@ -83,7 +85,9 @@ func NewControllers(
informer.NewPodController(kubeClient, cluster),
informer.NewNodePoolController(kubeClient, cloudProvider, cluster),
informer.NewNodeClaimController(kubeClient, cloudProvider, cluster),
- termination.NewController(clock, kubeClient, cloudProvider, recorder, evictionQueue),
+ drain.NewController(clock, kubeClient, cloudProvider, recorder, evictionQueue),
+ volumedetachment.NewController(clock, kubeClient, cloudProvider, recorder),
+ instancetermination.NewController(clock, kubeClient, cloudProvider),
metricspod.NewController(kubeClient, cluster),
metricsnodepool.NewController(kubeClient, cloudProvider),
metricsnode.NewController(cluster),
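
Note: the single termination controller is replaced above by three controllers registered separately: drain, volumedetachment, and instancetermination. Below is a minimal sketch of wiring several independent reconcilers into a controller-runtime manager, which is the general pattern this change relies on. The reconciler type, its body, and the controller name are illustrative assumptions, not Karpenter's actual implementation.

```go
// Sketch only: registering independent per-phase reconcilers with a
// controller-runtime manager. All names here are hypothetical.
package main

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// drainReconciler stands in for one of the new termination sub-controllers.
type drainReconciler struct{ kubeClient client.Client }

func (r *drainReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	// A real drain reconciler would evict pods and drop its finalizer once the
	// node is drained; this stub only shows where that logic would live.
	return ctrl.Result{}, nil
}

func main() {
	scheme := runtime.NewScheme()
	_ = clientgoscheme.AddToScheme(scheme)

	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{Scheme: scheme})
	if err != nil {
		panic(err)
	}
	// Each termination phase gets its own controller and its own watch, mirroring
	// the separate drain / volumedetachment / instancetermination registrations.
	if err := ctrl.NewControllerManagedBy(mgr).
		Named("node.termination.drain").
		For(&corev1.Node{}).
		Complete(&drainReconciler{kubeClient: mgr.GetClient()}); err != nil {
		panic(err)
	}
	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		panic(err)
	}
}
```
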
2 changes: 1 addition & 1 deletion pkg/controllers/node/hydration/controller.go
@@ -64,7 +64,7 @@ func (c *Controller) Register(_ context.Context, m manager.Manager) error {
return controllerruntime.NewControllerManagedBy(m).
Named(c.Name()).
For(&corev1.Node{}).
- Watches(&v1.NodeClaim{}, nodeutils.NodeClaimEventHandler(c.kubeClient)).
+ Watches(&v1.NodeClaim{}, nodeutils.NodeClaimEventHandler(c.kubeClient, c.cloudProvider)).
WithOptions(controller.Options{
RateLimiter: reasonable.RateLimiter(),
MaxConcurrentReconciles: 1000,
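
Note: the NodeClaim event handler used by the hydration controller now also receives the cloud provider, presumably so NodeClaim events only enqueue Nodes managed by this Karpenter instance, consistent with the "not managed by this instance" test in the suite below. A hedged sketch of such a handler follows; Karpenter's real nodeutils.NodeClaimEventHandler lives elsewhere and may differ, and the isManaged predicate stands in for whatever cloud-provider check the real helper performs.

```go
// Sketch of a NodeClaim-to-Node event handler that consults an extra
// "is this NodeClaim managed by us" check. The predicate is an assumption,
// not Karpenter's actual API.
package hydration

import (
	"context"

	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"

	v1 "sigs.k8s.io/karpenter/pkg/apis/v1"
)

func nodeClaimEventHandler(_ client.Client, isManaged func(*v1.NodeClaim) bool) handler.EventHandler {
	return handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, o client.Object) []reconcile.Request {
		nc, ok := o.(*v1.NodeClaim)
		if !ok || !isManaged(nc) || nc.Status.NodeName == "" {
			return nil
		}
		// Enqueue the Node this NodeClaim is bound to.
		return []reconcile.Request{{NamespacedName: types.NamespacedName{Name: nc.Status.NodeName}}}
	})
}
```
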
86 changes: 43 additions & 43 deletions pkg/controllers/node/hydration/suite_test.go
@@ -24,8 +24,10 @@ import (
. "github.com/onsi/gomega"
"github.com/samber/lo"

+ corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"

"sigs.k8s.io/karpenter/pkg/apis"
v1 "sigs.k8s.io/karpenter/pkg/apis/v1"
"sigs.k8s.io/karpenter/pkg/cloudprovider/fake"
@@ -67,66 +69,64 @@ var _ = AfterEach(func() {
})

var _ = Describe("Hydration", func() {
+ var nodeClaim *v1.NodeClaim
+ var node *corev1.Node
+
+ BeforeEach(func() {
+ nodeClaim, node = test.NodeClaimAndNode(v1.NodeClaim{
+ ObjectMeta: metav1.ObjectMeta{
+ Annotations: map[string]string{v1.HydrationAnnotationKey: "not-hydrated"},
+ },
+ })
+ })

It("should hydrate the NodeClass label", func() {
- nc, n := test.NodeClaimAndNode()
- delete(n.Labels, v1.NodeClassLabelKey(nc.Spec.NodeClassRef.GroupKind()))
- ExpectApplied(ctx, env.Client, nc, n)
- ExpectObjectReconciled(ctx, env.Client, hydrationController, n)
- n = ExpectExists(ctx, env.Client, n)
- value := n.Labels[v1.NodeClassLabelKey(nc.Spec.NodeClassRef.GroupKind())]
- Expect(value).To(Equal(nc.Spec.NodeClassRef.Name))
+ delete(node.Labels, v1.NodeClassLabelKey(nodeClaim.Spec.NodeClassRef.GroupKind()))
+ ExpectApplied(ctx, env.Client, nodeClaim, node)
+ ExpectObjectReconciled(ctx, env.Client, hydrationController, node)
+ node = ExpectExists(ctx, env.Client, node)
+ value := node.Labels[v1.NodeClassLabelKey(nodeClaim.Spec.NodeClassRef.GroupKind())]
+ Expect(value).To(Equal(nodeClaim.Spec.NodeClassRef.Name))
})
DescribeTable(
"Finalizers",
func(nodeClaimConditions []string, expectedFinailzers []string) {
- nc, n := test.NodeClaimAndNode()
for _, cond := range nodeClaimConditions {
- nc.StatusConditions().SetTrue(cond)
+ nodeClaim.StatusConditions().SetTrue(cond)
}
- ExpectApplied(ctx, env.Client, nc, n)
- ExpectObjectReconciled(ctx, env.Client, hydrationController, n)
- n = ExpectExists(ctx, env.Client, n)
- Expect(len(n.Finalizers)).To(Equal(len(expectedFinailzers)))
+ ExpectApplied(ctx, env.Client, nodeClaim, node)
+ ExpectObjectReconciled(ctx, env.Client, hydrationController, node)
+ node = ExpectExists(ctx, env.Client, node)
+ Expect(len(node.Finalizers)).To(Equal(len(expectedFinailzers)))
for _, finalizer := range expectedFinailzers {
- Expect(controllerutil.ContainsFinalizer(n, finalizer))
+ Expect(controllerutil.ContainsFinalizer(node, finalizer))
}
},
Entry("should hydrate all finalizers when none of the requisite status conditions are true", nil, []string{v1.DrainFinalizer, v1.VolumeFinalizer}),
Entry("should hydrate the volume finalizer when only the drain status condition is true", []string{v1.ConditionTypeDrained}, []string{v1.VolumeFinalizer}),
Entry("should hydrate the drain finalizer when only the volume status condition is true", []string{v1.ConditionTypeVolumesDetached}, []string{v1.VolumeFinalizer}),
Entry("shouldn't hydrate finalizers when all requisite conditions are true", []string{v1.ConditionTypeDrained, v1.ConditionTypeVolumesDetached}, nil),
)

It("shouldn't hydrate nodes which have already been hydrated", func() {
- nc, n := test.NodeClaimAndNode(v1.NodeClaim{
- ObjectMeta: metav1.ObjectMeta{
- Annotations: map[string]string{
- v1.HydrationAnnotationKey: operator.Version,
- },
- },
- })
- delete(n.Labels, v1.NodeClassLabelKey(nc.Spec.NodeClassRef.GroupKind()))
- ExpectApplied(ctx, env.Client, nc, n)
- ExpectObjectReconciled(ctx, env.Client, hydrationController, n)
- n = ExpectExists(ctx, env.Client, n)
- Expect(lo.Keys(n.Labels)).ToNot(ContainElement(v1.NodeClassLabelKey(nc.Spec.NodeClassRef.GroupKind())))
- Expect(len(n.Finalizers)).To(Equal(0))
+ node.Annotations[v1.HydrationAnnotationKey] = operator.Version
+ delete(node.Labels, v1.NodeClassLabelKey(nodeClaim.Spec.NodeClassRef.GroupKind()))
+ ExpectApplied(ctx, env.Client, nodeClaim, node)
+ ExpectObjectReconciled(ctx, env.Client, hydrationController, node)
+ node = ExpectExists(ctx, env.Client, node)
+ Expect(lo.Keys(node.Labels)).ToNot(ContainElement(v1.NodeClassLabelKey(nodeClaim.Spec.NodeClassRef.GroupKind())))
+ Expect(len(node.Finalizers)).To(Equal(0))
})
It("shouldn't hydrate nodes which are not managed by this instance of Karpenter", func() {
- nc, n := test.NodeClaimAndNode(v1.NodeClaim{
- Spec: v1.NodeClaimSpec{
- NodeClassRef: &v1.NodeClassReference{
- Group: "karpenter.test.sh",
- Kind: "UnmanagedNodeClass",
- Name: "default",
- },
- },
- })
- delete(n.Labels, v1.NodeClassLabelKey(nc.Spec.NodeClassRef.GroupKind()))
- ExpectApplied(ctx, env.Client, nc, n)
- ExpectObjectReconciled(ctx, env.Client, hydrationController, n)
- n = ExpectExists(ctx, env.Client, n)
- Expect(lo.Keys(n.Labels)).ToNot(ContainElement(v1.NodeClassLabelKey(nc.Spec.NodeClassRef.GroupKind())))
- Expect(len(n.Finalizers)).To(Equal(0))
+ nodeClaim.Spec.NodeClassRef = &v1.NodeClassReference{
+ Group: "karpenter.test.sh",
+ Kind: "UnmanagedNodeClass",
+ Name: "default",
+ }
+ delete(node.Labels, v1.NodeClassLabelKey(nodeClaim.Spec.NodeClassRef.GroupKind()))
+ ExpectApplied(ctx, env.Client, nodeClaim, node)
+ ExpectObjectReconciled(ctx, env.Client, hydrationController, node)
+ node = ExpectExists(ctx, env.Client, node)
+ Expect(lo.Keys(node.Labels)).ToNot(ContainElement(v1.NodeClassLabelKey(nodeClaim.Spec.NodeClassRef.GroupKind())))
+ Expect(len(node.Finalizers)).To(Equal(0))
})
})
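
Note: the test refactor above replaces per-test construction of nc/n with a shared nodeClaim/node fixture built in BeforeEach (pre-annotated as not hydrated), so each It only mutates what it needs. The generic Ginkgo shape of that pattern is sketched below with placeholder names rather than Karpenter's test helpers, assuming the same ginkgo/gomega dot imports and corev1/metav1 imports as the suite above.

```go
// Illustrative Ginkgo/Gomega pattern only; the object name and annotation key are made up.
var _ = Describe("Shared fixtures", func() {
	var node *corev1.Node

	BeforeEach(func() {
		node = &corev1.Node{ObjectMeta: metav1.ObjectMeta{
			Name:        "example-node",
			Annotations: map[string]string{"example.sh/hydrated": "not-hydrated"},
		}}
	})

	It("mutates only what this case needs", func() {
		node.Annotations["example.sh/hydrated"] = "some-version"
		Expect(node.Annotations["example.sh/hydrated"]).ToNot(Equal("not-hydrated"))
	})
})
```
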
202 changes: 0 additions & 202 deletions pkg/controllers/node/termination/controller.go

This file was deleted.
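
Note: the responsibilities of this deleted monolithic termination controller presumably move into the new termination/drain, termination/volumedetachment, and termination/instancetermination packages registered in pkg/controllers/controllers.go above; the deleted file's contents are not reproduced on this page.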

(The remaining changed files in this commit are not shown on this page.)
