fix: enable webhooks by default v37 (#1616)
rschalo authored Sep 5, 2024
1 parent 07ecdd4 commit 47fae88
Showing 7 changed files with 31 additions and 177 deletions.
1 change: 1 addition & 0 deletions Makefile
@@ -91,6 +91,7 @@ verify: ## Verify code. Includes codegen, docgen, dependencies, linting, formatt
hack/validation/labels.sh
hack/validation/resources.sh
hack/validation/status.sh
hack/mutation/conversion_webhook_injection.sh
hack/dependabot.sh
@# Use perl instead of sed due to https://stackoverflow.com/questions/4247068/sed-command-with-i-option-failing-on-mac-but-works-on-linux
@# We need to do this "sed replace" until controller-tools fixes this parameterized types issue: https://github.com/kubernetes-sigs/controller-tools/issues/756
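With hack/mutation/conversion_webhook_injection.sh wired into the verify target, regenerating the CRDs locally re-applies the conversion stanza. A minimal sketch of checking that (assuming a local checkout with yq available):

make verify
git diff pkg/apis/crds/   # the conversion stanza should show up in both CRD manifests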
5 changes: 5 additions & 0 deletions hack/mutation/conversion_webhook_injection.sh
@@ -0,0 +1,5 @@
#!/usr/bin/env bash

# Add the conversion stanza to the CRD spec to enable conversion via webhook
yq eval '.spec.conversion = {"strategy": "Webhook", "webhook": {"conversionReviewVersions": ["v1beta1", "v1"], "clientConfig": {"service": {"name": "karpenter", "namespace": "kube-system", "port": 8443}}}}' -i pkg/apis/crds/karpenter.sh_nodeclaims.yaml
yq eval '.spec.conversion = {"strategy": "Webhook", "webhook": {"conversionReviewVersions": ["v1beta1", "v1"], "clientConfig": {"service": {"name": "karpenter", "namespace": "kube-system", "port": 8443}}}}' -i pkg/apis/crds/karpenter.sh_nodepools.yaml
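
Both yq invocations rewrite the CRD manifests in place. A quick way to confirm the result without running the full verify target (a sketch, assuming yq v4 on the PATH and the repo root as the working directory):

bash hack/mutation/conversion_webhook_injection.sh
yq eval '.spec.conversion.strategy' pkg/apis/crds/karpenter.sh_nodeclaims.yaml   # expect: Webhook
yq eval '.spec.conversion.strategy' pkg/apis/crds/karpenter.sh_nodepools.yaml    # expect: Webhook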
11 changes: 11 additions & 0 deletions pkg/apis/crds/karpenter.sh_nodeclaims.yaml
@@ -811,3 +811,14 @@ spec:
storage: true
subresources:
status: {}
conversion:
strategy: Webhook
webhook:
conversionReviewVersions:
- v1beta1
- v1
clientConfig:
service:
name: karpenter
namespace: kube-system
port: 8443
11 changes: 11 additions & 0 deletions pkg/apis/crds/karpenter.sh_nodepools.yaml
@@ -1002,3 +1002,14 @@ spec:
storage: true
subresources:
status: {}
conversion:
strategy: Webhook
webhook:
conversionReviewVersions:
- v1beta1
- v1
clientConfig:
service:
name: karpenter
namespace: kube-system
port: 8443
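
The same stanza lands in both CRDs, pointing the API server at the karpenter Service in kube-system on port 8443 for conversion between v1beta1 and v1. Once the manifests are applied, the wiring can be spot-checked like this (a sketch, assuming the CRDs above are installed in a cluster):

kubectl get crd nodepools.karpenter.sh -o jsonpath='{.spec.conversion.strategy}'                       # expect: Webhook
kubectl get crd nodeclaims.karpenter.sh -o jsonpath='{.spec.conversion.webhook.clientConfig.service}'  # expect: the karpenter service in kube-system on port 8443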
174 changes: 0 additions & 174 deletions pkg/controllers/disruption/consolidation_test.go
@@ -4136,180 +4136,6 @@ var _ = Describe("Consolidation", func() {
ExpectExists(ctx, env.Client, nodes[1])
})
})

Context("Timeout", func() {
It("should return the last valid command when multi-nodeclaim consolidation times out", func() {
numNodes := 20
nodeClaims, nodes := test.NodeClaimsAndNodes(numNodes, v1beta1.NodeClaim{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1beta1.NodePoolLabelKey: nodePool.Name,
v1.LabelInstanceTypeStable: leastExpensiveInstance.Name,
v1beta1.CapacityTypeLabelKey: leastExpensiveOffering.Requirements.Get(v1beta1.CapacityTypeLabelKey).Any(),
v1.LabelTopologyZone: leastExpensiveOffering.Requirements.Get(v1.LabelTopologyZone).Any(),
},
},
Status: v1beta1.NodeClaimStatus{
Allocatable: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("32"),
v1.ResourcePods: resource.MustParse("100"),
},
}},
)
// create our RS so we can link a pod to it
rs := test.ReplicaSet()
ExpectApplied(ctx, env.Client, rs)
pods := test.Pods(numNodes, test.PodOptions{
ObjectMeta: metav1.ObjectMeta{Labels: labels,
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: "apps/v1",
Kind: "ReplicaSet",
Name: rs.Name,
UID: rs.UID,
Controller: lo.ToPtr(true),
BlockOwnerDeletion: lo.ToPtr(true),
},
}},
ResourceRequirements: v1.ResourceRequirements{
Requests: v1.ResourceList{
// Make the resource requests small so that many nodes can be consolidated at once.
v1.ResourceCPU: resource.MustParse("10m"),
},
},
})

ExpectApplied(ctx, env.Client, rs, nodePool)
for _, nodeClaim := range nodeClaims {
ExpectApplied(ctx, env.Client, nodeClaim)
}
for _, node := range nodes {
ExpectApplied(ctx, env.Client, node)
}
for i, pod := range pods {
ExpectApplied(ctx, env.Client, pod)
ExpectManualBinding(ctx, env.Client, pod, nodes[i])
}

// inform cluster state about nodes and nodeClaims
ExpectMakeNodesAndNodeClaimsInitializedAndStateUpdated(ctx, env.Client, nodeStateController, nodeClaimStateController, nodes, nodeClaims)

var wg sync.WaitGroup
wg.Add(1)
finished := atomic.Bool{}
go func() {
defer GinkgoRecover()
defer wg.Done()
defer finished.Store(true)
ExpectReconcileSucceeded(ctx, disruptionController, client.ObjectKey{})
}()

// advance the clock so that the timeout expires
fakeClock.Step(disruption.MultiNodeConsolidationTimeoutDuration)

// wait for the controller to block on the validation timeout
Eventually(fakeClock.HasWaiters, time.Second*10).Should(BeTrue())

ExpectTriggerVerifyAction(&wg)

// controller should be blocking during the timeout
Expect(finished.Load()).To(BeFalse())

// and the node should not be deleted yet
for i := range nodeClaims {
ExpectExists(ctx, env.Client, nodeClaims[i])
}

// controller should finish
Eventually(finished.Load, 10*time.Second).Should(BeTrue())
wg.Wait()

ExpectReconcileSucceeded(ctx, queue, types.NamespacedName{})

// should have at least two nodes deleted from multi nodeClaim consolidation
Expect(len(ExpectNodeClaims(ctx, env.Client))).To(BeNumerically("<=", numNodes-2))
})
It("should exit single-nodeclaim consolidation if it times out", func() {
numNodes := 25
nodeClaims, nodes := test.NodeClaimsAndNodes(numNodes, v1beta1.NodeClaim{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1beta1.NodePoolLabelKey: nodePool.Name,
v1.LabelInstanceTypeStable: leastExpensiveInstance.Name,
v1beta1.CapacityTypeLabelKey: leastExpensiveOffering.Requirements.Get(v1beta1.CapacityTypeLabelKey).Any(),
v1.LabelTopologyZone: leastExpensiveOffering.Requirements.Get(v1.LabelTopologyZone).Any(),
},
},
Status: v1beta1.NodeClaimStatus{
Allocatable: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("32"),
v1.ResourcePods: resource.MustParse("100"),
},
}},
)
// create our RS so we can link a pod to it
rs := test.ReplicaSet()
ExpectApplied(ctx, env.Client, rs)
pods := test.Pods(numNodes, test.PodOptions{
ObjectMeta: metav1.ObjectMeta{Labels: labels,
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: "apps/v1",
Kind: "ReplicaSet",
Name: rs.Name,
UID: rs.UID,
Controller: lo.ToPtr(true),
BlockOwnerDeletion: lo.ToPtr(true),
},
}},
ResourceRequirements: v1.ResourceRequirements{
Requests: v1.ResourceList{
// Make the pods more than half of the allocatable so that only one nodeclaim can be done at any time
v1.ResourceCPU: resource.MustParse("20"),
},
},
})

ExpectApplied(ctx, env.Client, rs, nodePool)
for _, nodeClaim := range nodeClaims {
ExpectApplied(ctx, env.Client, nodeClaim)
}
for _, node := range nodes {
ExpectApplied(ctx, env.Client, node)
}
for i, pod := range pods {
ExpectApplied(ctx, env.Client, pod)
ExpectManualBinding(ctx, env.Client, pod, nodes[i])
}

// inform cluster state about nodes and nodeClaims
ExpectMakeNodesAndNodeClaimsInitializedAndStateUpdated(ctx, env.Client, nodeStateController, nodeClaimStateController, nodes, nodeClaims)

var wg sync.WaitGroup
wg.Add(1)
finished := atomic.Bool{}
go func() {
defer GinkgoRecover()
defer wg.Done()
defer finished.Store(true)
ExpectReconcileSucceeded(ctx, disruptionController, client.ObjectKey{})
}()

// advance the clock so that the timeout expires for multi-nodeClaim
fakeClock.Step(disruption.MultiNodeConsolidationTimeoutDuration)
// advance the clock so that the timeout expires for single-nodeClaim
fakeClock.Step(disruption.SingleNodeConsolidationTimeoutDuration)

ExpectTriggerVerifyAction(&wg)

// controller should finish
Eventually(finished.Load, 10*time.Second).Should(BeTrue())
wg.Wait()

// should have no nodeClaims deleted from single nodeClaim consolidation
Expect(ExpectNodeClaims(ctx, env.Client)).To(HaveLen(numNodes))
})
})
Context("Multi-NodeClaim", func() {
var nodeClaims, spotNodeClaims []*v1beta1.NodeClaim
var nodes, spotNodes []*v1.Node
2 changes: 1 addition & 1 deletion pkg/operator/options/options.go
@@ -83,7 +83,7 @@ func (fs *FlagSet) BoolVarWithEnv(p *bool, name string, envVar string, val bool,

func (o *Options) AddFlags(fs *FlagSet) {
fs.StringVar(&o.ServiceName, "karpenter-service", env.WithDefaultString("KARPENTER_SERVICE", ""), "The Karpenter Service name for the dynamic webhook certificate")
fs.BoolVarWithEnv(&o.DisableWebhook, "disable-webhook", "DISABLE_WEBHOOK", true, "Disable the admission and validation webhooks")
fs.BoolVarWithEnv(&o.DisableWebhook, "disable-webhook", "DISABLE_WEBHOOK", false, "Disable the admission and validation webhooks")
fs.IntVar(&o.WebhookPort, "webhook-port", env.WithDefaultInt("WEBHOOK_PORT", 8443), "The port the webhook endpoint binds to for validation and mutation of resources")
fs.IntVar(&o.MetricsPort, "metrics-port", env.WithDefaultInt("METRICS_PORT", 8000), "The port the metric endpoint binds to for operating metrics about the controller itself")
fs.IntVar(&o.WebhookMetricsPort, "webhook-metrics-port", env.WithDefaultInt("WEBHOOK_METRICS_PORT", 8001), "The port the webhook metric endpoing binds to for operating metrics about the webhook")
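Flipping the default from true to false means the webhooks now start unless they are explicitly turned off. A sketch of opting back out, using the flag and environment variable wired up above (the binary invocation is illustrative, not taken from this commit):

karpenter --disable-webhook=true
DISABLE_WEBHOOK=true karpenter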
4 changes: 2 additions & 2 deletions pkg/operator/options/suite_test.go
@@ -97,7 +97,7 @@ var _ = Describe("Options", func() {
Expect(err).To(BeNil())
expectOptionsMatch(opts, test.Options(test.OptionsFields{
ServiceName: lo.ToPtr(""),
DisableWebhook: lo.ToPtr(true),
DisableWebhook: lo.ToPtr(false),
WebhookPort: lo.ToPtr(8443),
MetricsPort: lo.ToPtr(8000),
WebhookMetricsPort: lo.ToPtr(8001),
@@ -255,7 +255,7 @@ var _ = Describe("Options", func() {
Entry("explicit true", "--disable-webhook=true", true),
Entry("explicit false", "--disable-webhook=false", false),
Entry("implicit true", "--disable-webhook", true),
Entry("implicit true", "", true),
Entry("implicit false", "", false),
)
})

