From 0e327a0ef6adc2aee65dd8d934a3b5a68ca30f5c Mon Sep 17 00:00:00 2001 From: Arvind Thirumurugan Date: Sun, 19 Jan 2025 18:21:08 +0530 Subject: [PATCH] add section for PlacementDisruptionBudget --- .../eviction-placement-disruption-budget.md | 167 +++++++++++++++++- examples/eviction/clusterpdb.yaml | 4 +- 2 files changed, 168 insertions(+), 3 deletions(-) diff --git a/docs/howtos/eviction-placement-disruption-budget.md b/docs/howtos/eviction-placement-disruption-budget.md index 650c58282..6d43c4b81 100644 --- a/docs/howtos/eviction-placement-disruption-budget.md +++ b/docs/howtos/eviction-placement-disruption-budget.md @@ -22,6 +22,7 @@ kubectl create ns test-ns Then we will apply a `ClusterResourcePlacement` with the following spec: ```yaml +spec: resourceSelectors: - group: "" kind: Namespace @@ -188,4 +189,168 @@ status: version: v1 ``` -The status shows that the resources have been removed from the cluster and the only reason the scheduler doesn't re-pick the cluster is because of the taint we added. \ No newline at end of file +The status shows that the resources have been removed from the cluster and the only reason the scheduler doesn't re-pick the cluster is because of the taint we added. + +## Protecting resources from voluntary disruptions using ClusterResourcePlacementDisruptionBudget + +In this example, we will create a ClusterResourcePlacement object with PickN placement policy to propagate resources to an existing MemberCluster, +then create a ClusterResourcePlacementDisruptionBudget object to protect resources on the MemberCluster from voluntary disruption and +then try to evict resources from the MemberCluster. + +We will first create a namespace that we will propagate to the member cluster + +``` +kubectl create ns test-ns +``` + +Then we will apply a `ClusterResourcePlacement` with the following spec: + +```yaml +spec: + resourceSelectors: + - group: "" + kind: Namespace + version: v1 + name: test-ns + policy: + placementType: PickN + numberOfClusters: 1 +``` + +The CRP status after applying should look something like this: + +```yaml +status: + conditions: + - lastTransitionTime: "2025-01-19T12:36:54Z" + message: found all cluster needed as specified by the scheduling policy, found + 1 cluster(s) + observedGeneration: 2 + reason: SchedulingPolicyFulfilled + status: "True" + type: ClusterResourcePlacementScheduled + - lastTransitionTime: "2025-01-19T12:36:54Z" + message: All 1 cluster(s) start rolling out the latest resource + observedGeneration: 2 + reason: RolloutStarted + status: "True" + type: ClusterResourcePlacementRolloutStarted + - lastTransitionTime: "2025-01-19T12:36:54Z" + message: No override rules are configured for the selected resources + observedGeneration: 2 + reason: NoOverrideSpecified + status: "True" + type: ClusterResourcePlacementOverridden + - lastTransitionTime: "2025-01-19T12:36:54Z" + message: Works(s) are succcesfully created or updated in 1 target cluster(s)' + namespaces + observedGeneration: 2 + reason: WorkSynchronized + status: "True" + type: ClusterResourcePlacementWorkSynchronized + - lastTransitionTime: "2025-01-19T12:36:54Z" + message: The selected resources are successfully applied to 1 cluster(s) + observedGeneration: 2 + reason: ApplySucceeded + status: "True" + type: ClusterResourcePlacementApplied + - lastTransitionTime: "2025-01-19T12:36:54Z" + message: The selected resources in 1 cluster(s) are available now + observedGeneration: 2 + reason: ResourceAvailable + status: "True" + type: ClusterResourcePlacementAvailable + 
  observedResourceIndex: "0"
  placementStatuses:
  - clusterName: kind-cluster-1
    conditions:
    - lastTransitionTime: "2025-01-19T12:36:54Z"
      message: 'Successfully scheduled resources for placement in "kind-cluster-1"
        (affinity score: 0, topology spread score: 0): picked by scheduling policy'
      observedGeneration: 2
      reason: Scheduled
      status: "True"
      type: Scheduled
    - lastTransitionTime: "2025-01-19T12:36:54Z"
      message: Detected the new changes on the resources and started the rollout process
      observedGeneration: 2
      reason: RolloutStarted
      status: "True"
      type: RolloutStarted
    - lastTransitionTime: "2025-01-19T12:36:54Z"
      message: No override rules are configured for the selected resources
      observedGeneration: 2
      reason: NoOverrideSpecified
      status: "True"
      type: Overridden
    - lastTransitionTime: "2025-01-19T12:36:54Z"
      message: All of the works are synchronized to the latest
      observedGeneration: 2
      reason: AllWorkSynced
      status: "True"
      type: WorkSynchronized
    - lastTransitionTime: "2025-01-19T12:36:54Z"
      message: All corresponding work objects are applied
      observedGeneration: 2
      reason: AllWorkHaveBeenApplied
      status: "True"
      type: Applied
    - lastTransitionTime: "2025-01-19T12:36:54Z"
      message: All corresponding work objects are available
      observedGeneration: 2
      reason: AllWorkAreAvailable
      status: "True"
      type: Available
  selectedResources:
  - kind: Namespace
    name: test-ns
    version: v1
```

> **Note:** The `ClusterResourcePlacementDisruptionBudget` object is only used as an information source by the eviction controller, and hence it doesn't have a status associated with it at the moment.

Now we will create a `ClusterResourcePlacementDisruptionBudget` object to protect resources on the member cluster from voluntary disruption:

```yaml
apiVersion: placement.kubernetes-fleet.io/v1beta1
kind: ClusterResourcePlacementDisruptionBudget
metadata:
  name: test-crp
spec:
  minAvailable: 1
```

> **Note:** An eviction object is only reconciled once, after which it reaches a terminal state. If the user wants to use the same eviction object again, they need to delete the existing eviction object and re-create it for the eviction to occur again.

Now we will create a `ClusterResourcePlacementEviction` object to evict resources from the member cluster:

```yaml
apiVersion: placement.kubernetes-fleet.io/v1beta1
kind: ClusterResourcePlacementEviction
metadata:
  name: test-eviction
spec:
  placementName: test-crp
  clusterName: kind-cluster-1
```

Let's take a look at the status to see if the eviction was executed:

```yaml
status:
  conditions:
  - lastTransitionTime: "2025-01-19T12:48:42Z"
    message: Eviction is valid
    observedGeneration: 1
    reason: ClusterResourcePlacementEvictionValid
    status: "True"
    type: Valid
  - lastTransitionTime: "2025-01-19T12:48:42Z"
    message: 'Eviction is blocked by specified ClusterResourcePlacementDisruptionBudget,
      availablePlacements: 1, totalPlacements: 1'
    observedGeneration: 1
    reason: ClusterResourcePlacementEvictionNotExecuted
    status: "False"
    type: Executed
```

From the eviction status, we can clearly see that the eviction was blocked by the `ClusterResourcePlacementDisruptionBudget` object, which protected the resources from being evicted.
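If you do want the eviction to go through after reviewing the budget, a minimal sketch of the steps would be to remove (or relax) the `ClusterResourcePlacementDisruptionBudget` and then delete and re-create the `ClusterResourcePlacementEviction`, since an eviction object is only reconciled once. The commands below assume the CRDs expose the usual lowercase resource names and that the eviction manifest above is saved as `test-eviction.yaml`; adjust both to your setup.

```
# Remove the budget so placements on the member cluster are no longer protected
# (alternatively, lower minAvailable to 0 instead of deleting the object).
kubectl delete clusterresourceplacementdisruptionbudget test-crp

# Eviction objects reach a terminal state after one reconcile, so delete and
# re-create the eviction to trigger it again. test-eviction.yaml is the
# manifest shown above (assumed filename).
kubectl delete clusterresourceplacementeviction test-eviction
kubectl apply -f test-eviction.yaml

# Check whether the eviction executed this time.
kubectl get clusterresourceplacementeviction test-eviction -o yaml
```

Once no budget blocks it, the re-created eviction's `Executed` condition should turn `True` and the resources should be removed from the member cluster, similar to the taint example earlier in this guide.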
\ No newline at end of file
diff --git a/examples/eviction/clusterpdb.yaml b/examples/eviction/clusterpdb.yaml
index df2a8ae9d..22bb58bb2 100644
--- a/examples/eviction/clusterpdb.yaml
+++ b/examples/eviction/clusterpdb.yaml
@@ -1,6 +1,6 @@
-apiVersion: placement.kubernetes-fleet.io/v1alpha1
+apiVersion: placement.kubernetes-fleet.io/v1beta1
 kind: ClusterResourcePlacementDisruptionBudget
 metadata:
   name: test-crp
 spec:
-  maxUnavailable: 1
\ No newline at end of file
+  minAvailable: 1
\ No newline at end of file