From 914163cb33f7f04550e7434a19b039f5b893af4a Mon Sep 17 00:00:00 2001
From: Youssef Rabie
Date: Mon, 10 Feb 2025 13:47:16 +0200
Subject: [PATCH] fix: fix waiting to reach failureThreshold before fallback
 (#6520)

Signed-off-by: Youssef Rabie
Signed-off-by: Jorge Turrado Ferrero
Co-authored-by: Jorge Turrado Ferrero
---
 CHANGELOG.md                                |  3 +-
 pkg/scaling/executor/scale_executor.go      |  7 ---
 pkg/scaling/executor/scale_scaledobjects.go | 24 ++-----
 .../executor/scale_scaledobjects_test.go    | 62 -------------------
 4 files changed, 8 insertions(+), 88 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 955b2747d76..c3a28343d2a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -56,8 +56,9 @@ To learn more about active deprecations, we recommend checking [GitHub Discussio
 - [v1.0.0](#v100)
 
 ## Unreleased
-- **General**: Fix the check whether Fallback is enabled when using ScalingModifiers ([#6521](https://github.com/kedacore/keda/pull/6521))
 
+- **General**: Fix the check whether Fallback is enabled when using ScalingModifiers ([#6521](https://github.com/kedacore/keda/pull/6521))
+- **General**: Fix waiting to reach `failureThreshold` before fallback ([#6520](https://github.com/kedacore/keda/pull/6520))
 - **General**: Introduce new Temporal scaler ([#4724](https://github.com/kedacore/keda/issues/4724))
 
 ### New
diff --git a/pkg/scaling/executor/scale_executor.go b/pkg/scaling/executor/scale_executor.go
index 05832f82a10..90e66eb6e10 100644
--- a/pkg/scaling/executor/scale_executor.go
+++ b/pkg/scaling/executor/scale_executor.go
@@ -125,10 +125,3 @@ func (e *scaleExecutor) setActiveCondition(ctx context.Context, logger logr.Logg
 	}
 	return e.setCondition(ctx, logger, object, status, reason, message, active)
 }
-
-func (e *scaleExecutor) setFallbackCondition(ctx context.Context, logger logr.Logger, object interface{}, status metav1.ConditionStatus, reason string, message string) error {
-	fallback := func(conditions kedav1alpha1.Conditions, status metav1.ConditionStatus, reason string, message string) {
-		conditions.SetFallbackCondition(status, reason, message)
-	}
-	return e.setCondition(ctx, logger, object, status, reason, message, fallback)
-}
diff --git a/pkg/scaling/executor/scale_scaledobjects.go b/pkg/scaling/executor/scale_scaledobjects.go
index ee1284fb8e5..ea0b73e2eb4 100644
--- a/pkg/scaling/executor/scale_scaledobjects.go
+++ b/pkg/scaling/executor/scale_scaledobjects.go
@@ -161,12 +161,12 @@ func (e *scaleExecutor) RequestScale(ctx context.Context, scaledObject *kedav1al
 	// isActive == false
 	switch {
 	case isError && scaledObject.Spec.Fallback != nil && scaledObject.Spec.Fallback.Replicas != 0:
-		// there are no active triggers, but a scaler responded with an error
-		// AND
-		// there is a fallback replicas count defined
-
-		// Scale to the fallback replicas count
-		e.doFallbackScaling(ctx, scaledObject, currentScale, logger, currentReplicas)
+		// We need to have this switch case even if just for logging.
+		// Otherwise, if we have `minReplicas=zero`, we will fall into the third case expression,
+		// which will scale the target to 0. Scaling the target to 0 means the HPA will not scale it to fallback.replicas
+		// after fallback.failureThreshold has passed because of what's described here:
+		// https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#implicit-maintenance-mode-deactivation
+		logger.V(1).Info("ScaleTarget will fallback to Fallback.Replicas after Fallback.FailureThreshold")
 	case isError && scaledObject.Spec.Fallback == nil:
 		// there are no active triggers, but a scaler responded with an error
 		// AND
@@ -230,18 +230,6 @@ func (e *scaleExecutor) RequestScale(ctx context.Context, scaledObject *kedav1al
 	}
 }
 
-func (e *scaleExecutor) doFallbackScaling(ctx context.Context, scaledObject *kedav1alpha1.ScaledObject, currentScale *autoscalingv1.Scale, logger logr.Logger, currentReplicas int32) {
-	_, err := e.updateScaleOnScaleTarget(ctx, scaledObject, currentScale, scaledObject.Spec.Fallback.Replicas)
-	if err == nil {
-		logger.Info("Successfully set ScaleTarget replicas count to ScaledObject fallback.replicas",
-			"Original Replicas Count", currentReplicas,
-			"New Replicas Count", scaledObject.Spec.Fallback.Replicas)
-	}
-	if e := e.setFallbackCondition(ctx, logger, scaledObject, metav1.ConditionTrue, "FallbackExists", "At least one trigger is falling back on this scaled object"); e != nil {
-		logger.Error(e, "Error setting fallback condition")
-	}
-}
-
 // An object will be scaled down to 0 only if it's passed its cooldown period
 // or if LastActiveTime is nil
 func (e *scaleExecutor) scaleToZeroOrIdle(ctx context.Context, logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject, scale *autoscalingv1.Scale) {
diff --git a/pkg/scaling/executor/scale_scaledobjects_test.go b/pkg/scaling/executor/scale_scaledobjects_test.go
index 1a426c9cde7..de0d9ea5891 100644
--- a/pkg/scaling/executor/scale_scaledobjects_test.go
+++ b/pkg/scaling/executor/scale_scaledobjects_test.go
@@ -33,68 +33,6 @@ import (
 	"github.com/kedacore/keda/v2/pkg/mock/mock_scale"
 )
 
-func TestScaleToFallbackReplicasWhenNotActiveAndIsError(t *testing.T) {
-	ctrl := gomock.NewController(t)
-	client := mock_client.NewMockClient(ctrl)
-	recorder := record.NewFakeRecorder(1)
-	mockScaleClient := mock_scale.NewMockScalesGetter(ctrl)
-	mockScaleInterface := mock_scale.NewMockScaleInterface(ctrl)
-	statusWriter := mock_client.NewMockStatusWriter(ctrl)
-
-	scaleExecutor := NewScaleExecutor(client, mockScaleClient, nil, recorder)
-
-	scaledObject := v1alpha1.ScaledObject{
-		ObjectMeta: v1.ObjectMeta{
-			Name:      "name",
-			Namespace: "namespace",
-		},
-		Spec: v1alpha1.ScaledObjectSpec{
-			ScaleTargetRef: &v1alpha1.ScaleTarget{
-				Name: "name",
-			},
-			Fallback: &v1alpha1.Fallback{
-				FailureThreshold: 3,
-				Replicas:         5,
-			},
-		},
-		Status: v1alpha1.ScaledObjectStatus{
-			ScaleTargetGVKR: &v1alpha1.GroupVersionKindResource{
-				Group: "apps",
-				Kind:  "Deployment",
-			},
-		},
-	}
-
-	scaledObject.Status.Conditions = *v1alpha1.GetInitializedConditions()
-
-	numberOfReplicas := int32(2)
-
-	client.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).SetArg(2, appsv1.Deployment{
-		Spec: appsv1.DeploymentSpec{
-			Replicas: &numberOfReplicas,
-		},
-	})
-
-	scale := &autoscalingv1.Scale{
-		Spec: autoscalingv1.ScaleSpec{
-			Replicas: numberOfReplicas,
-		},
-	}
-
-	mockScaleClient.EXPECT().Scales(gomock.Any()).Return(mockScaleInterface).Times(2)
-	mockScaleInterface.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(scale, nil)
-	mockScaleInterface.EXPECT().Update(gomock.Any(), gomock.Any(), gomock.Eq(scale), gomock.Any())
-
-	client.EXPECT().Status().Times(2).Return(statusWriter)
-	statusWriter.EXPECT().Patch(gomock.Any(), gomock.Any(), gomock.Any()).Times(2)
-
-	scaleExecutor.RequestScale(context.TODO(), &scaledObject, false, true, &ScaleExecutorOptions{})
-
-	assert.Equal(t, int32(5), scale.Spec.Replicas)
-	condition := scaledObject.Status.Conditions.GetFallbackCondition()
-	assert.Equal(t, true, condition.IsTrue())
-}
-
 func TestScaleToMinReplicasWhenNotActive(t *testing.T) {
 	ctrl := gomock.NewController(t)
 	client := mock_client.NewMockClient(ctrl)