fix: fix waiting to reach failureThreshold before fallback #6520

Merged · 2 commits · Feb 10, 2025
Changes from all commits:
CHANGELOG.md (3 changes: 2 additions & 1 deletion)
@@ -56,8 +56,9 @@ To learn more about active deprecations, we recommend checking [GitHub Discussio
- [v1.0.0](#v100)

## Unreleased
- **General**: Fix the check whether Fallback is enabled when using ScalingModifiers ([#6521](https://github.com/kedacore/keda/pull/6521))

- **General**: Fix the check whether Fallback is enabled when using ScalingModifiers ([#6521](https://github.com/kedacore/keda/pull/6521))
- **General**: Fix waiting to reach `failureThreshold` before fallback ([#6520](https://github.com/kedacore/keda/pull/6520))
- **General**: Introduce new Temporal scaler ([#4724](https://github.com/kedacore/keda/issues/4724))

### New
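For context on the two fallback-related changelog entries above: fallback is configured per ScaledObject. The sketch below is a minimal, illustrative construction using the same `v1alpha1.Fallback` fields exercised by the test removed later in this diff; the values and import paths are assumptions for illustration, not part of this PR.

```go
package main

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1"
)

func main() {
	// After FailureThreshold consecutive failed metric reads from the scalers,
	// the workload is held at the fallback Replicas count until the scalers recover.
	so := kedav1alpha1.ScaledObject{
		ObjectMeta: metav1.ObjectMeta{Name: "name", Namespace: "namespace"},
		Spec: kedav1alpha1.ScaledObjectSpec{
			ScaleTargetRef: &kedav1alpha1.ScaleTarget{Name: "name"},
			Fallback: &kedav1alpha1.Fallback{
				FailureThreshold: 3, // consecutive scaler failures before fallback applies
				Replicas:         5, // replica count used while in fallback
			},
		},
	}
	_ = so
}
```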
pkg/scaling/executor/scale_executor.go (7 changes: 0 additions & 7 deletions)
@@ -125,10 +125,3 @@ func (e *scaleExecutor) setActiveCondition(ctx context.Context, logger logr.Logg
}
return e.setCondition(ctx, logger, object, status, reason, message, active)
}

func (e *scaleExecutor) setFallbackCondition(ctx context.Context, logger logr.Logger, object interface{}, status metav1.ConditionStatus, reason string, message string) error {
fallback := func(conditions kedav1alpha1.Conditions, status metav1.ConditionStatus, reason string, message string) {
conditions.SetFallbackCondition(status, reason, message)
}
return e.setCondition(ctx, logger, object, status, reason, message, fallback)
}
pkg/scaling/executor/scale_scaledobjects.go (24 changes: 6 additions & 18 deletions)
@@ -161,12 +161,12 @@ func (e *scaleExecutor) RequestScale(ctx context.Context, scaledObject *kedav1al
// isActive == false
switch {
case isError && scaledObject.Spec.Fallback != nil && scaledObject.Spec.Fallback.Replicas != 0:
// there are no active triggers, but a scaler responded with an error
// AND
// there is a fallback replicas count defined

// Scale to the fallback replicas count
e.doFallbackScaling(ctx, scaledObject, currentScale, logger, currentReplicas)
// We need to have this switch case even if just for logging.
// Otherwise, if we have `minReplicas=zero`, we will fall into the third case expression,
// which will scale the target to 0. Scaling the target to 0 means the HPA will not scale it to fallback.replicas
// after fallback.failureThreshold has passed because of what's described here:
// https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#implicit-maintenance-mode-deactivation
logger.V(1).Info("ScaleTarget will fallback to Fallback.Replicas after Fallback.FailureThreshold")
case isError && scaledObject.Spec.Fallback == nil:
// there are no active triggers, but a scaler responded with an error
// AND
@@ -230,18 +230,6 @@ func (e *scaleExecutor) RequestScale(ctx context.Context, scaledObject *kedav1al
}
}

func (e *scaleExecutor) doFallbackScaling(ctx context.Context, scaledObject *kedav1alpha1.ScaledObject, currentScale *autoscalingv1.Scale, logger logr.Logger, currentReplicas int32) {
_, err := e.updateScaleOnScaleTarget(ctx, scaledObject, currentScale, scaledObject.Spec.Fallback.Replicas)
if err == nil {
logger.Info("Successfully set ScaleTarget replicas count to ScaledObject fallback.replicas",
"Original Replicas Count", currentReplicas,
"New Replicas Count", scaledObject.Spec.Fallback.Replicas)
}
if e := e.setFallbackCondition(ctx, logger, scaledObject, metav1.ConditionTrue, "FallbackExists", "At least one trigger is falling back on this scaled object"); e != nil {
logger.Error(e, "Error setting fallback condition")
}
}

// An object will be scaled down to 0 only if it's passed its cooldown period
// or if LastActiveTime is nil
func (e *scaleExecutor) scaleToZeroOrIdle(ctx context.Context, logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject, scale *autoscalingv1.Scale) {
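To summarize the behaviour change in `RequestScale`: instead of calling `doFallbackScaling` (now deleted) and rewriting the scale subresource itself, the executor only logs and leaves the target untouched, so the HPA can move it to `fallback.replicas` once `fallback.failureThreshold` is reached. Below is a condensed, hypothetical restatement of the `isActive == false` switch, not the verbatim source.

```go
package executor

import (
	"github.com/go-logr/logr"

	kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1"
)

// decideWhenInactive is an illustrative sketch, not the actual executor code.
func decideWhenInactive(logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject, isError bool) {
	switch {
	case isError && scaledObject.Spec.Fallback != nil && scaledObject.Spec.Fallback.Replicas != 0:
		// A scaler errored and a fallback is configured: do not touch the scale
		// subresource here. The HPA scales the target to fallback.replicas once
		// fallback.failureThreshold consecutive failures have passed. Scaling the
		// target to 0 from this path (e.g. when minReplicaCount is 0) would put the
		// HPA into implicit maintenance-mode deactivation and block that.
		logger.V(1).Info("ScaleTarget will fallback to Fallback.Replicas after Fallback.FailureThreshold")
	case isError && scaledObject.Spec.Fallback == nil:
		// A scaler errored and no fallback is configured: handled by the existing
		// branch (unchanged by this PR, not shown in the hunk above).
	default:
		// No scaler error: scale to minReplicaCount (possibly zero) or idle, as before.
	}
}
```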
pkg/scaling/executor/scale_scaledobjects_test.go (62 changes: 0 additions & 62 deletions)
@@ -33,68 +33,6 @@ import (
"github.com/kedacore/keda/v2/pkg/mock/mock_scale"
)

func TestScaleToFallbackReplicasWhenNotActiveAndIsError(t *testing.T) {
ctrl := gomock.NewController(t)
client := mock_client.NewMockClient(ctrl)
recorder := record.NewFakeRecorder(1)
mockScaleClient := mock_scale.NewMockScalesGetter(ctrl)
mockScaleInterface := mock_scale.NewMockScaleInterface(ctrl)
statusWriter := mock_client.NewMockStatusWriter(ctrl)

scaleExecutor := NewScaleExecutor(client, mockScaleClient, nil, recorder)

scaledObject := v1alpha1.ScaledObject{
ObjectMeta: v1.ObjectMeta{
Name: "name",
Namespace: "namespace",
},
Spec: v1alpha1.ScaledObjectSpec{
ScaleTargetRef: &v1alpha1.ScaleTarget{
Name: "name",
},
Fallback: &v1alpha1.Fallback{
FailureThreshold: 3,
Replicas: 5,
},
},
Status: v1alpha1.ScaledObjectStatus{
ScaleTargetGVKR: &v1alpha1.GroupVersionKindResource{
Group: "apps",
Kind: "Deployment",
},
},
}

scaledObject.Status.Conditions = *v1alpha1.GetInitializedConditions()

numberOfReplicas := int32(2)

client.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).SetArg(2, appsv1.Deployment{
Spec: appsv1.DeploymentSpec{
Replicas: &numberOfReplicas,
},
})

scale := &autoscalingv1.Scale{
Spec: autoscalingv1.ScaleSpec{
Replicas: numberOfReplicas,
},
}

mockScaleClient.EXPECT().Scales(gomock.Any()).Return(mockScaleInterface).Times(2)
mockScaleInterface.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(scale, nil)
mockScaleInterface.EXPECT().Update(gomock.Any(), gomock.Any(), gomock.Eq(scale), gomock.Any())

client.EXPECT().Status().Times(2).Return(statusWriter)
statusWriter.EXPECT().Patch(gomock.Any(), gomock.Any(), gomock.Any()).Times(2)

scaleExecutor.RequestScale(context.TODO(), &scaledObject, false, true, &ScaleExecutorOptions{})

assert.Equal(t, int32(5), scale.Spec.Replicas)
condition := scaledObject.Status.Conditions.GetFallbackCondition()
assert.Equal(t, true, condition.IsTrue())
}

func TestScaleToMinReplicasWhenNotActive(t *testing.T) {
ctrl := gomock.NewController(t)
client := mock_client.NewMockClient(ctrl)
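The deleted test above asserted that the executor itself set the scale subresource to `fallback.replicas` and flipped the Fallback condition. A hypothetical replacement, not part of this PR and reusing the fixtures and mocks from that test, would instead expect no `Update` call, since the executor now only logs and the HPA performs the fallback scaling after `failureThreshold` is reached.

```go
// Sketch of replacement expectations (hypothetical, reusing scaledObject, scale,
// numberOfReplicas, and the mocks from the removed test above).
mockScaleClient.EXPECT().Scales(gomock.Any()).Return(mockScaleInterface)
mockScaleInterface.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(scale, nil)
// Deliberately no mockScaleInterface.EXPECT().Update(...): the executor must not rescale the target here.

scaleExecutor.RequestScale(context.TODO(), &scaledObject, false, true, &ScaleExecutorOptions{})

// The replica count is untouched; the HPA drives the fallback later.
assert.Equal(t, numberOfReplicas, scale.Spec.Replicas)
```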