6 changes: 4 additions & 2 deletions pkg/apis/serving/v1/revision_lifecycle.go
@@ -204,8 +204,10 @@ func (rs *RevisionStatus) PropagateAutoscalerStatus(ps *autoscalingv1alpha1.PodA

// Mark resource unavailable if we don't have a Service Name and the deployment is ready
// This can happen when we have initial scale set to 0
-	if rs.GetCondition(RevisionConditionResourcesAvailable).IsTrue() && ps.ServiceName == "" {
-		rs.MarkResourcesAvailableUnknown(ReasonDeploying, "")
+	if rs.GetCondition(RevisionConditionResourcesAvailable).IsTrue() {
+		if ps.ServiceName == "" || !ps.IsScaleTargetInitialized() {
+			rs.MarkResourcesAvailableUnknown(ReasonDeploying, "")
+		}
}

switch cond.Status {
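Note: IsScaleTargetInitialized is presumably a thin accessor over the PodAutoscaler's ScaleTargetInitialized condition (the same condition the new unit test below toggles). A minimal sketch of the assumed behavior, not the verbatim implementation:

// Sketch (assumption): reports whether the ScaleTargetInitialized condition
// on the PodAutoscaler status has become True, i.e. the initial scale target
// was reached at least once.
func (pas *PodAutoscalerStatus) IsScaleTargetInitialized() bool {
	return pas.GetCondition(PodAutoscalerConditionScaleTargetInitialized).IsTrue()
}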
38 changes: 38 additions & 0 deletions pkg/apis/serving/v1/revision_lifecycle_test.go
@@ -584,6 +584,44 @@ func TestPropagateAutoscalerStatus(t *testing.T) {
apistest.CheckConditionSucceeded(r, RevisionConditionResourcesAvailable, t)
}

func TestPropagateAutoscalerStatus_ScaleTargetNotInitialized(t *testing.T) {
r := &RevisionStatus{}
r.InitializeConditions()
apistest.CheckConditionOngoing(r, RevisionConditionReady, t)

// PodAutoscaler has no active condition, so we are just coming up.
r.PropagateAutoscalerStatus(&autoscalingv1alpha1.PodAutoscalerStatus{
Status: duckv1.Status{},
})

apistest.CheckConditionOngoing(r, RevisionConditionActive, t)
apistest.CheckConditionOngoing(r, RevisionConditionResourcesAvailable, t)

// The Deployment has been created, so we mark resources available
r.MarkResourcesAvailableTrue()

// PodAutoscaler resources have been created but initial scale target
// has not been achieved
r.PropagateAutoscalerStatus(&autoscalingv1alpha1.PodAutoscalerStatus{
ServiceName: "some-service",
Status: duckv1.Status{
Conditions: duckv1.Conditions{{
Type: autoscalingv1alpha1.PodAutoscalerConditionReady,
Status: corev1.ConditionUnknown,
}, {
Type: autoscalingv1alpha1.PodAutoscalerConditionScaleTargetInitialized,
Status: corev1.ConditionUnknown,
}, {
Type: autoscalingv1alpha1.PodAutoscalerConditionSKSReady,
Status: corev1.ConditionTrue,
}},
},
})

// ResourcesAvailable should be reverted to ongoing
apistest.CheckConditionOngoing(r, RevisionConditionResourcesAvailable, t)
}

func TestPropagateAutoscalerStatus_NoOverridingResourcesAvailable(t *testing.T) {
// Cases to test Ready condition
// we fix ScaleTargetInitialized to True
1 change: 1 addition & 0 deletions pkg/reconciler/revision/table_test.go
@@ -282,6 +282,7 @@ func TestReconcile(t *testing.T) {
WithRoutingState(v1.RoutingStateActive, fc),
MarkRevisionReady, WithRevisionObservedGeneration(1)),
pa("foo", "pa-not-ready",
WithScaleTargetInitialized,
WithPAStatusService("its-not-confidential"),
WithBufferedTraffic,
WithReachabilityReachable),
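Note: with the stricter check in revision_lifecycle.go above, a PodAutoscaler whose scale target was never initialized now flips ResourcesAvailable back to Unknown, so this table-test PA needs the ScaleTargetInitialized condition set for its expected status to stay unchanged. The test option is presumably a one-liner over the PA status helper; a sketch under that assumption:

// Sketch (assumption): WithScaleTargetInitialized marks the PodAutoscaler's
// ScaleTargetInitialized condition True, mirroring what the autoscaler does
// once the initial scale has been achieved.
func WithScaleTargetInitialized(pa *autoscalingv1alpha1.PodAutoscaler) {
	pa.Status.MarkScaleTargetInitialized()
}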
18 changes: 18 additions & 0 deletions pkg/testing/v1/configuration.go
@@ -138,3 +138,21 @@ func WithConfigRevisionIdleTimeoutSeconds(revisionIdleTimeoutSeconds int64) Conf
cfg.Spec.Template.Spec.IdleTimeoutSeconds = ptr.Int64(revisionIdleTimeoutSeconds)
}
}

// WithConfigVolume adds a volume and a corresponding volume mount to the Configuration's first container
func WithConfigVolume(name, mountPath string, volumeSource corev1.VolumeSource) ConfigOption {
return func(c *v1.Configuration) {
rt := &c.Spec.Template.Spec

rt.Containers[0].VolumeMounts = append(rt.Containers[0].VolumeMounts,
corev1.VolumeMount{
Name: name,
MountPath: mountPath,
})

rt.Volumes = append(rt.Volumes, corev1.Volume{
Name: name,
VolumeSource: volumeSource,
})
}
}
1 change: 1 addition & 0 deletions test/conformance.go
@@ -53,6 +53,7 @@ const (
Timeout = "timeout"
Volumes = "volumes"
SlowStart = "slowstart"
ConfigMimic = "configmimic"

// Constants for test image output.
PizzaPlanetText1 = "What a spaceport!"
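Note: ConfigMimic presumably names a test image whose probe results mimic whatever a mounted ConfigMap tells it; TestMinScaleTransition below relies on this to hold pods not-ready and release them one at a time. A minimal sketch of the assumed readiness behavior follows. The /etc/config mount path and the /healthz/ready probe on 8080 come from the test itself; the hostname-as-pod-name trick, the K_REVISION/K_CONFIGURATION fallbacks, and the lookup order are assumptions.

// Sketch (assumption): report ready only when this pod's entry in the mounted
// ConfigMap (a comma-separated probe list such as "startup,ready,live")
// contains "ready". Lookup order: pod name, then revision, then configuration.
package main

import (
	"log"
	"net/http"
	"os"
	"path/filepath"
	"strings"
)

// stateFor returns the probe list stored under key, or "" if absent.
func stateFor(key string) string {
	if key == "" {
		return ""
	}
	b, err := os.ReadFile(filepath.Join("/etc/config", key))
	if err != nil {
		return ""
	}
	return string(b)
}

func main() {
	http.HandleFunc("/healthz/ready", func(w http.ResponseWriter, _ *http.Request) {
		host, _ := os.Hostname() // in Kubernetes the hostname defaults to the pod name
		state := stateFor(host)
		if state == "" {
			state = stateFor(os.Getenv("K_REVISION")) // Knative-injected revision name
		}
		if state == "" {
			state = stateFor(os.Getenv("K_CONFIGURATION")) // configuration-level default
		}
		if strings.Contains(state, "ready") {
			w.WriteHeader(http.StatusOK)
			return
		}
		w.WriteHeader(http.StatusServiceUnavailable)
	})
	log.Fatal(http.ListenAndServe(":8080", nil))
}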
217 changes: 215 additions & 2 deletions test/e2e/minscale_readiness_test.go
@@ -30,15 +30,222 @@ import (
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/selection"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/wait"
"knative.dev/pkg/kmeta"
"knative.dev/serving/pkg/apis/autoscaling"
"knative.dev/serving/pkg/apis/serving"
v1 "knative.dev/serving/pkg/apis/serving/v1"
"knative.dev/serving/pkg/resources"
rtesting "knative.dev/serving/pkg/testing/v1"
"knative.dev/serving/test"
v1test "knative.dev/serving/test/v1"
)

func TestMinScaleTransition(t *testing.T) {
t.Parallel()
ctx := t.Context()

const (
minScale = 4
scalingWindow = autoscaling.WindowMin
)

clients := Setup(t)
cmClient := clients.KubeClient.CoreV1().ConfigMaps(test.ServingFlags.TestNamespace)

names := test.ResourceNames{
// Config and Route have different names to avoid false positives
Config: test.ObjectNameForTest(t),
Route: test.ObjectNameForTest(t),
Image: test.ConfigMimic,
}

test.EnsureTearDown(t, clients, &names)

firstRevision := kmeta.ChildName(names.Config, fmt.Sprintf("-%05d", 1))
secondRevision := kmeta.ChildName(names.Config, fmt.Sprintf("-%05d", 2))

cm := &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: names.Config,
Namespace: test.ServingFlags.TestNamespace,
},
Data: map[string]string{
// By default the pods of the configuration are ready
names.Config: "startup,ready,live",

// By default the second revision doesn't become ready
secondRevision: "startup",
},
}
cm, err := cmClient.Create(t.Context(), cm, metav1.CreateOptions{})
if err != nil {
t.Fatal("Failed to create create config map:", err)
}

test.EnsureCleanup(t, func() {
cmClient.Delete(context.Background(), cm.Name, metav1.DeleteOptions{})
})

t.Log("Creating route")
if _, err := v1test.CreateRoute(t, clients, names); err != nil {
t.Fatal("Failed to create Route:", err)
}

t.Log("Creating configuration")
cfg, err := v1test.CreateConfiguration(t, clients, names,
withMinScale(minScale),
// Make sure we scale down quickly after panic, before the autoscaler gets killed by chaosduck.
withWindow(scalingWindow),
rtesting.WithConfigReadinessProbe(&corev1.Probe{
ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/healthz/ready",
Port: intstr.FromInt(8080),
},
},
}),
rtesting.WithConfigVolume("state", "/etc/config", corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{
Name: cm.Name,
},
},
}),
)
if err != nil {
t.Fatal("Failed to create Configuration:", err)
}

// This will wait for the revision to be created
firstRevisionService := PrivateServiceName(t, clients, firstRevision)

t.Log("Waiting for first revision to become ready")
err = v1test.WaitForRevisionState(clients.ServingClient, firstRevision, v1test.IsRevisionReady, "RevisionIsReady")
if err != nil {
t.Fatalf("The Revision %q did not become ready: %v", firstRevision, err)
}

t.Log("Holding revision at minScale after becoming ready")
if lr, ok := ensureDesiredScale(clients, t, firstRevisionService, gte(minScale)); !ok {
t.Fatalf("The revision %q observed scale %d < %d after becoming ready", firstRevision, lr, minScale)
}

t.Log("Updating configuration")
if _, err := v1test.PatchConfig(t, clients, cfg, rtesting.WithConfigEnv(corev1.EnvVar{Name: "FOO", Value: "BAR"})); err != nil {
t.Fatal("Failed to update Configuration:", err)
}

t.Logf("Waiting for %v pods to be created", minScale)
var podList *corev1.PodList

err = wait.PollUntilContextTimeout(ctx, time.Second, time.Minute, true, func(context.Context) (bool, error) {
revLabel, err := labels.NewRequirement(serving.RevisionLabelKey, selection.Equals, []string{secondRevision})
if err != nil {
return false, fmt.Errorf("unable to create rev label: %w", err)
}

pods := clients.KubeClient.CoreV1().Pods(test.ServingFlags.TestNamespace)
podList, err = pods.List(ctx, metav1.ListOptions{
LabelSelector: revLabel.String(),
})
if err != nil {
return false, err
}

runningPods := 0
for _, pod := range podList.Items {
if pod.Status.Phase == corev1.PodRunning {
runningPods++
}
}

return runningPods == minScale, nil
})

if errors.Is(err, context.DeadlineExceeded) {
for _, pod := range podList.Items {
t.Logf("pod %s is in phase %s", pod.Name, pod.Status.Phase)
}
t.Fatal("Timed out waiting for pods to be running", err)
} else if err != nil {
t.Fatal("Failed waiting for pods to be running", err)
}

secondRevisionService := PrivateServiceName(t, clients, secondRevision)

// Mark each new pod ready in turn. For every pod except the last,
// also verify that the previous revision is still holding at minScale.
for i, pod := range podList.Items {
podName := pod.Name

t.Logf("Marking revision %s pod %s as ready", secondRevision, podName)
markPodAsReadyAndWait(t, clients, cm.Name, podName)
t.Logf("Revision %s pod %s is ready", secondRevision, podName)

t.Logf("Waiting for 2x scaling window %v to pass", 2*scalingWindow)

// Wait two autoscaling window durations
// Scaling decisions are made at the end of the window
time.Sleep(2 * scalingWindow)

if i >= len(podList.Items)-1 {
// When marking the last pod ready we want to skip
// ensuring the previous revision doesn't scale down
break
}

t.Log("Check original revision holding at min scale", minScale)
if _, ok := ensureDesiredScale(clients, t, firstRevisionService, gte(minScale)); !ok {
t.Fatalf("Revision %q was scaled down prematurely", firstRevision)
}
}

t.Log("Check new revision holding at min scale", minScale)
if _, ok := ensureDesiredScale(clients, t, secondRevisionService, gte(minScale)); !ok {
t.Fatalf("Revision %q was scaled down prematurely", secondRevision)
}

t.Log("Check old revision is scaled down")
if _, err := waitForDesiredScale(clients, firstRevisionService, eq(0)); err != nil {
t.Fatalf("Revision %q was not scaled down: %s", firstRevision, err)
}
}

// markPodAsReadyAndWait adds a per-pod entry to the state ConfigMap so the
// pod's readiness probe starts passing, then waits for the pod to report Ready.
func markPodAsReadyAndWait(t *testing.T, clients *test.Clients, cmName, podName string) {
ctx := t.Context()
coreClient := clients.KubeClient.CoreV1()
cmClient := coreClient.ConfigMaps(test.ServingFlags.TestNamespace)

patch := fmt.Sprintf(`[{"op":"add", "path":"/data/%s", "value": "startup,ready"}]`, podName)

_, err := cmClient.Patch(ctx, cmName, types.JSONPatchType, []byte(patch), metav1.PatchOptions{})
if err != nil {
t.Fatal("Failed to patch ConfigMap", err)
}

if err = wait.PollUntilContextTimeout(ctx, 10*time.Second, 2*time.Minute, true, func(context.Context) (bool, error) {
pod, err := clients.KubeClient.CoreV1().Pods(test.ServingFlags.TestNamespace).Get(context.Background(), podName, metav1.GetOptions{})
if err != nil {
return false, err
}

for _, cond := range pod.Status.Conditions {
if cond.Type == corev1.PodReady {
return cond.Status == corev1.ConditionTrue, nil
}
}

return false, nil
}); err != nil {
t.Fatalf("Pod %s didn't become ready: %s", podName, err)
}
}

func TestMinScale(t *testing.T) {
t.Parallel()

@@ -171,6 +378,12 @@ func lt(m int) func(int) bool {
}
}

// eq returns a predicate that is true when the observed scale equals m.
func eq(m int) func(int) bool {
return func(n int) bool {
return n == m
}
}

func withMinScale(minScale int) func(cfg *v1.Configuration) {
return func(cfg *v1.Configuration) {
if cfg.Spec.Template.Annotations == nil {
@@ -214,7 +427,7 @@ func waitForDesiredScale(clients *test.Clients, serviceName string, cond func(in
endpoints := clients.KubeClient.CoreV1().Endpoints(test.ServingFlags.TestNamespace)

// See https://github.com/knative/serving/issues/7727#issuecomment-706772507 for context.
-	return latestReady, wait.PollUntilContextTimeout(context.Background(), 250*time.Millisecond, 3*time.Minute, true, func(context.Context) (bool, error) {
+	return latestReady, wait.PollUntilContextTimeout(context.Background(), time.Second, 3*time.Minute, true, func(context.Context) (bool, error) {
endpoint, err := endpoints.Get(context.Background(), serviceName, metav1.GetOptions{})
if err != nil {
return false, nil //nolint:nilerr
@@ -227,7 +440,7 @@ func ensureDesiredScale(clients *test.Clients, t *testing.T, serviceName string, cond func(in
func ensureDesiredScale(clients *test.Clients, t *testing.T, serviceName string, cond func(int) bool) (latestReady int, observed bool) {
endpoints := clients.KubeClient.CoreV1().Endpoints(test.ServingFlags.TestNamespace)

-	err := wait.PollUntilContextTimeout(context.Background(), 250*time.Millisecond, 10*time.Second, true, func(context.Context) (bool, error) {
+	err := wait.PollUntilContextTimeout(context.Background(), time.Second, 30*time.Second, true, func(context.Context) (bool, error) {
endpoint, err := endpoints.Get(context.Background(), serviceName, metav1.GetOptions{})
if err != nil {
return false, nil //nolint:nilerr
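Note: assuming the usual knative/serving e2e workflow (a cluster prepared for the e2e suite and test images published with ko), the new test can presumably be run in isolation with something like:

go test -tags=e2e -count=1 ./test/e2e -run TestMinScaleTransition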