Commit d13d0e9

Add interceptor
Signed-off-by: Jorge Turrado <[email protected]>
1 parent 45a887e commit d13d0e9

17 files changed (+483, -543 lines)

interceptor/config/serving.go

Lines changed: 6 additions & 1 deletion
@@ -24,11 +24,16 @@ type Serving struct {
 	// ConfigMapCacheRsyncPeriod is the time interval
 	// for the configmap informer to rsync the local cache.
 	ConfigMapCacheRsyncPeriod time.Duration `envconfig:"KEDA_HTTP_SCALER_CONFIG_MAP_INFORMER_RSYNC_PERIOD" default:"60m"`
-	// The interceptor has an internal process that periodically fetches the state
+	// DEPRECATED: The interceptor has an internal process that periodically fetches the state
 	// of deployment that is running the servers it forwards to.
 	//
 	// This is the interval (in milliseconds) representing how often to do a fetch
 	DeploymentCachePollIntervalMS int `envconfig:"KEDA_HTTP_DEPLOYMENT_CACHE_POLLING_INTERVAL_MS" default:"250"`
+	// The interceptor has an internal process that periodically fetches the state
+	// of the endpoints that are running the servers it forwards to.
+	//
+	// This is the interval (in milliseconds) representing how often to do a fetch
+	EndpointsCachePollIntervalMS int `envconfig:"KEDA_HTTP_ENDPOINTS_CACHE_POLLING_INTERVAL_MS" default:"250"`
 }
 
 // Parse parses standard configs using envconfig and returns a pointer to the
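
For context, a minimal sketch of how the new EndpointsCachePollIntervalMS field would be populated, assuming the package's Parse helper wraps github.com/kelseyhightower/envconfig as the struct tags suggest (the Serving struct here is a trimmed local copy, for illustration only):

package main

import (
	"fmt"
	"log"
	"os"
	"time"

	"github.com/kelseyhightower/envconfig"
)

// Trimmed copy of the Serving struct from this diff, limited to the fields shown above.
type Serving struct {
	ConfigMapCacheRsyncPeriod     time.Duration `envconfig:"KEDA_HTTP_SCALER_CONFIG_MAP_INFORMER_RSYNC_PERIOD" default:"60m"`
	DeploymentCachePollIntervalMS int           `envconfig:"KEDA_HTTP_DEPLOYMENT_CACHE_POLLING_INTERVAL_MS" default:"250"`
	EndpointsCachePollIntervalMS  int           `envconfig:"KEDA_HTTP_ENDPOINTS_CACHE_POLLING_INTERVAL_MS" default:"250"`
}

func main() {
	// Operators switch to the new variable; the deprecated one is left unset.
	os.Setenv("KEDA_HTTP_ENDPOINTS_CACHE_POLLING_INTERVAL_MS", "500")

	var srv Serving
	if err := envconfig.Process("", &srv); err != nil {
		log.Fatal(err)
	}
	fmt.Println(srv.EndpointsCachePollIntervalMS)  // 500
	fmt.Println(srv.DeploymentCachePollIntervalMS) // 250 (default of the deprecated field)
}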

interceptor/config/timeouts.go

Lines changed: 2 additions & 2 deletions
@@ -16,9 +16,9 @@ type Timeouts struct {
 	// ResponseHeaderTimeout is how long to wait between when the HTTP request
 	// is sent to the backing app and when response headers need to arrive
 	ResponseHeader time.Duration `envconfig:"KEDA_RESPONSE_HEADER_TIMEOUT" default:"500ms"`
-	// DeploymentReplicas is how long to wait for the backing deployment
+	// WorkloadReplicas is how long to wait for the backing workload
 	// to have 1 or more replicas before connecting and sending the HTTP request.
-	DeploymentReplicas time.Duration `envconfig:"KEDA_CONDITION_WAIT_TIMEOUT" default:"1500ms"`
+	WorkloadReplicas time.Duration `envconfig:"KEDA_CONDITION_WAIT_TIMEOUT" default:"1500ms"`
 	// ForceHTTP2 toggles whether to try to force HTTP2 for all requests
 	ForceHTTP2 bool `envconfig:"KEDA_HTTP_FORCE_HTTP2" default:"false"`
 	// MaxIdleConns is the max number of connections that can be idle in the
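
The rename here is source-level only: the KEDA_CONDITION_WAIT_TIMEOUT variable and its 1500ms default are unchanged, so existing installations need no configuration change. A minimal sketch, again assuming the kelseyhightower/envconfig mechanism implied by the struct tags (trimmed local copy of Timeouts):

package main

import (
	"fmt"
	"log"
	"os"
	"time"

	"github.com/kelseyhightower/envconfig"
)

// Trimmed copy of the Timeouts struct, showing only the renamed field.
type Timeouts struct {
	WorkloadReplicas time.Duration `envconfig:"KEDA_CONDITION_WAIT_TIMEOUT" default:"1500ms"`
}

func main() {
	// The same environment variable keeps working after the Go field rename.
	os.Setenv("KEDA_CONDITION_WAIT_TIMEOUT", "2s")

	var t Timeouts
	if err := envconfig.Process("", &t); err != nil {
		log.Fatal(err)
	}
	fmt.Println(t.WorkloadReplicas) // 2s
}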

interceptor/config/validate.go

Lines changed: 26 additions & 6 deletions
@@ -2,16 +2,36 @@ package config
 
 import (
 	"fmt"
+	"os"
 	"time"
+
+	"github.com/go-logr/logr"
 )
 
-func Validate(srvCfg Serving, timeoutsCfg Timeouts) error {
-	deplCachePollInterval := time.Duration(srvCfg.DeploymentCachePollIntervalMS) * time.Millisecond
-	if timeoutsCfg.DeploymentReplicas < deplCachePollInterval {
+func Validate(srvCfg *Serving, timeoutsCfg Timeouts, lggr logr.Logger) error {
+	// TODO(jorturfer): delete this on v0.8.0
+	_, deploymentEnvExist := os.LookupEnv("KEDA_HTTP_DEPLOYMENT_CACHE_POLLING_INTERVAL_MS")
+	_, endpointsEnvExist := os.LookupEnv("KEDA_HTTP_ENDPOINTS_CACHE_POLLING_INTERVAL_MS")
+	if deploymentEnvExist && endpointsEnvExist {
+		return fmt.Errorf(
+			"%s and %s are mutually exclusive",
+			"KEDA_HTTP_DEPLOYMENT_CACHE_POLLING_INTERVAL_MS",
+			"KEDA_HTTP_ENDPOINTS_CACHE_POLLING_INTERVAL_MS",
+		)
+	}
+	if deploymentEnvExist && !endpointsEnvExist {
+		srvCfg.EndpointsCachePollIntervalMS = srvCfg.DeploymentCachePollIntervalMS
+		srvCfg.DeploymentCachePollIntervalMS = 0
+		lggr.Info("WARNING: KEDA_HTTP_DEPLOYMENT_CACHE_POLLING_INTERVAL_MS has been deprecated in favor of KEDA_HTTP_ENDPOINTS_CACHE_POLLING_INTERVAL_MS and will be removed in v0.8.0")
+	}
+	// END TODO
+
+	endpointsCachePollInterval := time.Duration(srvCfg.EndpointsCachePollIntervalMS) * time.Millisecond
+	if timeoutsCfg.WorkloadReplicas < endpointsCachePollInterval {
 		return fmt.Errorf(
-			"deployment replicas timeout (%s) should not be less than the Deployment Cache Poll Interval (%s)",
-			timeoutsCfg.DeploymentReplicas,
-			deplCachePollInterval,
+			"workload replicas timeout (%s) should not be less than the Endpoints Cache Poll Interval (%s)",
+			timeoutsCfg.WorkloadReplicas,
+			endpointsCachePollInterval,
 		)
 	}
 	return nil
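
A sketch of the deprecation fallback added above: when only the old variable is set, Validate copies its value into EndpointsCachePollIntervalMS, zeroes the deprecated field, logs a warning, and then applies the timeout check against WorkloadReplicas. The import path github.com/kedacore/http-add-on/interceptor/config is assumed from the module layout, and only the fields touched in this diff are populated:

package main

import (
	"fmt"
	"os"
	"time"

	"github.com/go-logr/logr"

	"github.com/kedacore/http-add-on/interceptor/config"
)

func main() {
	// Simulate an operator who still sets only the deprecated variable.
	os.Setenv("KEDA_HTTP_DEPLOYMENT_CACHE_POLLING_INTERVAL_MS", "500")

	srvCfg := &config.Serving{DeploymentCachePollIntervalMS: 500}
	timeouts := config.Timeouts{WorkloadReplicas: 1500 * time.Millisecond}

	if err := config.Validate(srvCfg, timeouts, logr.Discard()); err != nil {
		fmt.Println("invalid config:", err)
		return
	}
	// The deprecated value was carried over, so the interceptor keeps its old polling cadence.
	fmt.Println(srvCfg.EndpointsCachePollIntervalMS) // 500
}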

interceptor/forward_wait_func.go

Lines changed: 30 additions & 26 deletions
@@ -5,65 +5,69 @@ import (
 	"fmt"
 
 	"github.com/go-logr/logr"
-	appsv1 "k8s.io/api/apps/v1"
+	v1 "k8s.io/api/core/v1"
 
 	"github.com/kedacore/http-add-on/pkg/k8s"
 )
 
 // forwardWaitFunc is a function that waits for a condition
 // before proceeding to serve the request.
-type forwardWaitFunc func(context.Context, string, string) (int, error)
+type forwardWaitFunc func(context.Context, string, string) (bool, error)
 
-func deploymentCanServe(depl appsv1.Deployment) bool {
-	return depl.Status.ReadyReplicas > 0
+func workloadActiveEndpoints(endpoints v1.Endpoints) int {
+	total := 0
+	for _, subset := range endpoints.Subsets {
+		total += len(subset.Addresses)
+	}
+	return total
 }
 
-func newDeployReplicasForwardWaitFunc(
+func newWorkloadReplicasForwardWaitFunc(
 	lggr logr.Logger,
-	deployCache k8s.DeploymentCache,
+	endpointCache k8s.EndpointsCache,
 ) forwardWaitFunc {
-	return func(ctx context.Context, deployNS, deployName string) (int, error) {
+	return func(ctx context.Context, endpointNS, endpointName string) (bool, error) {
 		// get a watcher & its result channel before querying the
-		// deployment cache, to ensure we don't miss events
-		watcher, err := deployCache.Watch(deployNS, deployName)
+		// endpoints cache, to ensure we don't miss events
+		watcher, err := endpointCache.Watch(endpointNS, endpointName)
 		if err != nil {
-			return 0, err
+			return false, err
 		}
 		eventCh := watcher.ResultChan()
		defer watcher.Stop()
 
-		deployment, err := deployCache.Get(deployNS, deployName)
+		endpoints, err := endpointCache.Get(endpointNS, endpointName)
 		if err != nil {
-			// if we didn't get the initial deployment state, bail out
-			return 0, fmt.Errorf(
-				"error getting state for deployment %s/%s: %w",
-				deployNS,
-				deployName,
+			// if we didn't get the initial endpoints state, bail out
+			return false, fmt.Errorf(
+				"error getting state for endpoints %s/%s: %w",
+				endpointNS,
+				endpointName,
 				err,
 			)
 		}
-		// if there is 1 or more replica, we're done waiting
-		if deploymentCanServe(deployment) {
-			return int(deployment.Status.ReadyReplicas), nil
+		// if there is 1 or more active endpoint, we're done waiting
+		activeEndpoints := workloadActiveEndpoints(endpoints)
+		if activeEndpoints > 0 {
+			return false, nil
 		}
 
 		for {
 			select {
 			case event := <-eventCh:
-				deployment, ok := event.Object.(*appsv1.Deployment)
+				endpoints, ok := event.Object.(*v1.Endpoints)
 				if !ok {
 					lggr.Info(
-						"Didn't get a deployment back in event",
+						"Didn't get an endpoints object back in event",
 					)
-				} else if deploymentCanServe(*deployment) {
-					return 0, nil
+				} else if activeEndpoints := workloadActiveEndpoints(*endpoints); activeEndpoints > 0 {
+					return true, nil
 				}
 			case <-ctx.Done():
 				// otherwise, if the context is marked done before
 				// we're done waiting, fail.
-				return 0, fmt.Errorf(
-					"context marked done while waiting for deployment %s to reach > 0 replicas: %w",
-					deployName,
+				return false, fmt.Errorf(
+					"context marked done while waiting for workload to reach > 0 replicas: %w",
 					ctx.Err(),
 				)
 			}
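
The readiness rule the new wait function keys on is simply "some subset lists at least one ready address". A self-contained sketch of that counting rule (the helper is re-declared locally so the snippet runs outside this package; addresses in NotReadyAddresses are not counted):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// Local copy of the counting rule used by workloadActiveEndpoints above.
func countActiveEndpoints(endpoints v1.Endpoints) int {
	total := 0
	for _, subset := range endpoints.Subsets {
		total += len(subset.Addresses)
	}
	return total
}

func main() {
	endpoints := v1.Endpoints{
		Subsets: []v1.EndpointSubset{
			{
				// Ready pod IPs unblock waiting requests...
				Addresses: []v1.EndpointAddress{{IP: "10.0.0.1"}, {IP: "10.0.0.2"}},
				// ...while pods that are still starting do not.
				NotReadyAddresses: []v1.EndpointAddress{{IP: "10.0.0.3"}},
			},
		},
	}
	fmt.Println(countActiveEndpoints(endpoints)) // 2
}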

interceptor/forward_wait_func_test.go

Lines changed: 44 additions & 102 deletions
@@ -8,79 +8,63 @@ import (
 	"github.com/go-logr/logr"
 	"github.com/stretchr/testify/require"
 	"golang.org/x/sync/errgroup"
-	appsv1 "k8s.io/api/apps/v1"
-	corev1 "k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/watch"
 
 	"github.com/kedacore/http-add-on/pkg/k8s"
 )
 
 // Test to make sure the wait function returns a nil error if there is immediately
-// one replica on the target deployment
+// one active endpoint on the target deployment
 func TestForwardWaitFuncOneReplica(t *testing.T) {
 	ctx := context.Background()
 
 	const waitFuncWait = 1 * time.Second
 	r := require.New(t)
 	const ns = "testNS"
-	const deployName = "TestForwardingHandlerDeploy"
-	cache := k8s.NewFakeDeploymentCache()
-	cache.AddDeployment(*newDeployment(
-		ns,
-		deployName,
-		"myimage",
-		[]int32{123},
-		nil,
-		map[string]string{},
-		corev1.PullAlways,
-	))
+	const endpointsName = "TestForwardingHandler"
+	endpoints := *newEndpoint(ns, endpointsName)
+	cache := k8s.NewFakeEndpointsCache()
+	cache.Set(endpoints)
+	cache.SetSubsets(ns, endpointsName, 1)
 
 	ctx, done := context.WithTimeout(ctx, waitFuncWait)
 	defer done()
 	group, ctx := errgroup.WithContext(ctx)
 
-	waitFunc := newDeployReplicasForwardWaitFunc(
+	waitFunc := newWorkloadReplicasForwardWaitFunc(
 		logr.Discard(),
 		cache,
 	)
 
 	group.Go(func() error {
-		_, err := waitFunc(ctx, ns, deployName)
+		_, err := waitFunc(ctx, ns, endpointsName)
 		return err
 	})
 	r.NoError(group.Wait(), "wait function failed, but it shouldn't have")
 }
 
-// Test to make sure the wait function returns an error if there are no replicas, and that doesn't change
+// Test to make sure the wait function returns an error if there are no active endpoints, and that doesn't change
 // within a timeout
 func TestForwardWaitFuncNoReplicas(t *testing.T) {
 	ctx := context.Background()
 	const waitFuncWait = 1 * time.Second
 	r := require.New(t)
 	const ns = "testNS"
-	const deployName = "TestForwardingHandlerHoldsDeployment"
-	deployment := newDeployment(
-		ns,
-		deployName,
-		"myimage",
-		[]int32{123},
-		nil,
-		map[string]string{},
-		corev1.PullAlways,
-	)
-	deployment.Status.ReadyReplicas = 0
-	cache := k8s.NewFakeDeploymentCache()
-	cache.AddDeployment(*deployment)
+	const endpointsName = "TestForwardWaitFuncNoReplicas"
+	endpoints := *newEndpoint(ns, endpointsName)
+	cache := k8s.NewFakeEndpointsCache()
+	cache.Set(endpoints)
 
 	ctx, done := context.WithTimeout(ctx, waitFuncWait)
 	defer done()
-	waitFunc := newDeployReplicasForwardWaitFunc(
+	waitFunc := newWorkloadReplicasForwardWaitFunc(
 		logr.Discard(),
 		cache,
 	)
 
-	_, err := waitFunc(ctx, ns, deployName)
+	_, err := waitFunc(ctx, ns, endpointsName)
 	r.Error(err)
 }
 
@@ -90,100 +74,58 @@ func TestWaitFuncWaitsUntilReplicas(t *testing.T) {
 	totalWaitDur := 500 * time.Millisecond
 
 	const ns = "testNS"
-	const deployName = "TestForwardingHandlerHoldsDeployment"
-	deployment := newDeployment(
-		ns,
-		deployName,
-		"myimage",
-		[]int32{123},
-		nil,
-		map[string]string{},
-		corev1.PullAlways,
-	)
-	deployment.Spec.Replicas = k8s.Int32P(0)
-	cache := k8s.NewFakeDeploymentCache()
-	cache.AddDeployment(*deployment)
+	const endpointsName = "TestForwardingHandlerHolds"
+
+	endpoints := *newEndpoint(ns, endpointsName)
+	cache := k8s.NewFakeEndpointsCache()
+	cache.Set(endpoints)
 	// create a watcher first so that the goroutine
 	// can later fetch it and send a message on it
-	_, err := cache.Watch(ns, deployName)
+	_, err := cache.Watch(ns, endpointsName)
 	r.NoError(err)
 
 	ctx, done := context.WithTimeout(ctx, totalWaitDur)
-	waitFunc := newDeployReplicasForwardWaitFunc(
+	waitFunc := newWorkloadReplicasForwardWaitFunc(
 		logr.Discard(),
 		cache,
 	)
 
-	// this channel will be closed immediately after the replicas were increased
-	replicasIncreasedCh := make(chan struct{})
+	// this channel will be closed immediately after the active endpoints were increased
+	activeEndpointsIncreasedCh := make(chan struct{})
 	go func() {
 		time.Sleep(totalWaitDur / 2)
-		watcher := cache.GetWatcher(ns, deployName)
+		watcher := cache.GetWatcher(ns, endpointsName)
 		r.NotNil(watcher, "watcher was not found")
-		modifiedDeployment := deployment.DeepCopy()
-		modifiedDeployment.Spec.Replicas = k8s.Int32P(1)
-		watcher.Action(watch.Modified, modifiedDeployment)
-		close(replicasIncreasedCh)
+		modifiedEndpoints := endpoints.DeepCopy()
+		modifiedEndpoints.Subsets = []v1.EndpointSubset{
+			{
+				Addresses: []v1.EndpointAddress{
+					{IP: "1.2.3.4"},
+				},
+			},
+		}
+		watcher.Action(watch.Modified, modifiedEndpoints)
+		close(activeEndpointsIncreasedCh)
 	}()
-	_, err = waitFunc(ctx, ns, deployName)
+	_, err = waitFunc(ctx, ns, endpointsName)
 	r.NoError(err)
 	done()
 }
 
-// newDeployment creates a new deployment object
+// newEndpoint creates a new endpoints object
 // with the given name and the given image. This does not actually create
-// the deployment in the cluster, it just creates the deployment object
+// the endpoints in the cluster, it just creates the endpoints object
 // in memory
-func newDeployment(
+func newEndpoint(
 	namespace,
-	name,
-	image string,
-	ports []int32,
-	env []corev1.EnvVar,
-	labels map[string]string,
-	pullPolicy corev1.PullPolicy,
-) *appsv1.Deployment {
-	containerPorts := make([]corev1.ContainerPort, len(ports))
-	for i, port := range ports {
-		containerPorts[i] = corev1.ContainerPort{
-			ContainerPort: port,
-		}
-	}
-	deployment := &appsv1.Deployment{
-		TypeMeta: metav1.TypeMeta{
-			Kind: "Deployment",
-		},
+	name string,
+) *v1.Endpoints {
+	endpoints := &v1.Endpoints{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      name,
 			Namespace: namespace,
-			Labels:    labels,
-		},
-		Spec: appsv1.DeploymentSpec{
-			Selector: &metav1.LabelSelector{
-				MatchLabels: labels,
-			},
-			Replicas: k8s.Int32P(1),
-			Template: corev1.PodTemplateSpec{
-				ObjectMeta: metav1.ObjectMeta{
-					Labels: labels,
-				},
-				Spec: corev1.PodSpec{
-					Containers: []corev1.Container{
-						{
-							Image:           image,
-							Name:            name,
-							ImagePullPolicy: pullPolicy,
-							Ports:           containerPorts,
-							Env:             env,
-						},
-					},
-				},
-			},
-		},
-		Status: appsv1.DeploymentStatus{
-			ReadyReplicas: 1,
 		},
 	}
 
-	return deployment
+	return endpoints
 }
