From d99ca2503f333b06784362ec69958cd880942707 Mon Sep 17 00:00:00 2001
From: Sunjay Bhatia <5337253+sunjayBhatia@users.noreply.github.com>
Date: Wed, 18 Oct 2023 16:37:05 -0400
Subject: [PATCH 1/2] test/e2e: Add race detection in e2e tests (#5805)

Compile contour binary with -race flag and look for "DATA RACE" in
stderr. Fails test if found.

Signed-off-by: Sunjay Bhatia
---
 test/e2e/deployment.go | 9 +++++++--
 test/e2e/framework.go  | 2 +-
 2 files changed, 8 insertions(+), 3 deletions(-)

diff --git a/test/e2e/deployment.go b/test/e2e/deployment.go
index a4d95900910..16227b8cc0a 100644
--- a/test/e2e/deployment.go
+++ b/test/e2e/deployment.go
@@ -27,6 +27,7 @@ import (
 	"path/filepath"
 	"runtime"
 	"strconv"
+	"strings"
 	"time"
 
 	"github.com/onsi/gomega/gexec"
@@ -696,6 +697,7 @@ func localAddress() string {
 }
 
 func (d *Deployment) StopLocalContour(contourCmd *gexec.Session, configFile string) error {
+	defer os.RemoveAll(configFile)
 
 	// Look for the ENV variable to tell if this test run should use
 	// the ContourConfiguration file or the ContourConfiguration CRD.
@@ -714,8 +716,11 @@ func (d *Deployment) StopLocalContour(contourCmd *gexec.Session, configFile stri
 
 	// Default timeout of 1s produces test flakes,
 	// a minute should be more than enough to avoid them.
-	contourCmd.Terminate().Wait(time.Minute)
-	return os.RemoveAll(configFile)
+	logs := contourCmd.Terminate().Wait(time.Minute).Err.Contents()
+	if strings.Contains(string(logs), "DATA RACE") {
+		return errors.New("Detected data race, see log output above to diagnose")
+	}
+	return nil
 }
 
 // Convenience method for deploying the pieces of the deployment needed for
diff --git a/test/e2e/framework.go b/test/e2e/framework.go
index f001bb5700f..3af1dd02c8d 100644
--- a/test/e2e/framework.go
+++ b/test/e2e/framework.go
@@ -199,7 +199,7 @@ func NewFramework(inClusterTestSuite bool) *Framework {
 		}
 
 		var err error
-		contourBin, err = gexec.Build("github.com/projectcontour/contour/cmd/contour")
+		contourBin, err = gexec.Build("github.com/projectcontour/contour/cmd/contour", "-race")
 		require.NoError(t, err)
 	}
 

From dfb9aef40e291da71dbcad0cb62af5995fea2ba9 Mon Sep 17 00:00:00 2001
From: Sunjay Bhatia <5337253+sunjayBhatia@users.noreply.github.com>
Date: Wed, 18 Oct 2023 16:50:08 -0400
Subject: [PATCH 2/2] golangci-lint: Fix revive rules (#5857)

When we enabled the use-any rule we disabled all the default rules
that are run by revive (see: https://revive.run/docs#golangci-lint)

This change grabs all the default rules from
https://github.com/mgechev/revive/blob/master/defaults.toml and adds
the use-any rule

Also fixes outstanding lint issues

Signed-off-by: Sunjay Bhatia
---
 .golangci.yml                                 | 25 +++++++++++-
 cmd/contour/shutdownmanager.go                |  2 +-
 internal/contour/handler.go                   | 14 +++----
 internal/controller/gateway.go                |  8 ++--
 internal/controller/gatewayclass.go           | 10 ++---
 internal/dag/cache.go                         |  3 +-
 internal/dag/cache_test.go                    |  4 +-
 internal/dag/policy.go                        |  5 +--
 internal/envoy/route.go                       | 18 ++-----
 internal/envoy/v3/bootstrap.go                |  4 +-
 internal/envoy/v3/listener.go                 |  2 +-
 internal/k8s/helpers.go                       | 36 ++++++++---------
 internal/k8s/helpers_test.go                  | 40 +++++++++----------
 internal/k8s/log.go                           |  2 +-
 internal/k8s/statusaddress.go                 | 10 ++---
 internal/provisioner/objects/secret/secret.go |  2 +-
 internal/status/gatewayclass.go               |  5 +--
 internal/xds/v3/contour_test.go               |  8 ++--
 internal/xdscache/snapshot.go                 |  2 +-
 internal/xdscache/v3/endpointstranslator.go   |  2 +-
 internal/xdscache/v3/runtime.go               | 14 +++----
 test/e2e/incluster/incluster_test.go          |  2 +-
 test/e2e/incluster/leaderelection_test.go    |  2 +-
 23 files changed, 118 insertions(+), 102 deletions(-)

diff --git a/.golangci.yml b/.golangci.yml
index 5bc02ac688f..c3c40bc5527 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -32,7 +32,30 @@ linters-settings:
       - http.DefaultTransport
   revive:
     rules:
-      - name: use-any
+      - name: blank-imports
+      - name: context-as-argument
+      - name: context-keys-type
+      - name: dot-imports
+      - name: empty-block
+      - name: error-naming
+      - name: error-return
+      - name: error-strings
+      - name: errorf
+      - name: exported
+      - name: increment-decrement
+      - name: indent-error-flow
+      - name: package-comments
+      - name: range
+      - name: receiver-naming
+      - name: redefines-builtin-id
+      - name: superfluous-else
+      - name: time-naming
+      - name: unexported-return
+      - name: unreachable-code
+      - name: unused-parameter
+      - name: use-any
+      - name: var-declaration
+      - name: var-naming
 
 issues:
   exclude-rules:
diff --git a/cmd/contour/shutdownmanager.go b/cmd/contour/shutdownmanager.go
index f9dfd7e7874..b41f748ac81 100644
--- a/cmd/contour/shutdownmanager.go
+++ b/cmd/contour/shutdownmanager.go
@@ -98,7 +98,7 @@ func newShutdownContext() *shutdownContext {
 }
 
 // healthzHandler handles the /healthz endpoint which is used for the shutdown-manager's liveness probe.
-func (s *shutdownmanagerContext) healthzHandler(w http.ResponseWriter, r *http.Request) {
+func (s *shutdownmanagerContext) healthzHandler(w http.ResponseWriter, _ *http.Request) {
 	if _, err := w.Write([]byte(http.StatusText(http.StatusOK))); err != nil {
 		s.WithField("context", "healthzHandler").Error(err)
 	}
diff --git a/internal/contour/handler.go b/internal/contour/handler.go
index 6e2767fecae..2e9e6bee6eb 100644
--- a/internal/contour/handler.go
+++ b/internal/contour/handler.go
@@ -233,22 +233,22 @@ func (e *EventHandler) onUpdate(op any) bool {
 	case opAdd:
 		return e.builder.Source.Insert(op.obj)
 	case opUpdate:
-		old, oldOk := op.oldObj.(client.Object)
-		new, newOk := op.newObj.(client.Object)
+		oldO, oldOk := op.oldObj.(client.Object)
+		newO, newOk := op.newObj.(client.Object)
 		if oldOk && newOk {
-			equal, err := k8s.IsObjectEqual(old, new)
+			equal, err := k8s.IsObjectEqual(oldO, newO)
 			// Error is returned if there was no support for comparing equality of the specific object type.
 			// We can still process the object but it will be always considered as changed.
 			if err != nil {
 				e.WithError(err).WithField("op", "update").
-					WithField("name", new.GetName()).WithField("namespace", new.GetNamespace()).
-					WithField("gvk", reflect.TypeOf(new)).Errorf("error comparing objects")
+					WithField("name", newO.GetName()).WithField("namespace", newO.GetNamespace()).
+					WithField("gvk", reflect.TypeOf(newO)).Errorf("error comparing objects")
 			}
 			if equal {
 				// log the object name and namespace to help with debugging.
 				e.WithField("op", "update").
-					WithField("name", old.GetName()).WithField("namespace", old.GetNamespace()).
-					WithField("gvk", reflect.TypeOf(new)).Debugf("skipping update, no changes to relevant fields")
+					WithField("name", oldO.GetName()).WithField("namespace", oldO.GetNamespace()).
+					WithField("gvk", reflect.TypeOf(newO)).Debugf("skipping update, no changes to relevant fields")
 				return false
 			}
 			remove := e.builder.Source.Remove(op.oldObj)
diff --git a/internal/controller/gateway.go b/internal/controller/gateway.go
index 930b789de6d..eeb40fe3045 100644
--- a/internal/controller/gateway.go
+++ b/internal/controller/gateway.go
@@ -188,7 +188,7 @@ func (r *gatewayReconciler) Reconcile(ctx context.Context, request reconcile.Req
 	r.log.WithField("namespace", request.Namespace).WithField("name", request.Name).Info("reconciling gateway")
 
 	var gatewayClasses gatewayapi_v1beta1.GatewayClassList
-	if err := r.client.List(context.Background(), &gatewayClasses); err != nil {
+	if err := r.client.List(ctx, &gatewayClasses); err != nil {
 		return reconcile.Result{}, fmt.Errorf("error listing gateway classes")
 	}
 
@@ -219,7 +219,7 @@ func (r *gatewayReconciler) Reconcile(ctx context.Context, request reconcile.Req
 	}
 
 	var allGateways gatewayapi_v1beta1.GatewayList
-	if err := r.client.List(context.Background(), &allGateways); err != nil {
+	if err := r.client.List(ctx, &allGateways); err != nil {
 		return reconcile.Result{}, fmt.Errorf("error listing gateways")
 	}
 
@@ -280,8 +280,8 @@ func (r *gatewayReconciler) Reconcile(ctx context.Context, request reconcile.Req
 		})
 	} else {
 		// this branch makes testing easier by not going through the StatusUpdater.
-		copy := setGatewayNotAccepted(gw.DeepCopy())
-		if err := r.client.Status().Update(context.Background(), copy); err != nil {
+		gwCopy := setGatewayNotAccepted(gw.DeepCopy())
+		if err := r.client.Status().Update(ctx, gwCopy); err != nil {
 			r.log.WithError(err).Error("error updating gateway status")
 			return reconcile.Result{}, fmt.Errorf("error updating status of gateway %s/%s: %v", gw.Namespace, gw.Name, err)
 		}
diff --git a/internal/controller/gatewayclass.go b/internal/controller/gatewayclass.go
index 41fad5c61c2..429fc375481 100644
--- a/internal/controller/gatewayclass.go
+++ b/internal/controller/gatewayclass.go
@@ -136,7 +136,7 @@ func (r *gatewayClassReconciler) Reconcile(ctx context.Context, request reconcil
 	r.log.WithField("name", request.Name).Info("reconciling gatewayclass")
 
 	var gatewayClasses gatewayapi_v1beta1.GatewayClassList
-	if err := r.client.List(context.Background(), &gatewayClasses); err != nil {
+	if err := r.client.List(ctx, &gatewayClasses); err != nil {
 		return reconcile.Result{}, fmt.Errorf("error listing gatewayclasses: %w", err)
 	}
 
@@ -177,15 +177,15 @@ func (r *gatewayClassReconciler) Reconcile(ctx context.Context, request reconcil
 					panic(fmt.Sprintf("unsupported object type %T", obj))
 				}
 
-				return status.SetGatewayClassAccepted(context.Background(), r.client, gwc.DeepCopy(), accepted)
+				return status.SetGatewayClassAccepted(gwc.DeepCopy(), accepted)
 			}),
 		})
 	} else {
 		// this branch makes testing easier by not going through the StatusUpdater.
-		copy := status.SetGatewayClassAccepted(context.Background(), r.client, gc.DeepCopy(), accepted)
+		gcCopy := status.SetGatewayClassAccepted(gc.DeepCopy(), accepted)
 
-		if err := r.client.Status().Update(context.Background(), copy); err != nil {
-			return fmt.Errorf("error updating status of gateway class %s: %v", copy.Name, err)
+		if err := r.client.Status().Update(ctx, gcCopy); err != nil {
+			return fmt.Errorf("error updating status of gateway class %s: %v", gcCopy.Name, err)
 		}
 	}
 	return nil
diff --git a/internal/dag/cache.go b/internal/dag/cache.go
index ed79adfc572..132df4cf9fd 100644
--- a/internal/dag/cache.go
+++ b/internal/dag/cache.go
@@ -671,9 +671,8 @@ func (kc *KubernetesCache) LookupUpstreamValidation(uv *contour_api_v1.UpstreamV
 	if err != nil {
 		if _, ok := err.(DelegationNotPermittedError); ok {
 			return nil, err
-		} else {
-			return nil, fmt.Errorf("invalid CA Secret %q: %s", caCertificate, err)
 		}
+		return nil, fmt.Errorf("invalid CA Secret %q: %s", caCertificate, err)
 	}
 
 	if uv.SubjectName == "" {
diff --git a/internal/dag/cache_test.go b/internal/dag/cache_test.go
index 7dcb892a9ac..540885c17ba 100644
--- a/internal/dag/cache_test.go
+++ b/internal/dag/cache_test.go
@@ -1146,11 +1146,11 @@ func TestKubernetesCacheInsert(t *testing.T) {
 // correctly.
 type fakeReader struct{}
 
-func (r *fakeReader) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error {
+func (r *fakeReader) Get(_ context.Context, _ client.ObjectKey, _ client.Object, _ ...client.GetOption) error {
 	return errors.New("not implemented")
 }
 
-func (r *fakeReader) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error {
+func (r *fakeReader) List(_ context.Context, _ client.ObjectList, _ ...client.ListOption) error {
 	panic("not implemented")
 }
 
diff --git a/internal/dag/policy.go b/internal/dag/policy.go
index 85231724c05..39354334932 100644
--- a/internal/dag/policy.go
+++ b/internal/dag/policy.go
@@ -185,10 +185,9 @@ func headersPolicyRoute(policy *contour_api_v1.HeadersPolicy, allowHostRewrite b
 			if extractedHostRewriteHeader := extractHostRewriteHeaderValue(entry.Value); extractedHostRewriteHeader != "" {
 				hostRewriteHeader = http.CanonicalHeaderKey(extractedHostRewriteHeader)
 				continue
-			} else {
-				hostRewrite = entry.Value
-				continue
 			}
+			hostRewrite = entry.Value
+			continue
 		}
 		if msgs := validation.IsHTTPHeaderName(key); len(msgs) != 0 {
 			return nil, fmt.Errorf("invalid set header %q: %v", key, msgs)
diff --git a/internal/envoy/route.go b/internal/envoy/route.go
index c10817fe306..ab3c5fe4a13 100644
--- a/internal/envoy/route.go
+++ b/internal/envoy/route.go
@@ -71,18 +71,16 @@ func SingleSimpleCluster(route *dag.Route) bool {
 	// If the target cluster performs any kind of header manipulation,
 	// then we should use a WeightedCluster to encode the additional
 	// configuration.
-	if cluster.RequestHeadersPolicy == nil {
-		// no request headers policy
-	} else if len(cluster.RequestHeadersPolicy.Set) != 0 ||
-		len(cluster.RequestHeadersPolicy.Add) != 0 ||
-		len(cluster.RequestHeadersPolicy.Remove) != 0 ||
-		len(cluster.RequestHeadersPolicy.HostRewrite) != 0 {
+	if cluster.RequestHeadersPolicy != nil &&
+		(len(cluster.RequestHeadersPolicy.Set) != 0 ||
+			len(cluster.RequestHeadersPolicy.Add) != 0 ||
+			len(cluster.RequestHeadersPolicy.Remove) != 0 ||
+			len(cluster.RequestHeadersPolicy.HostRewrite) != 0) {
 		return false
 	}
-	if cluster.ResponseHeadersPolicy == nil {
-		// no response headers policy
-	} else if len(cluster.ResponseHeadersPolicy.Set) != 0 ||
-		len(cluster.ResponseHeadersPolicy.Remove) != 0 {
+	if cluster.ResponseHeadersPolicy != nil &&
+		(len(cluster.ResponseHeadersPolicy.Set) != 0 ||
+			len(cluster.ResponseHeadersPolicy.Remove) != 0) {
 		return false
 	}
 	if len(cluster.CookieRewritePolicies) > 0 {
diff --git a/internal/envoy/v3/bootstrap.go b/internal/envoy/v3/bootstrap.go
index 904c5288f26..ba39f1e2366 100644
--- a/internal/envoy/v3/bootstrap.go
+++ b/internal/envoy/v3/bootstrap.go
@@ -241,7 +241,7 @@ func bootstrapConfig(c *envoy.BootstrapConfig) *envoy_bootstrap_v3.Bootstrap {
 				LoadAssignment: &envoy_endpoint_v3.ClusterLoadAssignment{
 					ClusterName: "envoy-admin",
 					Endpoints: Endpoints(
-						UnixSocketAddress(c.GetAdminAddress(), c.GetAdminPort()),
+						UnixSocketAddress(c.GetAdminAddress()),
 					),
 				},
 			}},
@@ -252,7 +252,7 @@ func bootstrapConfig(c *envoy.BootstrapConfig) *envoy_bootstrap_v3.Bootstrap {
 		},
 		Admin: &envoy_bootstrap_v3.Admin{
 			AccessLog: adminAccessLog(c.GetAdminAccessLogPath()),
-			Address:   UnixSocketAddress(c.GetAdminAddress(), c.GetAdminPort()),
+			Address:   UnixSocketAddress(c.GetAdminAddress()),
 		},
 	}
 	if c.MaximumHeapSizeBytes > 0 {
diff --git a/internal/envoy/v3/listener.go b/internal/envoy/v3/listener.go
index 5ac6e971598..73403867ff9 100644
--- a/internal/envoy/v3/listener.go
+++ b/internal/envoy/v3/listener.go
@@ -660,7 +660,7 @@ func TCPProxy(statPrefix string, proxy *dag.TCPProxy, accesslogger []*accesslog.
 }
 
 // UnixSocketAddress creates a new Unix Socket envoy_core_v3.Address.
-func UnixSocketAddress(address string, port int) *envoy_core_v3.Address {
+func UnixSocketAddress(address string) *envoy_core_v3.Address {
 	return &envoy_core_v3.Address{
 		Address: &envoy_core_v3.Address_Pipe{
 			Pipe: &envoy_core_v3.Pipe{
diff --git a/internal/k8s/helpers.go b/internal/k8s/helpers.go
index ed4363edd01..a0ac86baddc 100644
--- a/internal/k8s/helpers.go
+++ b/internal/k8s/helpers.go
@@ -94,22 +94,22 @@ func isStatusEqual(objA, objB any) bool {
 //
 // Make an attempt to avoid comparing full objects since it can be very CPU intensive.
 // Prefer comparing Generation when only interested in spec changes.
-func IsObjectEqual(old, new client.Object) (bool, error) {
+func IsObjectEqual(oldObj, newObj client.Object) (bool, error) {
 	// Fast path for any object: when ResourceVersions are equal, the objects are equal.
 	// NOTE: This optimizes the case when controller-runtime executes full sync and sends updates for all objects.
-	if isResourceVersionEqual(old, new) {
+	if isResourceVersionEqual(oldObj, newObj) {
 		return true, nil
 	}
 
-	switch old := old.(type) {
+	switch oldObj := oldObj.(type) {
 	// Fast path for objects that implement Generation and where only spec changes matter.
 	// Status/annotations/labels changes are ignored.
 	// Generation is implemented in CRDs, Ingress and IngressClass.
 	case *contour_api_v1alpha1.ExtensionService,
 		*contour_api_v1.TLSCertificateDelegation:
-		return isGenerationEqual(old, new), nil
+		return isGenerationEqual(oldObj, newObj), nil
 
 	case *gatewayapi_v1beta1.GatewayClass,
 		*gatewayapi_v1beta1.Gateway,
@@ -118,36 +118,36 @@ func IsObjectEqual(old, new client.Object) (bool, error) {
 		*gatewayapi_v1alpha2.TLSRoute,
 		*gatewayapi_v1alpha2.GRPCRoute,
 		*gatewayapi_v1alpha2.TCPRoute:
-		return isGenerationEqual(old, new), nil
+		return isGenerationEqual(oldObj, newObj), nil
 
 	// Slow path: compare the content of the objects.
 	case *contour_api_v1.HTTPProxy, *networking_v1.Ingress:
-		return isGenerationEqual(old, new) &&
-			apiequality.Semantic.DeepEqual(old.GetAnnotations(), new.GetAnnotations()), nil
+		return isGenerationEqual(oldObj, newObj) &&
+			apiequality.Semantic.DeepEqual(oldObj.GetAnnotations(), newObj.GetAnnotations()), nil
 	case *v1.Secret:
-		if new, ok := new.(*v1.Secret); ok {
-			return reflect.DeepEqual(old.Data, new.Data), nil
+		if newObj, ok := newObj.(*v1.Secret); ok {
+			return reflect.DeepEqual(oldObj.Data, newObj.Data), nil
 		}
 	case *v1.Service:
-		if new, ok := new.(*v1.Service); ok {
-			return apiequality.Semantic.DeepEqual(old.Spec, new.Spec) &&
-				apiequality.Semantic.DeepEqual(old.Status, new.Status) &&
-				apiequality.Semantic.DeepEqual(old.GetAnnotations(), new.GetAnnotations()), nil
+		if newObj, ok := newObj.(*v1.Service); ok {
+			return apiequality.Semantic.DeepEqual(oldObj.Spec, newObj.Spec) &&
+				apiequality.Semantic.DeepEqual(oldObj.Status, newObj.Status) &&
+				apiequality.Semantic.DeepEqual(oldObj.GetAnnotations(), newObj.GetAnnotations()), nil
 		}
 	case *v1.Endpoints:
-		if new, ok := new.(*v1.Endpoints); ok {
-			return apiequality.Semantic.DeepEqual(old.Subsets, new.Subsets), nil
+		if newObj, ok := newObj.(*v1.Endpoints); ok {
+			return apiequality.Semantic.DeepEqual(oldObj.Subsets, newObj.Subsets), nil
 		}
 	case *v1.Namespace:
-		if new, ok := new.(*v1.Namespace); ok {
-			return apiequality.Semantic.DeepEqual(old.Labels, new.Labels), nil
+		if newObj, ok := newObj.(*v1.Namespace); ok {
+			return apiequality.Semantic.DeepEqual(oldObj.Labels, newObj.Labels), nil
 		}
 	}
 
 	// ResourceVersions are not equal and we don't know how to compare the object type.
 	// This should never happen and indicates that new type was added to the code but is missing in the switch above.
-	return false, fmt.Errorf("do not know how to compare %T", new)
+	return false, fmt.Errorf("do not know how to compare %T", newObj)
 }
 
 func isGenerationEqual(a, b client.Object) bool {
diff --git a/internal/k8s/helpers_test.go b/internal/k8s/helpers_test.go
index 1f605d5e49d..362cfb097fd 100644
--- a/internal/k8s/helpers_test.go
+++ b/internal/k8s/helpers_test.go
@@ -96,12 +96,12 @@ func TestIsObjectEqual(t *testing.T) {
 			assert.Equal(t, 2, len(objects), "expected 2 objects in file")
 
 			// Decode the objects.
-			old, _, err := deserializer.Decode([]byte(objects[0]), nil, nil)
+			oldObj, _, err := deserializer.Decode([]byte(objects[0]), nil, nil)
 			assert.NoError(t, err)
-			new, _, err := deserializer.Decode([]byte(objects[1]), nil, nil)
+			newObj, _, err := deserializer.Decode([]byte(objects[1]), nil, nil)
 			assert.NoError(t, err)
 
-			got, err := IsObjectEqual(old.(client.Object), new.(client.Object))
+			got, err := IsObjectEqual(oldObj.(client.Object), newObj.(client.Object))
 			assert.NoError(t, err)
 			assert.Equal(t, tc.equals, got)
 		})
@@ -109,7 +109,7 @@ func TestIsObjectEqual(t *testing.T) {
 }
 
 func TestIsEqualForResourceVersion(t *testing.T) {
-	old := &v1.Secret{
+	oldS := &v1.Secret{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:            "test",
 			Namespace:       "default",
@@ -120,23 +120,23 @@ func TestIsEqualForResourceVersion(t *testing.T) {
 		},
 	}
 
-	new := old.DeepCopy()
+	newS := oldS.DeepCopy()
 
 	// Objects with equal ResourceVersion should evaluate to true.
-	got, err := IsObjectEqual(old, new)
+	got, err := IsObjectEqual(oldS, newS)
 	assert.NoError(t, err)
 	assert.True(t, got)
 
 	// Differences in data should be ignored.
-	new.Data["foo"] = []byte("baz")
-	got, err = IsObjectEqual(old, new)
+	newS.Data["foo"] = []byte("baz")
+	got, err = IsObjectEqual(oldS, newS)
 	assert.NoError(t, err)
 	assert.True(t, got)
 }
 
 // TestIsEqualFallback compares with ConfigMap objects, which are not supported.
 func TestIsEqualFallback(t *testing.T) {
-	old := &v1.ConfigMap{
+	oldObj := &v1.ConfigMap{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:            "test",
 			Namespace:       "default",
@@ -147,37 +147,37 @@ func TestIsEqualFallback(t *testing.T) {
 		},
 	}
 
-	new := old.DeepCopy()
+	newObj := oldObj.DeepCopy()
 
 	// Any object (even unsupported types) with equal ResourceVersion should evaluate to true.
-	got, err := IsObjectEqual(old, new)
+	got, err := IsObjectEqual(oldObj, newObj)
 	assert.NoError(t, err)
 	assert.True(t, got)
 
 	// Unsupported types with unequal ResourceVersion should return an error.
-	new.ResourceVersion = "456"
-	got, err = IsObjectEqual(old, new)
+	newObj.ResourceVersion = "456"
+	got, err = IsObjectEqual(oldObj, newObj)
 	assert.Error(t, err)
 	assert.False(t, got)
 }
 
 func TestIsEqualForGeneration(t *testing.T) {
-	run := func(t *testing.T, old client.Object) {
+	run := func(t *testing.T, oldObj client.Object) {
 		t.Helper()
 
-		new := old.DeepCopyObject().(client.Object)
+		newObj := oldObj.DeepCopyObject().(client.Object)
 
 		// Set different ResourceVersion to ensure that Generation is the only difference.
-		old.SetResourceVersion("123")
-		new.SetResourceVersion("456")
+		oldObj.SetResourceVersion("123")
+		newObj.SetResourceVersion("456")
 
 		// Objects with equal Generation should evaluate to true.
-		got, err := IsObjectEqual(old, new)
+		got, err := IsObjectEqual(oldObj, newObj)
 		assert.NoError(t, err)
 		assert.True(t, got)
 
 		// Objects with unequal Generation should evaluate to false.
-		new.SetGeneration(old.GetGeneration() + 1)
-		got, err = IsObjectEqual(old, new)
+		newObj.SetGeneration(oldObj.GetGeneration() + 1)
+		got, err = IsObjectEqual(oldObj, newObj)
 		assert.NoError(t, err)
 		assert.False(t, got)
 	}
diff --git a/internal/k8s/log.go b/internal/k8s/log.go
index 6347878ecf9..ebc16b26150 100644
--- a/internal/k8s/log.go
+++ b/internal/k8s/log.go
@@ -138,6 +138,6 @@ func (l *alwaysEnabledLogSink) WithCallDepth(depth int) logr.LogSink {
 
 // Override Enabled to always return true since we rely on klog itself to do log
 // level filtering.
-func (l *alwaysEnabledLogSink) Enabled(level int) bool {
+func (l *alwaysEnabledLogSink) Enabled(_ int) bool {
 	return true
 }
diff --git a/internal/k8s/statusaddress.go b/internal/k8s/statusaddress.go
index 3b5d2e0c002..8544e8c05f9 100644
--- a/internal/k8s/statusaddress.go
+++ b/internal/k8s/statusaddress.go
@@ -61,7 +61,7 @@ func (s *StatusAddressUpdater) Set(status v1.LoadBalancerStatus) {
 // OnAdd updates the given Ingress/HTTPProxy/Gateway object with the
 // current load balancer address. Note that this method can be called
 // concurrently from an informer or from Contour itself.
-func (s *StatusAddressUpdater) OnAdd(obj any, isInInitialList bool) {
+func (s *StatusAddressUpdater) OnAdd(obj any, _ bool) {
 	// Hold the mutex to get a shallow copy. We don't need to
 	// deep copy, since all the references are read-only.
 	s.mu.Lock()
@@ -191,7 +191,7 @@ func (s *StatusAddressUpdater) OnAdd(obj any, isInInitialList bool) {
 	}
 }
 
-func (s *StatusAddressUpdater) OnUpdate(oldObj, newObj any) {
+func (s *StatusAddressUpdater) OnUpdate(_, newObj any) {
 	// We only care about the new object, because we're only updating its status.
 	// So, we can get away with just passing this call to OnAdd.
 
@@ -199,7 +199,7 @@ func (s *StatusAddressUpdater) OnUpdate(oldObj, newObj any) {
 
 }
 
-func (s *StatusAddressUpdater) OnDelete(obj any) {
+func (s *StatusAddressUpdater) OnDelete(_ any) {
 	// we don't need to update the status on resources that
 	// have been deleted.
 }
@@ -214,7 +214,7 @@ type ServiceStatusLoadBalancerWatcher struct {
 	Log         logrus.FieldLogger
 }
 
-func (s *ServiceStatusLoadBalancerWatcher) OnAdd(obj any, isInInitialList bool) {
+func (s *ServiceStatusLoadBalancerWatcher) OnAdd(obj any, _ bool) {
 	svc, ok := obj.(*v1.Service)
 	if !ok {
 		// not a service
@@ -230,7 +230,7 @@ func (s *ServiceStatusLoadBalancerWatcher) OnAdd(obj any, isInInitialList bool)
 	s.notify(svc.Status.LoadBalancer)
 }
 
-func (s *ServiceStatusLoadBalancerWatcher) OnUpdate(oldObj, newObj any) {
+func (s *ServiceStatusLoadBalancerWatcher) OnUpdate(_, newObj any) {
 	svc, ok := newObj.(*v1.Service)
 	if !ok {
 		// not a service
diff --git a/internal/provisioner/objects/secret/secret.go b/internal/provisioner/objects/secret/secret.go
index c29a1d31ecc..9d3bbfc2835 100644
--- a/internal/provisioner/objects/secret/secret.go
+++ b/internal/provisioner/objects/secret/secret.go
@@ -134,7 +134,7 @@ func EnsureXDSSecretsDeleted(ctx context.Context, cli client.Client, contour *mo
 			},
 		}
 
-		if err := cli.Delete(context.Background(), s); err != nil && !errors.IsNotFound(err) {
+		if err := cli.Delete(ctx, s); err != nil && !errors.IsNotFound(err) {
 			return err
 		}
 	}
diff --git a/internal/status/gatewayclass.go b/internal/status/gatewayclass.go
index 2cd162a8814..44c84f8c857 100644
--- a/internal/status/gatewayclass.go
+++ b/internal/status/gatewayclass.go
@@ -14,15 +14,12 @@
 package status
 
 import (
-	"context"
-
-	"sigs.k8s.io/controller-runtime/pkg/client"
 	gatewayapi_v1beta1 "sigs.k8s.io/gateway-api/apis/v1beta1"
 )
 
 // SetGatewayClassAccepted inserts or updates the Accepted condition
 // for the provided GatewayClass.
-func SetGatewayClassAccepted(ctx context.Context, cli client.Client, gc *gatewayapi_v1beta1.GatewayClass, accepted bool) *gatewayapi_v1beta1.GatewayClass {
+func SetGatewayClassAccepted(gc *gatewayapi_v1beta1.GatewayClass, accepted bool) *gatewayapi_v1beta1.GatewayClass {
 	gc.Status.Conditions = mergeConditions(gc.Status.Conditions, computeGatewayClassAcceptedCondition(gc, accepted))
 	return gc
 }
diff --git a/internal/xds/v3/contour_test.go b/internal/xds/v3/contour_test.go
index 3d631cefd77..99b07c93567 100644
--- a/internal/xds/v3/contour_test.go
+++ b/internal/xds/v3/contour_test.go
@@ -225,7 +225,7 @@ type mockResource struct {
 	typeurl  func() string
 }
 
-func (m *mockResource) Contents() []proto.Message                       { return m.contents() }
-func (m *mockResource) Query(names []string) []proto.Message            { return m.query(names) }
-func (m *mockResource) Register(ch chan int, last int, hints ...string) { m.register(ch, last) }
-func (m *mockResource) TypeURL() string                                 { return m.typeurl() }
+func (m *mockResource) Contents() []proto.Message                   { return m.contents() }
+func (m *mockResource) Query(names []string) []proto.Message        { return m.query(names) }
+func (m *mockResource) Register(ch chan int, last int, _ ...string) { m.register(ch, last) }
+func (m *mockResource) TypeURL() string                             { return m.typeurl() }
diff --git a/internal/xdscache/snapshot.go b/internal/xdscache/snapshot.go
index 26fa69839ab..c652b487668 100644
--- a/internal/xdscache/snapshot.go
+++ b/internal/xdscache/snapshot.go
@@ -63,7 +63,7 @@ func (s *SnapshotHandler) Refresh() {
 }
 
 // OnChange is called when the DAG is rebuilt and a new snapshot is needed.
-func (s *SnapshotHandler) OnChange(root *dag.DAG) {
+func (s *SnapshotHandler) OnChange(_ *dag.DAG) {
 	s.generateNewSnapshot()
 }
 
diff --git a/internal/xdscache/v3/endpointstranslator.go b/internal/xdscache/v3/endpointstranslator.go
index 48f539ef30a..fe15c3feb09 100644
--- a/internal/xdscache/v3/endpointstranslator.go
+++ b/internal/xdscache/v3/endpointstranslator.go
@@ -348,7 +348,7 @@ func equal(a, b map[string]*envoy_endpoint_v3.ClusterLoadAssignment) bool {
 	return true
 }
 
-func (e *EndpointsTranslator) OnAdd(obj any, isInInitialList bool) {
+func (e *EndpointsTranslator) OnAdd(obj any, _ bool) {
 	switch obj := obj.(type) {
 	case *v1.Endpoints:
 		if !e.cache.UpdateEndpoint(obj) {
diff --git a/internal/xdscache/v3/runtime.go b/internal/xdscache/v3/runtime.go
index b4124bfa217..bab794e1f04 100644
--- a/internal/xdscache/v3/runtime.go
+++ b/internal/xdscache/v3/runtime.go
@@ -28,28 +28,28 @@ type ConfigurableRuntimeSettings struct {
 }
 
 // RuntimeCache manages the contents of the gRPC RTDS cache.
-type runtimeCache struct {
+type RuntimeCache struct {
 	contour.Cond
 	runtimeKV map[string]*structpb.Value
 }
 
 // NewRuntimeCache builds a RuntimeCache with the provided runtime
 // settings that will be set in the runtime layer configured by Contour.
-func NewRuntimeCache(runtimeSettings ConfigurableRuntimeSettings) *runtimeCache {
+func NewRuntimeCache(runtimeSettings ConfigurableRuntimeSettings) *RuntimeCache {
 	runtimeKV := make(map[string]*structpb.Value)
 	if runtimeSettings.MaxRequestsPerIOCycle != nil && *runtimeSettings.MaxRequestsPerIOCycle > 0 {
 		runtimeKV["http.max_requests_per_io_cycle"] = structpb.NewNumberValue(float64(*runtimeSettings.MaxRequestsPerIOCycle))
 	}
-	return &runtimeCache{runtimeKV: runtimeKV}
+	return &RuntimeCache{runtimeKV: runtimeKV}
 }
 
 // Contents returns all Runtime layers.
-func (c *runtimeCache) Contents() []proto.Message {
+func (c *RuntimeCache) Contents() []proto.Message {
 	return protobuf.AsMessages(envoy_v3.RuntimeLayers(c.runtimeKV))
 }
 
 // Query returns only the "dynamic" layer if requested, otherwise empty.
-func (c *runtimeCache) Query(names []string) []proto.Message {
+func (c *RuntimeCache) Query(names []string) []proto.Message {
 	for _, name := range names {
 		if name == envoy_v3.DynamicRuntimeLayerName {
 			return protobuf.AsMessages(envoy_v3.RuntimeLayers(c.runtimeKV))
@@ -58,8 +58,8 @@ func (c *runtimeCache) Query(names []string) []proto.Message {
 	return []proto.Message{}
 }
 
-func (*runtimeCache) TypeURL() string { return resource.RuntimeType }
+func (*RuntimeCache) TypeURL() string { return resource.RuntimeType }
 
-func (c *runtimeCache) OnChange(root *dag.DAG) {
+func (c *RuntimeCache) OnChange(_ *dag.DAG) {
 	// DAG changes do not affect runtime layers at the moment.
 }
diff --git a/test/e2e/incluster/incluster_test.go b/test/e2e/incluster/incluster_test.go
index 89280ff5eaa..a4621ea95df 100644
--- a/test/e2e/incluster/incluster_test.go
+++ b/test/e2e/incluster/incluster_test.go
@@ -101,7 +101,7 @@ var _ = Describe("Incluster", func() {
 
 	f.NamespacedTest("smoke-test", testSimpleSmoke)
 
-	f.NamespacedTest("leader-election", testLeaderElection)
+	testLeaderElection()
 
 	f.NamespacedTest("projectcontour-resource-rbac", testProjectcontourResourcesRBAC)
 
diff --git a/test/e2e/incluster/leaderelection_test.go b/test/e2e/incluster/leaderelection_test.go
index 3458b33453e..2f42aa3c1f0 100644
--- a/test/e2e/incluster/leaderelection_test.go
+++ b/test/e2e/incluster/leaderelection_test.go
@@ -30,7 +30,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/client"
 )
 
-func testLeaderElection(namespace string) {
+func testLeaderElection() {
 	// This test is solely a check on the fact that we have set up leader
 	// election resources as expected. This does not test that internal
 	// components (e.g. status writers) are set up properly given a contour