diff --git a/.github/workflows/pr-golangci-lint.yaml b/.github/workflows/pr-golangci-lint.yaml
index 9fb188962ff6..544ab381ae05 100644
--- a/.github/workflows/pr-golangci-lint.yaml
+++ b/.github/workflows/pr-golangci-lint.yaml
@@ -30,6 +30,6 @@ jobs:
       - name: golangci-lint
         uses: golangci/golangci-lint-action@aaa42aa0628b4ae2578232a66b541047968fac86 # tag=v6.1.0
         with:
-          version: v1.59.0
+          version: v1.60.2
           args: --out-format=colored-line-number
           working-directory: ${{matrix.working-directory}}
diff --git a/.golangci.yml b/.golangci.yml
index 513c7c88c8d1..afe04e8ffa64 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -55,6 +55,10 @@ linters:
     - whitespace # unnecessary newlines

 linters-settings:
+  gosec:
+    excludes:
+      # integer overflow conversion int -> int32
+      - G115
   gci:
     sections:
       - standard # Standard section: captures all standard packages.
@@ -348,3 +352,7 @@ issues:
         - gocritic
       text: "deferInLoop: Possible resource leak, 'defer' is called in the 'for' loop"
      path: _test\.go
+    # Ignore non-constant format string in call to condition utils
+    - linters:
+        - govet
+      text: "non-constant format string in call to sigs\\.k8s\\.io\\/cluster-api\\/util\\/conditions\\."
diff --git a/cmd/clusterctl/cmd/describe_cluster.go b/cmd/clusterctl/cmd/describe_cluster.go
index 7a7994376133..9b15556e668e 100644
--- a/cmd/clusterctl/cmd/describe_cluster.go
+++ b/cmd/clusterctl/cmd/describe_cluster.go
@@ -338,7 +338,7 @@ func getRowName(obj ctrlclient.Object) string {
     name := objName
     if objectPrefix := tree.GetMetaName(obj); objectPrefix != "" {
-        name = fmt.Sprintf("%s - %s", objectPrefix, gray.Sprintf(name))
+        name = fmt.Sprintf("%s - %s", objectPrefix, gray.Sprintf("%s", name))
     }

     if !obj.GetDeletionTimestamp().IsZero() {
diff --git a/internal/controllers/machinedeployment/mdutil/util.go b/internal/controllers/machinedeployment/mdutil/util.go
index 9f234b0d1da4..eb6c1c6ce4a0 100644
--- a/internal/controllers/machinedeployment/mdutil/util.go
+++ b/internal/controllers/machinedeployment/mdutil/util.go
@@ -120,16 +120,16 @@ func SetDeploymentRevision(deployment *clusterv1.MachineDeployment, revision str
 func MaxRevision(ctx context.Context, allMSs []*clusterv1.MachineSet) int64 {
     log := ctrl.LoggerFrom(ctx)

-    max := int64(0)
+    maxVal := int64(0)
     for _, ms := range allMSs {
         if v, err := Revision(ms); err != nil {
             // Skip the machine sets when it failed to parse their revision information
             log.Error(err, fmt.Sprintf("Couldn't parse revision for MachineSet %s, deployment controller will skip it when reconciling revisions", ms.Name))
-        } else if v > max {
-            max = v
+        } else if v > maxVal {
+            maxVal = v
         }
     }
-    return max
+    return maxVal
 }

 // Revision returns the revision number of the input object.
diff --git a/internal/controllers/machinehealthcheck/machinehealthcheck_controller.go b/internal/controllers/machinehealthcheck/machinehealthcheck_controller.go
index 7dc2428b355f..5c09a39c7069 100644
--- a/internal/controllers/machinehealthcheck/machinehealthcheck_controller.go
+++ b/internal/controllers/machinehealthcheck/machinehealthcheck_controller.go
@@ -580,13 +580,13 @@ func isAllowedRemediation(mhc *clusterv1.MachineHealthCheck) (bool, int32, error
     var remediationAllowed bool
     var remediationCount int32
     if mhc.Spec.UnhealthyRange != nil {
-        min, max, err := getUnhealthyRange(mhc)
+        minVal, maxVal, err := getUnhealthyRange(mhc)
         if err != nil {
             return false, 0, err
         }
         unhealthyMachineCount := unhealthyMachineCount(mhc)
-        remediationAllowed = unhealthyMachineCount >= min && unhealthyMachineCount <= max
-        remediationCount = int32(max - unhealthyMachineCount)
+        remediationAllowed = unhealthyMachineCount >= minVal && unhealthyMachineCount <= maxVal
+        remediationCount = int32(maxVal - unhealthyMachineCount)
         return remediationAllowed, remediationCount, nil
     }

@@ -610,21 +610,21 @@ func getUnhealthyRange(mhc *clusterv1.MachineHealthCheck) (int, int, error) {

     parts := strings.Split(unhealthyRange, "-")

-    min, err := strconv.ParseUint(parts[0], 10, 32)
+    minVal, err := strconv.ParseUint(parts[0], 10, 32)
     if err != nil {
         return 0, 0, err
     }

-    max, err := strconv.ParseUint(parts[1], 10, 32)
+    maxVal, err := strconv.ParseUint(parts[1], 10, 32)
     if err != nil {
         return 0, 0, err
     }

-    if max < min {
-        return 0, 0, errors.Errorf("max value %d cannot be less than min value %d for unhealthyRange", max, min)
+    if maxVal < minVal {
+        return 0, 0, errors.Errorf("max value %d cannot be less than min value %d for unhealthyRange", maxVal, minVal)
     }

-    return int(min), int(max), nil
+    return int(minVal), int(maxVal), nil
 }

 func getMaxUnhealthy(mhc *clusterv1.MachineHealthCheck) (int, error) {
diff --git a/internal/controllers/machinehealthcheck/machinehealthcheck_targets_test.go b/internal/controllers/machinehealthcheck/machinehealthcheck_targets_test.go
index a95b895e7597..a44b338d2e33 100644
--- a/internal/controllers/machinehealthcheck/machinehealthcheck_targets_test.go
+++ b/internal/controllers/machinehealthcheck/machinehealthcheck_targets_test.go
@@ -406,7 +406,7 @@ func TestHealthCheckTargets(t *testing.T) {
     machineFailureMsgCondition := newFailedHealthCheckCondition(clusterv1.MachineHasFailureReason, "FailureMessage: %s", failureMsg)

     // Target for when the machine has the remediate machine annotation
-    annotationRemediationMsg := "Marked for remediation via remediate-machine annotation"
+    const annotationRemediationMsg = "Marked for remediation via remediate-machine annotation"
     testMachineAnnotationRemediation := testMachine.DeepCopy()
     testMachineAnnotationRemediation.Annotations = map[string]string{clusterv1.RemediateMachineAnnotation: ""}
     machineAnnotationRemediation := healthCheckTarget{
diff --git a/test/framework/docker_logcollector.go b/test/framework/docker_logcollector.go
index 6b63062d1f5b..4225b330c717 100644
--- a/test/framework/docker_logcollector.go
+++ b/test/framework/docker_logcollector.go
@@ -147,7 +147,7 @@ func (k DockerLogCollector) collectLogsFromNode(ctx context.Context, outputPath
             "tar", "--hard-dereference", "--dereference", "--directory", containerDir, "--create", "--file", "-", ".",
         )
         if err != nil {
-            return errors.Wrapf(err, execErr)
+            return errors.Wrap(err, execErr)
         }

         err = os.MkdirAll(outputDir, 0750)
diff --git a/test/infrastructure/docker/internal/docker/loadbalancer.go b/test/infrastructure/docker/internal/docker/loadbalancer.go
index f9a3c16117d9..c52b5002ed47 100644
--- a/test/infrastructure/docker/internal/docker/loadbalancer.go
+++ b/test/infrastructure/docker/internal/docker/loadbalancer.go
@@ -65,7 +65,10 @@ func NewLoadBalancer(ctx context.Context, cluster *clusterv1.Cluster, dockerClus
         return nil, err
     }

-    ipFamily, err := cluster.GetIPFamily() //nolint:staticcheck // We tolerate this until removal; after removal IPFamily will become an internal CAPD concept. See https://github.com/kubernetes-sigs/cluster-api/issues/7521.
+    // We tolerate this until removal;
+    // after removal IPFamily will become an internal CAPD concept.
+    // See https://github.com/kubernetes-sigs/cluster-api/issues/7521.
+    ipFamily, err := cluster.GetIPFamily() //nolint:staticcheck
     if err != nil {
         return nil, fmt.Errorf("create load balancer: %s", err)
     }
diff --git a/test/infrastructure/docker/internal/docker/machine.go b/test/infrastructure/docker/internal/docker/machine.go
index b354ed894d27..332e9b15f7e9 100644
--- a/test/infrastructure/docker/internal/docker/machine.go
+++ b/test/infrastructure/docker/internal/docker/machine.go
@@ -92,7 +92,10 @@ func NewMachine(ctx context.Context, cluster *clusterv1.Cluster, machine string,
         return nil, err
     }

-    ipFamily, err := cluster.GetIPFamily() //nolint:staticcheck // We tolerate this until removal; after removal IPFamily will become an internal CAPD concept. See https://github.com/kubernetes-sigs/cluster-api/issues/7521.
+    // We tolerate this until removal;
+    // after removal IPFamily will become an internal CAPD concept.
+    // See https://github.com/kubernetes-sigs/cluster-api/issues/7521.
+    ipFamily, err := cluster.GetIPFamily() //nolint:staticcheck
     if err != nil {
         return nil, fmt.Errorf("create docker machine: %s", err)
     }
@@ -126,7 +129,10 @@ func ListMachinesByCluster(ctx context.Context, cluster *clusterv1.Cluster, labe
         return nil, err
     }

-    ipFamily, err := cluster.GetIPFamily() //nolint:staticcheck // We tolerate this until removal; after removal IPFamily will become an internal CAPD concept. See https://github.com/kubernetes-sigs/cluster-api/issues/7521.
+    // We tolerate this until removal;
+    // after removal IPFamily will become an internal CAPD concept.
+    // See https://github.com/kubernetes-sigs/cluster-api/issues/7521 .
+    ipFamily, err := cluster.GetIPFamily() //nolint:staticcheck
     if err != nil {
         return nil, fmt.Errorf("list docker machines by cluster: %s", err)
     }
diff --git a/test/infrastructure/inmemory/pkg/runtime/cache/client.go b/test/infrastructure/inmemory/pkg/runtime/cache/client.go
index ff01f7523095..a06fd17acf2b 100644
--- a/test/infrastructure/inmemory/pkg/runtime/cache/client.go
+++ b/test/infrastructure/inmemory/pkg/runtime/cache/client.go
@@ -212,9 +212,8 @@ func (c *cache) store(resourceGroup string, obj client.Object, replaceExisting b
             return apierrors.NewConflict(unsafeGuessGroupVersionResource(objGVK).GroupResource(), objKey.String(), fmt.Errorf("object has been modified"))
         }

-        if err := c.beforeUpdate(resourceGroup, trackedObj, obj); err != nil {
-            return err
-        }
+        c.beforeUpdate(resourceGroup, trackedObj, obj)
+
         tracker.objects[objGVK][objKey] = obj.DeepCopyObject().(client.Object)
         updateTrackerOwnerReferences(tracker, trackedObj, obj, objRef)
         c.afterUpdate(resourceGroup, trackedObj, obj)
@@ -227,9 +226,8 @@ func (c *cache) store(resourceGroup string, obj client.Object, replaceExisting b
         return apierrors.NewNotFound(unsafeGuessGroupVersionResource(objGVK).GroupResource(), objKey.String())
     }

-    if err := c.beforeCreate(resourceGroup, obj); err != nil {
-        return err
-    }
+    c.beforeCreate(resourceGroup, obj)
+
     tracker.objects[objGVK][objKey] = obj.DeepCopyObject().(client.Object)
     updateTrackerOwnerReferences(tracker, nil, obj, objRef)
     c.afterCreate(resourceGroup, obj)
@@ -424,9 +422,7 @@ func (c *cache) doTryDeleteLocked(resourceGroup string, tracker *resourceGroupTr
     oldObj := obj.DeepCopyObject().(client.Object)
     now := metav1.Time{Time: time.Now().UTC()}
     obj.SetDeletionTimestamp(&now)
-    if err := c.beforeUpdate(resourceGroup, oldObj, obj); err != nil {
-        return false, apierrors.NewBadRequest(err.Error())
-    }
+    c.beforeUpdate(resourceGroup, oldObj, obj)
     objects[objKey] = obj
     c.afterUpdate(resourceGroup, oldObj, obj)
diff --git a/test/infrastructure/inmemory/pkg/runtime/cache/hooks.go b/test/infrastructure/inmemory/pkg/runtime/cache/hooks.go
index 44b65e1d1c8c..9b840bad40af 100644
--- a/test/infrastructure/inmemory/pkg/runtime/cache/hooks.go
+++ b/test/infrastructure/inmemory/pkg/runtime/cache/hooks.go
@@ -27,20 +27,19 @@ import (
     "sigs.k8s.io/controller-runtime/pkg/client"
 )

-func (c *cache) beforeCreate(_ string, obj client.Object) error {
+func (c *cache) beforeCreate(_ string, obj client.Object) {
     now := time.Now().UTC()
     obj.SetCreationTimestamp(metav1.Time{Time: now})
     // TODO: UID
     obj.SetAnnotations(appendAnnotations(obj, lastSyncTimeAnnotation, now.Format(time.RFC3339)))
     obj.SetResourceVersion(fmt.Sprintf("v%d", 1))
-    return nil
 }

 func (c *cache) afterCreate(resourceGroup string, obj client.Object) {
     c.informCreate(resourceGroup, obj)
 }

-func (c *cache) beforeUpdate(_ string, oldObj, newObj client.Object) error {
+func (c *cache) beforeUpdate(_ string, oldObj, newObj client.Object) {
     newObj.SetCreationTimestamp(oldObj.GetCreationTimestamp())
     newObj.SetResourceVersion(oldObj.GetResourceVersion())
     // TODO: UID
@@ -55,7 +54,6 @@ func (c *cache) beforeUpdate(_ string, oldObj, newObj client.Object) error {
         oldResourceVersion, _ := strconv.Atoi(strings.TrimPrefix(oldObj.GetResourceVersion(), "v"))
         newObj.SetResourceVersion(fmt.Sprintf("v%d", oldResourceVersion+1))
     }
-    return nil
 }

 func (c *cache) afterUpdate(resourceGroup string, oldObj, newObj client.Object) {