diff --git a/.golangci.yml b/.golangci.yml index 16d3e83ba4..f77b17e7c5 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -12,371 +12,59 @@ # See the License for the specific language governing permissions and # limitations under the License. -# This file contains all available configuration options -# with their default values. - -# options for analysis running run: - # default concurrency is a available CPU number - concurrency: 4 - - # timeout for analysis, e.g. 30s, 5m, default is 1m - timeout: 1m - - # exit code when at least one issue was found, default is 1 - issues-exit-code: 1 - - # include test files or not, default is true - tests: true - - # list of build tags, all linters use it. Default is empty list. - build-tags: - - mytag - - # which dirs to skip: issues from them won't be reported; - # can use regexp here: generated.*, regexp is applied on full path; - # default value is empty list, but default dirs are skipped independently - # from this option's value (see skip-dirs-use-default). - # "/" will be replaced by current OS file path separator to properly work - # on Windows. - skip-dirs: - - src/external_libs - - autogenerated_by_my_lib - - # default is true. Enables skipping of directories: - # vendor$, third_party$, testdata$, examples$, Godeps$, builtin$ - skip-dirs-use-default: true - - # which files to skip: they will be analyzed, but issues from them - # won't be reported. Default value is empty list, but there is - # no need to include all autogenerated files, we confidently recognize - # autogenerated files. If it's not please let us know. - # "/" will be replaced by current OS file path separator to properly work - # on Windows. - skip-files: - - ".*\\.my\\.go$" - - lib/bad.go - - # by default isn't set. If set we pass it to "go list -mod={option}". From "go help modules": - # If invoked with -mod=readonly, the go command is disallowed from the implicit - # automatic updating of go.mod described above. Instead, it fails when any changes - # to go.mod are needed. This setting is most useful to check that go.mod does - # not need updates, such as in a continuous integration and testing system. - # If invoked with -mod=vendor, the go command assumes that the vendor - # directory holds the correct copies of dependencies and ignores - # the dependency descriptions in go.mod. modules-download-mode: readonly - - # Allow multiple parallel golangci-lint instances running. - # If false (default) - golangci-lint acquires file lock on start. 
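Note: most of the removed block above only restated golangci-lint's defaults. The replacement configuration (further down in this file's diff) switches to `enable-all: true` with an explicit disable list, so newly added linters apply automatically and individual call sites opt out with a scoped `//nolint` directive plus a reason, as `cmd/main.go` does later in this patch. A minimal sketch of that directive style (the message is illustrative):

    package main

    import "fmt"

    func main() {
        // forbidigo flags fmt.Print* by default; a scoped nolint with a
        // trailing reason documents why this one call is acceptable.
        //nolint:forbidigo // plain stdout output is intentional here
        fmt.Println("v1.0.0")
    }
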
- allow-parallel-runners: false - - -# output configuration options -output: - # colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number" - format: colored-line-number - - # print lines of code with issue, default is true - print-issued-lines: true - - # print linter name in the end of issue text, default is true - print-linter-name: true - - # make issues output unique by line, default is true - uniq-by-line: true - - # add a prefix to the output file references; default is no prefix - path-prefix: "" - - -# all available settings of specific linters +issues: + exclude-rules: + # gosec recommends ignoring test files + - path: (.+)_test.go + linters: + - gosec + - path: tests/e2e + linters: + - gosec linters-settings: - dogsled: - # checks assignments with too many blank identifiers; default is 2 - max-blank-identifiers: 2 - dupl: - # tokens count to trigger issue, 150 by default - threshold: 100 - errcheck: - # report about not checking of errors in type assertions: `a := b.(MyStruct)`; - # default is false: such cases aren't reported by default. - check-type-assertions: false - - # report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`; - # default is false: such cases aren't reported by default. - check-blank: false - - # [deprecated] comma-separated list of pairs of the form pkg:regex - # the regex is used to ignore names within pkg. (default "fmt:.*"). - # see https://github.com/kisielk/errcheck#the-deprecated-method for details - ignore: fmt:.*,io/ioutil:^Read.* - - # path to a file containing a list of functions to exclude from checking - # see https://github.com/kisielk/errcheck#excluding-functions for details - #exclude: - exhaustive: - # indicates that switch statements are to be considered exhaustive if a - # 'default' case is present, even if all enum members aren't listed in the - # switch - default-signifies-exhaustive: false - funlen: - lines: 60 - statements: 40 - gocognit: - # minimal code complexity to report, 30 by default (but we recommend 10-20) - min-complexity: 10 - nestif: - # minimal complexity of if statements to report, 5 by default - min-complexity: 4 - goconst: - # minimal length of string constant, 3 by default - min-len: 3 - # minimal occurrences count to trigger, 3 by default - min-occurrences: 3 - gocritic: - # Which checks should be enabled; can't be combined with 'disabled-checks'; - # See https://go-critic.github.io/overview#checks-overview - # To check which checks are enabled run `GL_DEBUG=gocritic golangci-lint run` - # By default list of stable checks is used. - #enabled-checks: - # - rangeValCopy - - # Which checks should be disabled; can't be combined with 'enabled-checks'; default is empty - disabled-checks: - - regexpMust - - # Enable multiple checks by tags, run `GL_DEBUG=gocritic golangci-lint run` to see all tags and checks. - # Empty list by default. See https://github.com/go-critic/go-critic#usage -> section "Tags". 
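Note: the per-linter tuning being removed here is not carried over; the new configuration keeps only two settings blocks, disabling revive's increment-decrement rule and whitelisting dot imports of ginkgo and gomega for stylecheck (both appear further down in this file's diff). Dot-importing those two packages is the conventional Ginkgo test setup, sketched below with hypothetical package and suite names:

    package e2e_test

    import (
        "testing"

        // stylecheck flags dot imports by default; the whitelist added in
        // this patch permits them for ginkgo/gomega, where they are idiomatic.
        . "github.com/onsi/ginkgo/v2"
        . "github.com/onsi/gomega"
    )

    func TestSuite(t *testing.T) {
        RegisterFailHandler(Fail)
        RunSpecs(t, "example suite")
    }

    var _ = Describe("example", func() {
        It("adds", func() {
            Expect(1 + 1).To(Equal(2))
        })
    })
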
- enabled-tags: - - performance - disabled-tags: - - experimental - - settings: # settings passed to gocritic - captLocal: # must be valid enabled check name - paramsOnly: true - # rangeValCopy: - # sizeThreshold: 32 - gocyclo: - # minimal code complexity to report, 30 by default (but we recommend 10-20) - min-complexity: 10 - godot: - # check all top-level comments, not only declarations - check-all: false - godox: - # report any comments starting with keywords, this is useful for TODO or FIXME comments that - # might be left in the code accidentally and should be resolved before merging - keywords: # default keywords are TODO, BUG, and FIXME, these can be overwritten by this setting - - NOTE - - OPTIMIZE # marks code that should be optimized before merging - - HACK # marks hack-arounds that should be removed before merging - gofmt: - # simplify code: gofmt with `-s` option, true by default - simplify: true - goheader: - values: - const: - # define here const type values in format k:v, for example: - # YEAR: 2020 - # COMPANY: MY COMPANY - regexp: - # define here regexp type values, for example - # AUTHOR: .*@mycompany\.com - template: - # put here copyright header template for source code files, for example: - # {{ AUTHOR }} {{ COMPANY }} {{ YEAR }} - # SPDX-License-Identifier: Apache-2.0 - # - # Licensed under the Apache License, Version 2.0 (the "License"); - # you may not use this file except in compliance with the License. - # You may obtain a copy of the License at: - # - # http://www.apache.org/licenses/LICENSE-2.0 - # - # Unless required by applicable law or agreed to in writing, software - # distributed under the License is distributed on an "AS IS" BASIS, - # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - # See the License for the specific language governing permissions and - # limitations under the License. - template-path: - # also as alternative of directive 'template' you may put the path to file with the template source - goimports: - # put imports beginning with prefix after 3rd-party packages; - # it's a comma-separated list of prefixes - local-prefixes: github.com/org/project - golint: - # minimal confidence for issues, default is 0.8 - min-confidence: 0.8 - gomnd: - settings: - mnd: - # the list of enabled checks, see https://github.com/tommy-muehle/go-mnd/#checks for description. - checks: argument,case,condition,operation,return,assign - gomodguard: - allowed: - modules: # List of allowed modules - # - gopkg.in/yaml.v2 - domains: # List of allowed module domains - # - golang.org - blocked: - modules: # List of blocked modules - # - github.com/uudashr/go-module: # Blocked module - # recommendations: # Recommended modules that should be used instead (Optional) - # - golang.org/x/mod - # reason: "`mod` is the official go.mod parser library." # Reason why the recommended module should be used (Optional) - versions: # List of blocked module version constraints - # - github.com/mitchellh/go-homedir: # Blocked module with version constraint - # version: "< 1.1.0" # Version constraint, see https://github.com/Masterminds/semver#basic-comparisons - # reason: "testing if blocked version constraint works." # Reason why the version constraint exists. 
(Optional) - govet: - # report about shadowed variables - check-shadowing: true - - # settings per analyzer - settings: - printf: # analyzer name, run `go tool vet help` to see all analyzers - funcs: # run `go tool vet help printf` to see available settings for `printf` analyzer - - (github.com/golangci/golangci-lint/pkg/logutils.Log).Infof - - (github.com/golangci/golangci-lint/pkg/logutils.Log).Warnf - - (github.com/golangci/golangci-lint/pkg/logutils.Log).Errorf - - (github.com/golangci/golangci-lint/pkg/logutils.Log).Fatalf - - # enable or disable analyzers by name - enable: - - atomicalign - enable-all: false - disable: - - shadow - disable-all: false - depguard: - list-type: blacklist - include-go-root: false - packages: - - github.com/sirupsen/logrus - packages-with-error-message: - # specify an error message to output when a blacklisted package is used - - github.com/sirupsen/logrus: "logging is allowed only by logutils.Log" - lll: - # max line length, lines longer will be reported. Default is 120. - # '\t' is counted as 1 character by default, and can be changed with the tab-width option - line-length: 120 - # tab width in spaces. Default to 1. - tab-width: 1 - maligned: - # print struct with more effective memory layout or not, false by default - suggest-new: true - misspell: - # Correct spellings using locale preferences for US or UK. - # Default is to use a neutral variety of English. - # Setting locale to US will correct the British spelling of 'colour' to 'color'. - locale: US - ignore-words: - - someword - nakedret: - # make an issue if func has more lines of code than this setting and it has naked returns; default is 30 - max-func-lines: 30 - prealloc: - # XXX: we don't recommend using this linter before doing performance profiling. - # For most programs usage of prealloc will be a premature optimization. - - # Report preallocation suggestions only on simple loops that have no returns/breaks/continues/gotos in them. - # True by default. - simple: true - range-loops: true # Report preallocation suggestions on range loops, true by default - for-loops: false # Report preallocation suggestions on for loops, false by default - nolintlint: - # Enable to ensure that nolint directives are all used. Default is true. - allow-unused: false - # Disable to ensure that nolint directives don't have a leading space. Default is true. - allow-leading-space: true - # Exclude following linters from requiring an explanation. Default is []. - allow-no-explanation: [] - # Enable to require an explanation of nonzero length after each nolint directive. Default is false. - require-explanation: true - # Enable to require nolint directives to mention the specific linter being suppressed. Default is false. - require-specific: true - rowserrcheck: - packages: - - github.com/jmoiron/sqlx - testpackage: - # regexp pattern to skip files - skip-regexp: (export|internal)_test\.go - unparam: - # Inspect exported functions, default is false. Set to true if no external program/library imports your code. - # XXX: if you enable this setting, unparam will report a lot of false-positives in text editors: - # if it's called for subdir of a project it can't find external interfaces. All text editor integrations - # with golangci-lint call it on a directory with the changed file. - check-exported: false - unused: - # treat code as a program (not a library) and report unused exported identifiers; default is false. 
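Note: prealloc is disabled in the old configuration's linters block but is not on the new disable list, so it becomes active under `enable-all: true`. That is why later hunks in pkg/cloud/cloud.go replace `var tags []types.Tag` with a sized `make` call before the append loop. A minimal sketch of the pattern, with hypothetical names:

    package example

    // buildTags sizes the slice up front when the final length is known,
    // so append never has to grow the backing array mid-loop.
    func buildTags(in map[string]string) []string {
        tags := make([]string, 0, len(in)) // capacity known in advance
        for k, v := range in {
            tags = append(tags, k+"="+v)
        }
        return tags
    }
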
- # XXX: if you enable this setting, unused will report a lot of false-positives in text editors: - # if it's called for subdir of a project it can't find funcs usages. All text editor integrations - # with golangci-lint call it on a directory with the changed file. - check-exported: false - whitespace: - multi-if: false # Enforces newlines (or comments) after every multi-line if statement - multi-func: false # Enforces newlines (or comments) after every multi-line function signature - wsl: - # If true append is only allowed to be cuddled if appending value is - # matching variables, fields or types on line above. Default is true. - strict-append: true - # Allow calls and assignments to be cuddled as long as the lines have any - # matching variables, fields or types. Default is true. - allow-assign-and-call: true - # Allow multiline assignments to be cuddled. Default is true. - allow-multiline-assign: true - # Allow declarations (var) to be cuddled. - allow-cuddle-declarations: false - # Allow trailing comments in ending of blocks - allow-trailing-comment: false - # Force newlines in end of case at this limit (0 = never). - force-case-trailing-whitespace: 0 - # Force cuddling of err checks with err var assignment - force-err-cuddling: false - # Allow leading comments to be separated with empty liens - allow-separated-leading-comment: false - gofumpt: - # Choose whether or not to use the extra rules that are disabled - # by default - extra-rules: false - + revive: + rules: + # Using += 1 instead of ++ is fine + - name: increment-decrement + disabled: true + stylecheck: + # Dot importing ginkgo and gomega is standard practice + dot-import-whitelist: + - "github.com/onsi/gomega" + - "github.com/onsi/ginkgo/v2" linters: - enable: - - megacheck - - govet + enable-all: true disable: - - maligned - - prealloc - - scopelint - - gosec - disable-all: false - presets: - - bugs - - unused - fast: false - -severity: - # Default value is empty string. - # Set the default severity for issues. If severity rules are defined and the issues - # do not match or no severity is provided to the rule this will be the default - # severity applied. Severities should match the supported severity names of the - # selected out format. - # - Code climate: https://docs.codeclimate.com/docs/issues#issue-severity - # - Checkstyle: https://checkstyle.sourceforge.io/property_types.html#severity - # - Github: https://help.github.com/en/actions/reference/workflow-commands-for-github-actions#setting-an-error-message - default-severity: error - - # The default value is false. - # If set to true severity-rules regular expressions become case sensitive. - case-sensitive: false - - # Default value is empty list. - # When a list of severity rules are provided, severity information will be added to lint - # issues. Severity rules have the same filtering capability as exclude rules except you - # are allowed to specify one matcher per severity rule. - # Only affects out formats that support setting severity information. - rules: - - linters: - - dupl - severity: info + - govet # We already run with `make verify/govet` + # We do not use + - cyclop # Cyclomatic complexity + - depguard # We don't guard against dependencies + - dupl # Tracks code duplication. Too much duplication in tests. 
False positives in non-tests + - execinquery # Deprecated but enabled-by-default + - exhaustruct # Explicitly instantiating all structs is painful for K8s structs + - exportloopref # Deprecated but enabled-by-default + - funlen # Long func names happen + - gocognit # Cognitive complexity + - gocyclo # Cyclomatic complexity + - gofumpt # We don't rely on gofumpt + - gomnd # Magic Number Detection. Many false positives. + - gomoddirectives # We need `replace` in `go.mod` + - interfacebloat # No more than 10 interface methods + - ireturn # Accept interfaces return concrete types + - lll # Limit line length + - maintidx # Maintainability index + - mnd # Magic Number Detection. Many false positives + - nestif # Don't allow too many nested if statements + - nlreturn # Always have empty line before return + - testpackage # Require separate test package to catch leaky unexported dependencies + - varnamelen # Long var names happen + - wsl # Too strict of a whitespace linter + # Consider adding in future + - err113 # Do not create errors dynamically from scratch. Instead, wrap static (package-level) error. + - wrapcheck # Same as err113 + - gochecknoglobals # Do not allow global variables + - godox # Do not allow TODOs + - nonamedreturns # Need to nolint/refactor a few places our code + - paralleltest # There are many tests that aren't parallelized diff --git a/cmd/hooks/prestop.go b/cmd/hooks/prestop.go index 2f84424822..d948673fd3 100644 --- a/cmd/hooks/prestop.go +++ b/cmd/hooks/prestop.go @@ -16,6 +16,7 @@ package hooks import ( "context" + "errors" "fmt" "os" @@ -46,7 +47,7 @@ const clusterAutoscalerTaint = "ToBeDeletedByClusterAutoscaler" const v1KarpenterTaint = "karpenter.sh/disrupted" const v1beta1KarpenterTaint = "karpenter.sh/disruption" -// drainTaints includes taints used by K8s or autoscalers that signify node draining or pod eviction +// drainTaints includes taints used by K8s or autoscalers that signify node draining or pod eviction. 
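Note: the prestop hook hunks below replace bare type assertions (`obj.(*storagev1.VolumeAttachment)`, which panic when the informer hands back an unexpected type) with the comma-ok form, so a bad object is logged instead of panicking. A standalone sketch of the pattern, with a hypothetical event type:

    package example

    import "log"

    type event struct{ node string }

    // handle uses the comma-ok assertion: an unexpected type is logged and
    // skipped rather than crashing the whole process.
    func handle(obj interface{}) {
        ev, ok := obj.(*event)
        if !ok {
            log.Printf("unexpected object type %T", obj)
            return
        }
        log.Printf("event for node %s", ev.node)
    }
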
var drainTaints = map[string]struct{}{ v1.TaintNodeUnschedulable: {}, // Kubernetes common eviction taint (kubectl drain) clusterAutoscalerTaint: {}, @@ -59,18 +60,19 @@ func PreStop(clientset kubernetes.Interface) error { nodeName := os.Getenv("CSI_NODE_NAME") if nodeName == "" { - return fmt.Errorf("PreStop: CSI_NODE_NAME missing") + return errors.New("PreStop: CSI_NODE_NAME missing") } node, err := fetchNode(clientset, nodeName) - if k8serrors.IsNotFound(err) { + switch { + case k8serrors.IsNotFound(err): klog.InfoS("PreStop: node does not exist - assuming this is a termination event, checking for remaining VolumeAttachments", "node", nodeName) - } else if err != nil { + case err != nil: return err - } else if !isNodeBeingDrained(node) { + case !isNodeBeingDrained(node): klog.InfoS("PreStop: node is not being drained, skipping VolumeAttachments check", "node", nodeName) return nil - } else { + default: klog.InfoS("PreStop: node is being drained, checking for remaining VolumeAttachments", "node", nodeName) } @@ -104,7 +106,10 @@ func waitForVolumeAttachments(clientset kubernetes.Interface, nodeName string) e _, err := informer.AddEventHandler(cache.ResourceEventHandlerFuncs{ DeleteFunc: func(obj interface{}) { klog.V(5).InfoS("DeleteFunc: VolumeAttachment deleted", "node", nodeName) - va := obj.(*storagev1.VolumeAttachment) + va, ok := obj.(*storagev1.VolumeAttachment) + if !ok { + klog.Error("DeleteFunc: error asserting object as type VolumeAttachment", "obj", va) + } if va.Spec.NodeName == nodeName { if err := checkVolumeAttachments(clientset, nodeName, allAttachmentsDeleted); err != nil { klog.ErrorS(err, "DeleteFunc: error checking VolumeAttachments") @@ -113,7 +118,10 @@ func waitForVolumeAttachments(clientset kubernetes.Interface, nodeName string) e }, UpdateFunc: func(oldObj, newObj interface{}) { klog.V(5).InfoS("UpdateFunc: VolumeAttachment updated", "node", nodeName) - va := newObj.(*storagev1.VolumeAttachment) + va, ok := newObj.(*storagev1.VolumeAttachment) + if !ok { + klog.Error("UpdateFunc: error asserting object as type VolumeAttachment", "obj", va) + } if va.Spec.NodeName == nodeName { if err := checkVolumeAttachments(clientset, nodeName, allAttachmentsDeleted); err != nil { klog.ErrorS(err, "UpdateFunc: error checking VolumeAttachments") diff --git a/cmd/hooks/prestop_test.go b/cmd/hooks/prestop_test.go index 9b72ee1b26..49df9cd3e6 100644 --- a/cmd/hooks/prestop_test.go +++ b/cmd/hooks/prestop_test.go @@ -15,7 +15,7 @@ package hooks import ( - "fmt" + "errors" "testing" "github.com/golang/mock/gomock" @@ -37,7 +37,7 @@ func TestPreStopHook(t *testing.T) { { name: "TestPreStopHook: CSI_NODE_NAME not set", nodeName: "", - expErr: fmt.Errorf("PreStop: CSI_NODE_NAME missing"), + expErr: errors.New("PreStop: CSI_NODE_NAME missing"), mockFunc: func(nodeName string, mockClient *driver.MockKubernetesClient, mockCoreV1 *driver.MockCoreV1Interface, mockNode *driver.MockNodeInterface, mockStorageV1 *driver.MockVolumeAttachmentInterface, mockStorageV1Interface *driver.MockStorageV1Interface) error { return nil }, @@ -45,11 +45,11 @@ func TestPreStopHook(t *testing.T) { { name: "TestPreStopHook: failed to retrieve node information", nodeName: "test-node", - expErr: fmt.Errorf("fetchNode: failed to retrieve node information: non-existent node"), + expErr: errors.New("fetchNode: failed to retrieve node information: non-existent node"), mockFunc: func(nodeName string, mockClient *driver.MockKubernetesClient, mockCoreV1 *driver.MockCoreV1Interface, mockNode *driver.MockNodeInterface, 
mockStorageV1 *driver.MockVolumeAttachmentInterface, mockStorageV1Interface *driver.MockStorageV1Interface) error { mockClient.EXPECT().CoreV1().Return(mockCoreV1).Times(1) mockCoreV1.EXPECT().Nodes().Return(mockNode).Times(1) - mockNode.EXPECT().Get(gomock.Any(), gomock.Eq(nodeName), gomock.Any()).Return(nil, fmt.Errorf("non-existent node")).Times(1) + mockNode.EXPECT().Get(gomock.Any(), gomock.Eq(nodeName), gomock.Any()).Return(nil, errors.New("non-existent node")).Times(1) return nil }, @@ -77,7 +77,6 @@ func TestPreStopHook(t *testing.T) { nodeName: "test-node", expErr: nil, mockFunc: func(nodeName string, mockClient *driver.MockKubernetesClient, mockCoreV1 *driver.MockCoreV1Interface, mockNode *driver.MockNodeInterface, mockVolumeAttachments *driver.MockVolumeAttachmentInterface, mockStorageV1Interface *driver.MockStorageV1Interface) error { - fakeNode := &v1.Node{ Spec: v1.NodeSpec{ Taints: []v1.Taint{ @@ -109,7 +108,6 @@ func TestPreStopHook(t *testing.T) { nodeName: "test-node", expErr: nil, mockFunc: func(nodeName string, mockClient *driver.MockKubernetesClient, mockCoreV1 *driver.MockCoreV1Interface, mockNode *driver.MockNodeInterface, mockVolumeAttachments *driver.MockVolumeAttachmentInterface, mockStorageV1Interface *driver.MockStorageV1Interface) error { - fakeNode := &v1.Node{ Spec: v1.NodeSpec{ Taints: []v1.Taint{ @@ -149,7 +147,6 @@ func TestPreStopHook(t *testing.T) { nodeName: "test-node", expErr: nil, mockFunc: func(nodeName string, mockClient *driver.MockKubernetesClient, mockCoreV1 *driver.MockCoreV1Interface, mockNode *driver.MockNodeInterface, mockVolumeAttachments *driver.MockVolumeAttachmentInterface, mockStorageV1Interface *driver.MockStorageV1Interface) error { - fakeNode := &v1.Node{ Spec: v1.NodeSpec{ Taints: []v1.Taint{ @@ -206,7 +203,6 @@ func TestPreStopHook(t *testing.T) { nodeName: "test-karpenter-node", expErr: nil, mockFunc: func(nodeName string, mockClient *driver.MockKubernetesClient, mockCoreV1 *driver.MockCoreV1Interface, mockNode *driver.MockNodeInterface, mockVolumeAttachments *driver.MockVolumeAttachmentInterface, mockStorageV1Interface *driver.MockStorageV1Interface) error { - fakeNode := &v1.Node{ Spec: v1.NodeSpec{ Taints: []v1.Taint{ @@ -238,7 +234,6 @@ func TestPreStopHook(t *testing.T) { nodeName: "test-karpenter-node", expErr: nil, mockFunc: func(nodeName string, mockClient *driver.MockKubernetesClient, mockCoreV1 *driver.MockCoreV1Interface, mockNode *driver.MockNodeInterface, mockVolumeAttachments *driver.MockVolumeAttachmentInterface, mockStorageV1Interface *driver.MockStorageV1Interface) error { - fakeNode := &v1.Node{ Spec: v1.NodeSpec{ Taints: []v1.Taint{ @@ -278,7 +273,6 @@ func TestPreStopHook(t *testing.T) { nodeName: "test-karpenter-node", expErr: nil, mockFunc: func(nodeName string, mockClient *driver.MockKubernetesClient, mockCoreV1 *driver.MockCoreV1Interface, mockNode *driver.MockNodeInterface, mockVolumeAttachments *driver.MockVolumeAttachmentInterface, mockStorageV1Interface *driver.MockStorageV1Interface) error { - fakeNode := &v1.Node{ Spec: v1.NodeSpec{ Taints: []v1.Taint{ diff --git a/cmd/main.go b/cmd/main.go index 1428a69dc1..3579f23350 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -108,6 +108,7 @@ func main() { klog.ErrorS(err, "failed to get version") klog.FlushAndExit(klog.ExitFlushTimeout, 1) } + //nolint:forbidigo // Print version info without klog/timestamp fmt.Println(versionInfo) os.Exit(0) } @@ -133,9 +134,9 @@ func main() { }() } - if options.HttpEndpoint != "" { + if options.HTTPEndpoint != "" { r := 
metrics.InitializeRecorder() - r.InitializeMetricsHandler(options.HttpEndpoint, "/metrics", options.MetricsCertFile, options.MetricsKeyFile) + r.InitializeMetricsHandler(options.HTTPEndpoint, "/metrics", options.MetricsCertFile, options.MetricsKeyFile) } cfg := metadata.MetadataServiceConfig{ diff --git a/pkg/batcher/batcher_test.go b/pkg/batcher/batcher_test.go index 8b1b9af9bd..01e1688bb4 100644 --- a/pkg/batcher/batcher_test.go +++ b/pkg/batcher/batcher_test.go @@ -15,6 +15,7 @@ package batcher import ( + "errors" "fmt" "sync" "testing" @@ -39,10 +40,11 @@ func mockExecutionWithError(inputs []string) (map[string]string, error) { for _, input := range inputs { results[input] = input } - return results, fmt.Errorf("mock execution error") + return results, errors.New("mock execution error") } func TestBatcher(t *testing.T) { + t.Parallel() type testCase struct { name string mockFunc func(inputs []string) (map[string]string, error) @@ -127,7 +129,6 @@ func TestBatcher(t *testing.T) { } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() @@ -136,7 +137,7 @@ func TestBatcher(t *testing.T) { var wg sync.WaitGroup - for i := 0; i < len(tc.tasks); i++ { + for i := range tc.tasks { wg.Add(1) go func(taskNum int) { defer wg.Done() @@ -148,7 +149,7 @@ func TestBatcher(t *testing.T) { wg.Wait() - for i := 0; i < len(tc.tasks); i++ { + for i := range tc.tasks { select { case r := <-resultChans[i]: task := fmt.Sprintf("task%d", i) @@ -175,7 +176,7 @@ func TestBatcherConcurrentTaskAdditions(t *testing.T) { b := New(numTasks, 1*time.Second, mockExecution) resultChans := make([]chan BatchResult[string], numTasks) - for i := 0; i < numTasks; i++ { + for i := range numTasks { wg.Add(1) go func(taskNum int) { defer wg.Done() @@ -187,7 +188,7 @@ func TestBatcherConcurrentTaskAdditions(t *testing.T) { wg.Wait() - for i := 0; i < numTasks; i++ { + for i := range numTasks { r := <-resultChans[i] task := fmt.Sprintf("task%d", i) if r.Err != nil { diff --git a/pkg/cloud/cloud.go b/pkg/cloud/cloud.go index fe3cd993ab..a5b86308ab 100644 --- a/pkg/cloud/cloud.go +++ b/pkg/cloud/cloud.go @@ -41,7 +41,7 @@ import ( "k8s.io/klog/v2" ) -// AWS volume types +// AWS volume types. const ( // VolumeTypeIO1 represents a provisioned IOPS SSD type of volume. VolumeTypeIO1 = "io1" @@ -111,13 +111,13 @@ const ( MaxTagValueLength = 256 ) -// Defaults +// Defaults. const ( // DefaultVolumeSize represents the default volume size. DefaultVolumeSize int64 = 100 * util.GiB ) -// Tags +// Tags. const ( // VolumeNameTagKey is the key value that refers to the volume's name. VolumeNameTagKey = "CSIVolumeName" @@ -127,11 +127,11 @@ const ( KubernetesTagKeyPrefix = "kubernetes.io" // AWSTagKeyPrefix is the prefix of the key value that is reserved for AWS. AWSTagKeyPrefix = "aws:" - //AwsEbsDriverTagKey is the tag to identify if a volume/snapshot is managed by ebs csi driver + // AwsEbsDriverTagKey is the tag to identify if a volume/snapshot is managed by ebs csi driver. AwsEbsDriverTagKey = "ebs.csi.aws.com/cluster" ) -// Batcher +// Batcher. const ( volumeIDBatcher volumeBatcherType = iota volumeTagBatcher @@ -146,39 +146,39 @@ const ( var ( // ErrMultiDisks is an error that is returned when multiple // disks are found with the same volume name. - ErrMultiDisks = errors.New("Multiple disks with same name") + ErrMultiDisks = errors.New("multiple disks with same name") // ErrDiskExistsDiffSize is an error that is returned if a disk with a given // name, but different size, is found. 
- ErrDiskExistsDiffSize = errors.New("There is already a disk with same name and different size") + ErrDiskExistsDiffSize = errors.New("there is already a disk with same name and different size") // ErrNotFound is returned when a resource is not found. - ErrNotFound = errors.New("Resource was not found") + ErrNotFound = errors.New("resource was not found") - // ErrIdempotent is returned when another request with same idempotent token is in-flight. - ErrIdempotentParameterMismatch = errors.New("Parameters on this idempotent request are inconsistent with parameters used in previous request(s)") + // ErrIdempotentParameterMismatch is returned when another request with same idempotent token is in-flight. + ErrIdempotentParameterMismatch = errors.New("parameters on this idempotent request are inconsistent with parameters used in previous request(s)") // ErrAlreadyExists is returned when a resource is already existent. - ErrAlreadyExists = errors.New("Resource already exists") + ErrAlreadyExists = errors.New("resource already exists") // ErrMultiSnapshots is returned when multiple snapshots are found - // with the same ID - ErrMultiSnapshots = errors.New("Multiple snapshots with the same name found") + // with the same ID. + ErrMultiSnapshots = errors.New("multiple snapshots with the same name found") - // ErrInvalidMaxResults is returned when a MaxResults pagination parameter is between 1 and 4 - ErrInvalidMaxResults = errors.New("MaxResults parameter must be 0 or greater than or equal to 5") + // ErrInvalidMaxResults is returned when a MaxResults pagination parameter is between 1 and 4. + ErrInvalidMaxResults = errors.New("maxResults parameter must be 0 or greater than or equal to 5") - // VolumeNotBeingModified is returned if volume being described is not being modified - VolumeNotBeingModified = fmt.Errorf("volume is not being modified") + // ErrVolumeNotBeingModified is returned if volume being described is not being modified. + ErrVolumeNotBeingModified = errors.New("volume is not being modified") - // ErrInvalidArgument is returned if parameters were rejected by cloud provider + // ErrInvalidArgument is returned if parameters were rejected by cloud provider. ErrInvalidArgument = errors.New("invalid argument") - // ErrInvalidRequest is returned if parameters were rejected by driver + // ErrInvalidRequest is returned if parameters were rejected by driver. ErrInvalidRequest = errors.New("invalid request") ) -// Set during build time via -ldflags +// Set during build time via -ldflags. var driverVersion string var invalidParameterErrorCodes = map[string]struct{}{ @@ -192,7 +192,7 @@ var invalidParameterErrorCodes = map[string]struct{}{ "ValidationError": {}, } -// Disk represents a EBS volume +// Disk represents a EBS volume. type Disk struct { VolumeID string CapacityGiB int32 @@ -202,7 +202,7 @@ type Disk struct { Attachments []string } -// DiskOptions represents parameters to create an EBS volume +// DiskOptions represents parameters to create an EBS volume. type DiskOptions struct { CapacityBytes int64 Tags map[string]string @@ -222,20 +222,20 @@ type DiskOptions struct { SnapshotID string } -// ModifyDiskOptions represents parameters to modify an EBS volume +// ModifyDiskOptions represents parameters to modify an EBS volume. type ModifyDiskOptions struct { VolumeType string IOPS int32 Throughput int32 } -// ModifyTagsOptions represents parameter to modify the tags of an existing EBS volume +// ModifyTagsOptions represents parameter to modify the tags of an existing EBS volume. 
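Note: the renames above follow conventions checked by the errname and stylecheck linters, both active under enable-all: sentinel error variables get the Err prefix, messages are lowercased, and fmt.Errorf calls with no format verbs become errors.New. Callers keep matching the sentinels with errors.Is, as the waitForVolumeModification hunks further down do. A minimal sketch with a hypothetical sentinel:

    package example

    import (
        "errors"
        "fmt"
    )

    // ErrVolumeBusy is a hypothetical sentinel: Err prefix, lowercase,
    // unpunctuated message.
    var ErrVolumeBusy = errors.New("volume is busy")

    func detach(id string) error {
        // Wrap with %w so callers can still match the sentinel.
        return fmt.Errorf("detach %s: %w", id, ErrVolumeBusy)
    }

    func canRetry(err error) bool {
        return errors.Is(err, ErrVolumeBusy)
    }
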
type ModifyTagsOptions struct { TagsToAdd map[string]string TagsToDelete []string } -// Snapshot represents an EBS volume snapshot +// Snapshot represents an EBS volume snapshot. type Snapshot struct { SnapshotID string SourceVolumeID string @@ -244,19 +244,19 @@ type Snapshot struct { ReadyToUse bool } -// ListSnapshotsResponse is the container for our snapshots along with a pagination token to pass back to the caller +// ListSnapshotsResponse is the container for our snapshots along with a pagination token to pass back to the caller. type ListSnapshotsResponse struct { Snapshots []*Snapshot NextToken string } -// SnapshotOptions represents parameters to create an EBS volume +// SnapshotOptions represents parameters to create an EBS volume. type SnapshotOptions struct { Tags map[string]string OutpostArn string } -// ec2ListSnapshotsResponse is a helper struct returned from the AWS API calling function to the main ListSnapshots function +// ec2ListSnapshotsResponse is a helper struct returned from the AWS API calling function to the main ListSnapshots function. type ec2ListSnapshotsResponse struct { Snapshots []types.Snapshot NextToken *string @@ -333,7 +333,7 @@ type cloud struct { var _ Cloud = &cloud{} // NewCloud returns a new instance of AWS cloud -// It panics if session is invalid +// It panics if session is invalid. func NewCloud(region string, awsSdkDebugLog bool, userAgentExtra string, batching bool) (Cloud, error) { c := newEC2Cloud(region, awsSdkDebugLog, userAgentExtra, batching) return c, nil @@ -438,7 +438,7 @@ func execBatchDescribeVolumes(svc EC2API, input []string, batcher volumeBatcherT } default: - return nil, fmt.Errorf("execBatchDescribeVolumes: unsupported request type") + return nil, errors.New("execBatchDescribeVolumes: unsupported request type") } ctx, cancel := context.WithTimeout(context.Background(), batchDescribeTimeout) @@ -538,7 +538,7 @@ func (c *cloud) CreateDisk(ctx context.Context, volumeName string, diskOptions * capacityGiB := util.BytesToGiB(diskOptions.CapacityBytes) if diskOptions.IOPS > 0 && diskOptions.IOPSPerGB > 0 { - return nil, fmt.Errorf("invalid StorageClass parameters; specify either IOPS or IOPSPerGb, not both") + return nil, errors.New("invalid StorageClass parameters; specify either IOPS or IOPSPerGb, not both") } createType = diskOptions.VolumeType @@ -571,7 +571,7 @@ func (c *cloud) CreateDisk(ctx context.Context, volumeName string, diskOptions * } if diskOptions.MultiAttachEnabled && createType != VolumeTypeIO2 { - return nil, fmt.Errorf("CreateDisk: multi-attach is only supported for io2 volumes") + return nil, errors.New("CreateDisk: multi-attach is only supported for io2 volumes") } if maxIops > 0 { @@ -583,7 +583,7 @@ func (c *cloud) CreateDisk(ctx context.Context, volumeName string, diskOptions * iops = capIOPS(createType, capacityGiB, requestedIops, minIops, maxIops, maxIopsPerGb, diskOptions.AllowIOPSPerGBIncrease) } - var tags []types.Tag + tags := make([]types.Tag, 0, len(diskOptions.Tags)) for key, value := range diskOptions.Tags { tags = append(tags, types.Tag{Key: aws.String(key), Value: aws.String(value)}) } @@ -671,12 +671,12 @@ func (c *cloud) CreateDisk(ctx context.Context, volumeName string, diskOptions * volumeID := aws.ToString(response.VolumeId) if len(volumeID) == 0 { - return nil, fmt.Errorf("volume ID was not returned by CreateVolume") + return nil, errors.New("volume ID was not returned by CreateVolume") } size := *response.Size if size == 0 { - return nil, fmt.Errorf("disk size was not returned by 
CreateVolume") + return nil, errors.New("disk size was not returned by CreateVolume") } if err := c.waitForVolume(ctx, volumeID); err != nil { @@ -694,8 +694,8 @@ func (c *cloud) CreateDisk(ctx context.Context, volumeName string, diskOptions * if err != nil { // To avoid leaking volume, we should delete the volume just created // TODO: Need to figure out how to handle DeleteDisk failed scenario instead of just log the error - if _, error := c.DeleteDisk(ctx, volumeID); err != nil { - klog.ErrorS(error, "failed to be deleted, this may cause volume leak", "volumeID", volumeID) + if _, deleteDiskErr := c.DeleteDisk(ctx, volumeID); deleteDiskErr != nil { + klog.ErrorS(deleteDiskErr, "failed to be deleted, this may cause volume leak", "volumeID", volumeID) } else { klog.V(5).InfoS("volume is deleted because there was an error while attaching the tags", "volumeID", volumeID) } @@ -705,7 +705,7 @@ func (c *cloud) CreateDisk(ctx context.Context, volumeName string, diskOptions * return &Disk{CapacityGiB: size, VolumeID: volumeID, AvailabilityZone: zone, SnapshotID: snapshotID, OutpostArn: outpostArn}, nil } -// execBatchDescribeVolumesModifications executes a batched DescribeVolumesModifications API call +// execBatchDescribeVolumesModifications executes a batched DescribeVolumesModifications API call. func execBatchDescribeVolumesModifications(svc EC2API, input []string) (map[string]*types.VolumeModification, error) { klog.V(7).InfoS("execBatchDescribeVolumeModifications", "volumeIds", input) request := &ec2.DescribeVolumesModificationsInput{ @@ -845,7 +845,7 @@ func (c *cloud) ResizeOrModifyDisk(ctx context.Context, volumeID string, newSize } } // Perform one final check on the volume - return c.checkDesiredState(ctx, volumeID, int32(newSizeGiB), options) + return c.checkDesiredState(ctx, volumeID, newSizeGiB, options) } func (c *cloud) DeleteDisk(ctx context.Context, volumeID string) (bool, error) { @@ -861,7 +861,7 @@ func (c *cloud) DeleteDisk(ctx context.Context, volumeID string) (bool, error) { return true, nil } -// execBatchDescribeInstances executes a batched DescribeInstances API call +// execBatchDescribeInstances executes a batched DescribeInstances API call. 
func execBatchDescribeInstances(svc EC2API, input []string) (map[string]*types.Instance, error) { klog.V(7).InfoS("execBatchDescribeInstances", "instanceIds", input) request := &ec2.DescribeInstancesInput{ @@ -1057,7 +1057,6 @@ func (c *cloud) WaitForAttachmentState(ctx context.Context, volumeID, expectedSt attachmentState := "" for _, a := range volume.Attachments { - a := a if a.InstanceId != nil { if aws.ToString(a.InstanceId) == expectedInstance { attachmentState = string(a.State) @@ -1188,7 +1187,7 @@ func execBatchDescribeSnapshots(svc EC2API, input []string, batcher snapshotBatc } default: - return nil, fmt.Errorf("execBatchDescribeSnapshots: unsupported request type") + return nil, errors.New("execBatchDescribeSnapshots: unsupported request type") } ctx, cancel := context.WithTimeout(context.Background(), batchDescribeTimeout) @@ -1202,7 +1201,6 @@ func execBatchDescribeSnapshots(svc EC2API, input []string, batcher snapshotBatc result := make(map[string]*types.Snapshot) for _, snapshot := range resp { - snapshot := snapshot key, err := extractSnapshotKey(&snapshot, batcher) if err != nil { klog.Warningf("execBatchDescribeSnapshots: skipping snapshot: %v, reason: %v", snapshot, err) @@ -1276,8 +1274,9 @@ func extractSnapshotKey(s *types.Snapshot, batcher snapshotBatcherType) (string, func (c *cloud) CreateSnapshot(ctx context.Context, volumeID string, snapshotOptions *SnapshotOptions) (snapshot *Snapshot, err error) { descriptions := "Created by AWS EBS CSI driver for volume " + volumeID - var tags []types.Tag var request *ec2.CreateSnapshotInput + + tags := make([]types.Tag, 0, len(snapshotOptions.Tags)) for key, value := range snapshotOptions.Tags { tags = append(tags, types.Tag{Key: aws.String(key), Value: aws.String(value)}) } @@ -1300,7 +1299,7 @@ func (c *cloud) CreateSnapshot(ctx context.Context, volumeID string, snapshotOpt return nil, fmt.Errorf("error creating snapshot of volume %s: %w", volumeID, err) } if res == nil { - return nil, fmt.Errorf("nil CreateSnapshotResponse") + return nil, errors.New("nil CreateSnapshotResponse") } return &Snapshot{ @@ -1386,7 +1385,8 @@ func (c *cloud) ListSnapshots(ctx context.Context, volumeID string, maxResults i if err != nil { return nil, err } - var snapshots []*Snapshot + + snapshots := make([]*Snapshot, 0, len(ec2SnapshotsResponse.Snapshots)) for _, ec2Snapshot := range ec2SnapshotsResponse.Snapshots { snapshots = append(snapshots, c.ec2SnapshotResponseToStruct(ec2Snapshot)) } @@ -1401,7 +1401,7 @@ func (c *cloud) ListSnapshots(ctx context.Context, volumeID string, maxResults i }, nil } -// Helper method converting EC2 snapshot type to the internal struct +// Helper method converting EC2 snapshot type to the internal struct. func (c *cloud) ec2SnapshotResponseToStruct(ec2Snapshot types.Snapshot) *Snapshot { snapshotSize := *ec2Snapshot.VolumeSize snapshot := &Snapshot{ @@ -1557,7 +1557,7 @@ func (c *cloud) getSnapshot(ctx context.Context, request *ec2.DescribeSnapshotsI } } -// listSnapshots returns all snapshots based from a request +// listSnapshots returns all snapshots based from a request. 
func (c *cloud) listSnapshots(ctx context.Context, request *ec2.DescribeSnapshotsInput) (*ec2ListSnapshotsResponse, error) { var snapshots []types.Snapshot var nextToken *string @@ -1643,7 +1643,7 @@ func isAWSErrorInvalidAttachmentNotFound(err error) bool { } // isAWSErrorModificationNotFound returns a boolean indicating whether the given -// error is an AWS InvalidVolumeModification.NotFound error +// error is an AWS InvalidVolumeModification.NotFound error. func isAWSErrorModificationNotFound(err error) bool { return isAWSError(err, "InvalidVolumeModification.NotFound") } @@ -1657,7 +1657,7 @@ func isAWSErrorSnapshotNotFound(err error) bool { // isAWSErrorIdempotentParameterMismatch returns a boolean indicating whether the // given error is an AWS IdempotentParameterMismatch error. -// This error is reported when the two request contains same client-token but different parameters +// This error is reported when the two request contains same client-token but different parameters. func isAWSErrorIdempotentParameterMismatch(err error) bool { return isAWSError(err, "IdempotentParameterMismatch") } @@ -1702,13 +1702,14 @@ func (c *cloud) checkDesiredState(ctx context.Context, volumeID string, desiredS // Check if there is a mismatch between the requested modification and the current volume // If there is, the volume is still modifying and we should not return a success - if realSizeGiB < desiredSizeGiB { + switch { + case realSizeGiB < desiredSizeGiB: return realSizeGiB, fmt.Errorf("volume %q is still being expanded to %d size", volumeID, desiredSizeGiB) - } else if options.IOPS != 0 && (volume.Iops == nil || *volume.Iops != options.IOPS) { + case options.IOPS != 0 && (volume.Iops == nil || *volume.Iops != options.IOPS): return realSizeGiB, fmt.Errorf("volume %q is still being modified to iops %d", volumeID, options.IOPS) - } else if options.VolumeType != "" && !strings.EqualFold(string(volume.VolumeType), options.VolumeType) { + case options.VolumeType != "" && !strings.EqualFold(string(volume.VolumeType), options.VolumeType): return realSizeGiB, fmt.Errorf("volume %q is still being modified to type %q", volumeID, options.VolumeType) - } else if options.Throughput != 0 && (volume.Throughput == nil || *volume.Throughput != options.Throughput) { + case options.Throughput != 0 && (volume.Throughput == nil || *volume.Throughput != options.Throughput): return realSizeGiB, fmt.Errorf("volume %q is still being modified to throughput %d", volumeID, options.Throughput) } @@ -1720,7 +1721,7 @@ func (c *cloud) waitForVolumeModification(ctx context.Context, volumeID string) waitErr := wait.ExponentialBackoff(c.vwp.modificationBackoff, func() (bool, error) { m, err := c.getLatestVolumeModification(ctx, volumeID, true) // Consider volumes that have never been modified as done - if err != nil && errors.Is(err, VolumeNotBeingModified) { + if err != nil && errors.Is(err, ErrVolumeNotBeingModified) { return true, nil } else if err != nil { return false, err @@ -1748,7 +1749,7 @@ func describeVolumesModifications(ctx context.Context, svc EC2API, request *ec2. 
response, err := svc.DescribeVolumesModifications(ctx, request) if err != nil { if isAWSErrorModificationNotFound(err) { - return nil, VolumeNotBeingModified + return nil, ErrVolumeNotBeingModified } return nil, fmt.Errorf("error describing volume modifications: %w", err) } @@ -1776,14 +1777,14 @@ func (c *cloud) getLatestVolumeModification(ctx context.Context, volumeID string }) if err != nil { if isAWSErrorModificationNotFound(err) { - return nil, VolumeNotBeingModified + return nil, ErrVolumeNotBeingModified } return nil, fmt.Errorf("error describing modifications in volume %q: %w", volumeID, err) } volumeMods := mod.VolumesModifications if len(volumeMods) == 0 { - return nil, VolumeNotBeingModified + return nil, ErrVolumeNotBeingModified } return &volumeMods[len(volumeMods)-1], nil @@ -1793,7 +1794,7 @@ func (c *cloud) getLatestVolumeModification(ctx context.Context, volumeID string } // randomAvailabilityZone returns a random zone from the given region -// the randomness relies on the response of DescribeAvailabilityZones +// the randomness relies on the response of DescribeAvailabilityZones. func (c *cloud) randomAvailabilityZone(ctx context.Context) (string, error) { request := &ec2.DescribeAvailabilityZonesInput{} response, err := c.ec2.DescribeAvailabilityZones(ctx, request) @@ -1809,7 +1810,7 @@ func (c *cloud) randomAvailabilityZone(ctx context.Context) (string, error) { return zones[0], nil } -// AvailabilityZones returns availability zones from the given region +// AvailabilityZones returns availability zones from the given region. func (c *cloud) AvailabilityZones(ctx context.Context) (map[string]struct{}, error) { response, err := c.ec2.DescribeAvailabilityZones(ctx, &ec2.DescribeAvailabilityZonesInput{}) if err != nil { @@ -1862,7 +1863,7 @@ func (c *cloud) validateModifyVolume(ctx context.Context, volumeID string, newSi // This call must NOT be batched because a missing volume modification will return client error latestMod, err := c.getLatestVolumeModification(ctx, volumeID, false) - if err != nil && !errors.Is(err, VolumeNotBeingModified) { + if err != nil && !errors.Is(err, ErrVolumeNotBeingModified) { return true, oldSizeGiB, fmt.Errorf("error fetching volume modifications for %q: %w", volumeID, err) } diff --git a/pkg/cloud/cloud_test.go b/pkg/cloud/cloud_test.go index 84100c8562..3b09e107ff 100644 --- a/pkg/cloud/cloud_test.go +++ b/pkg/cloud/cloud_test.go @@ -26,20 +26,17 @@ import ( "testing" "time" - "k8s.io/apimachinery/pkg/util/wait" - "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/ec2" "github.com/aws/aws-sdk-go-v2/service/ec2/types" - "github.com/aws/smithy-go" - "github.com/golang/mock/gomock" dm "github.com/kubernetes-sigs/aws-ebs-csi-driver/pkg/cloud/devicemanager" "github.com/kubernetes-sigs/aws-ebs-csi-driver/pkg/expiringcache" "github.com/kubernetes-sigs/aws-ebs-csi-driver/pkg/util" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "k8s.io/apimachinery/pkg/util/wait" ) const ( @@ -53,15 +50,15 @@ const ( defaultCreateDiskDeadline = time.Second * 5 ) -func generateVolumes(volIdCount, volTagCount int) []types.Volume { - volumes := make([]types.Volume, 0, volIdCount+volTagCount) +func generateVolumes(volIDCount, volTagCount int) []types.Volume { + volumes := make([]types.Volume, 0, volIDCount+volTagCount) - for i := 0; i < volIdCount; i++ { + for i := range volIDCount { volumeID := fmt.Sprintf("vol-%d", i) volumes = append(volumes, types.Volume{VolumeId: aws.String(volumeID)}) } - for i := 0; i < 
volTagCount; i++ { + for i := range volTagCount { volumeName := fmt.Sprintf("vol-name-%d", i) volumes = append(volumes, types.Volume{Tags: []types.Tag{{Key: aws.String(VolumeNameTagKey), Value: aws.String(volumeName)}}}) } @@ -82,8 +79,8 @@ func extractVolumeIdentifiers(volumes []types.Volume) (volumeIDs []string, volum } return volumeIDs, volumeNames } -func TestNewCloud(t *testing.T) { +func TestNewCloud(t *testing.T) { testCases := []struct { name string region string @@ -114,7 +111,10 @@ func TestNewCloud(t *testing.T) { if err != nil { t.Fatalf("error %v", err) } - ec2CloudAscloud := ec2Cloud.(*cloud) + ec2CloudAscloud, ok := ec2Cloud.(*cloud) + if !ok { + t.Fatalf("could not assert object ec2Cloud as cloud type, %v", ec2Cloud) + } assert.Equal(t, ec2CloudAscloud.region, tc.region) if tc.batchingEnabled { assert.NotNil(t, ec2CloudAscloud.bm) @@ -124,6 +124,7 @@ func TestNewCloud(t *testing.T) { } } func TestBatchDescribeVolumes(t *testing.T) { + t.Parallel() testCases := []struct { name string volumes []types.Volume @@ -181,7 +182,7 @@ func TestBatchDescribeVolumes(t *testing.T) { mockFunc: func(mockEC2 *MockEC2API, expErr error, volumes []types.Volume) { mockEC2.EXPECT().DescribeVolumes(gomock.Any(), gomock.Any()).Return(nil, expErr).Times(1) }, - expErr: fmt.Errorf("Generic EC2 API error"), + expErr: errors.New("Generic EC2 API error"), }, { name: "fail: volume not found", @@ -189,7 +190,7 @@ func TestBatchDescribeVolumes(t *testing.T) { mockFunc: func(mockEC2 *MockEC2API, expErr error, volumes []types.Volume) { mockEC2.EXPECT().DescribeVolumes(gomock.Any(), gomock.Any()).Return(nil, expErr).Times(1) }, - expErr: fmt.Errorf("volume not found"), + expErr: errors.New("volume not found"), }, { name: "fail: invalid tag", @@ -204,7 +205,7 @@ func TestBatchDescribeVolumes(t *testing.T) { volumeOutput := &ec2.DescribeVolumesOutput{Volumes: volumes} mockEC2.EXPECT().DescribeVolumes(gomock.Any(), gomock.Any()).Return(volumeOutput, expErr).Times(0) }, - expErr: fmt.Errorf("invalid tag"), + expErr: errors.New("invalid tag"), }, { name: "fail: invalid request", @@ -217,7 +218,6 @@ func TestBatchDescribeVolumes(t *testing.T) { } for _, tc := range testCases { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() mockCtrl := gomock.NewController(t) @@ -225,7 +225,10 @@ func TestBatchDescribeVolumes(t *testing.T) { mockEC2 := NewMockEC2API(mockCtrl) c := newCloud(mockEC2) - cloudInstance := c.(*cloud) + cloudInstance, ok := c.(*cloud) + if !ok { + t.Fatalf("could not assert cloudInstance as type cloud, %v", cloudInstance) + } cloudInstance.bm = newBatcherManager(cloudInstance.ec2) tc.mockFunc(mockEC2, tc.expErr, tc.volumes) @@ -235,6 +238,7 @@ func TestBatchDescribeVolumes(t *testing.T) { } } func executeDescribeVolumesTest(t *testing.T, c *cloud, volumeIDs, volumeNames []string, expErr error) { + t.Helper() var wg sync.WaitGroup getRequestForID := func(id string) *ec2.DescribeVolumesInput { @@ -300,15 +304,16 @@ func executeDescribeVolumesTest(t *testing.T, c *cloud, volumeIDs, volumeNames [ } func TestBatchDescribeInstances(t *testing.T) { + t.Parallel() testCases := []struct { name string - instanceIds []string + instanceIDs []string mockFunc func(mockEC2 *MockEC2API, expErr error, reservations []types.Reservation) expErr error }{ { name: "success: instance by ID", - instanceIds: []string{"i-001", "i-002", "i-003"}, + instanceIDs: []string{"i-001", "i-002", "i-003"}, mockFunc: func(mockEC2 *MockEC2API, expErr error, reservations []types.Reservation) { reservationOutput := 
&ec2.DescribeInstancesOutput{Reservations: reservations} mockEC2.EXPECT().DescribeInstances(gomock.Any(), gomock.Any()).Return(reservationOutput, expErr).Times(1) @@ -317,15 +322,15 @@ func TestBatchDescribeInstances(t *testing.T) { }, { name: "fail: EC2 API generic error", - instanceIds: []string{"i-001", "i-002", "i-003"}, + instanceIDs: []string{"i-001", "i-002", "i-003"}, mockFunc: func(mockEC2 *MockEC2API, expErr error, reservations []types.Reservation) { mockEC2.EXPECT().DescribeInstances(gomock.Any(), gomock.Any()).Return(nil, expErr).Times(1) }, - expErr: fmt.Errorf("generic EC2 API error"), + expErr: errors.New("generic EC2 API error"), }, { name: "fail: invalid request", - instanceIds: []string{""}, + instanceIDs: []string{""}, mockFunc: func(mockEC2 *MockEC2API, expErr error, reservations []types.Reservation) { mockEC2.EXPECT().DescribeInstances(gomock.Any(), gomock.Any()).Return(nil, nil).Times(0) }, @@ -334,7 +339,6 @@ func TestBatchDescribeInstances(t *testing.T) { } for _, tc := range testCases { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() mockCtrl := gomock.NewController(t) @@ -342,32 +346,36 @@ func TestBatchDescribeInstances(t *testing.T) { mockEC2 := NewMockEC2API(mockCtrl) c := newCloud(mockEC2) - cloudInstance := c.(*cloud) + cloudInstance, ok := c.(*cloud) + if !ok { + t.Fatalf("could not assert cloudInstance as type cloud, %v", cloudInstance) + } cloudInstance.bm = newBatcherManager(cloudInstance.ec2) // Setup mocks var instances []types.Instance - for _, instanceId := range tc.instanceIds { - instances = append(instances, types.Instance{InstanceId: aws.String(instanceId)}) + for _, instanceID := range tc.instanceIDs { + instances = append(instances, types.Instance{InstanceId: aws.String(instanceID)}) } reservation := types.Reservation{Instances: instances} reservations := []types.Reservation{reservation} tc.mockFunc(mockEC2, tc.expErr, reservations) - executeDescribeInstancesTest(t, cloudInstance, tc.instanceIds, tc.expErr) + executeDescribeInstancesTest(t, cloudInstance, tc.instanceIDs, tc.expErr) }) } } -func executeDescribeInstancesTest(t *testing.T, c *cloud, instanceIds []string, expErr error) { +func executeDescribeInstancesTest(t *testing.T, c *cloud, instanceIDs []string, expErr error) { + t.Helper() var wg sync.WaitGroup getRequestForID := func(id string) *ec2.DescribeInstancesInput { return &ec2.DescribeInstancesInput{InstanceIds: []string{id}} } - requests := make([]*ec2.DescribeInstancesInput, 0, len(instanceIds)) - for _, instanceID := range instanceIds { + requests := make([]*ec2.DescribeInstancesInput, 0, len(instanceIDs)) + for _, instanceID := range instanceIDs { requests = append(requests, getRequestForID(instanceID)) } @@ -414,12 +422,12 @@ func executeDescribeInstancesTest(t *testing.T, c *cloud, instanceIds []string, func generateSnapshots(snapIDCount, snapTagCount int) []types.Snapshot { snapshots := make([]types.Snapshot, 0, snapIDCount+snapTagCount) - for i := 0; i < snapIDCount; i++ { + for i := range snapIDCount { snapID := fmt.Sprintf("snap-%d", i) snapshots = append(snapshots, types.Snapshot{SnapshotId: aws.String(snapID)}) } - for i := 0; i < snapTagCount; i++ { + for i := range snapTagCount { snapshotName := fmt.Sprintf("snap-name-%d", i) snapshots = append(snapshots, types.Snapshot{Tags: []types.Tag{{Key: aws.String(SnapshotNameTagKey), Value: aws.String(snapshotName)}}}) } @@ -442,6 +450,7 @@ func extractSnapshotIdentifiers(snapshots []types.Snapshot) (snapshotIDs []strin } func TestBatchDescribeSnapshots(t *testing.T) 
{ + t.Parallel() testCases := []struct { name string snapshots []types.Snapshot @@ -478,7 +487,7 @@ func TestBatchDescribeSnapshots(t *testing.T) { mockFunc: func(mockEC2 *MockEC2API, expErr error, snapshots []types.Snapshot) { mockEC2.EXPECT().DescribeSnapshots(gomock.Any(), gomock.Any()).Return(nil, expErr).Times(2) }, - expErr: fmt.Errorf("generic EC2 API error"), + expErr: errors.New("generic EC2 API error"), }, { name: "fail: Snapshot not found by ID", @@ -509,7 +518,6 @@ func TestBatchDescribeSnapshots(t *testing.T) { } for _, tc := range testCases { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() mockCtrl := gomock.NewController(t) @@ -517,7 +525,10 @@ func TestBatchDescribeSnapshots(t *testing.T) { mockEC2 := NewMockEC2API(mockCtrl) c := newCloud(mockEC2) - cloudInstance := c.(*cloud) + cloudInstance, ok := c.(*cloud) + if !ok { + t.Fatalf("could not assert cloudInstance as type cloud, %v", cloudInstance) + } cloudInstance.bm = newBatcherManager(cloudInstance.ec2) tc.mockFunc(mockEC2, tc.expErr, tc.snapshots) @@ -528,6 +539,7 @@ func TestBatchDescribeSnapshots(t *testing.T) { } func executeDescribeSnapshotsTest(t *testing.T, c *cloud, snapshotIDs, snapshotNames []string, expErr error) { + t.Helper() var wg sync.WaitGroup getRequestForID := func(id string) *ec2.DescribeSnapshotsInput { @@ -596,14 +608,14 @@ func executeDescribeSnapshotsTest(t *testing.T, c *cloud, snapshotIDs, snapshotN func TestCheckDesiredState(t *testing.T) { testCases := []struct { name string - volumeId string + volumeID string desiredSizeGiB int32 options *ModifyDiskOptions expErr error }{ { name: "success: normal path", - volumeId: "vol-001", + volumeID: "vol-001", desiredSizeGiB: 5, options: &ModifyDiskOptions{ VolumeType: VolumeTypeGP2, @@ -613,29 +625,29 @@ func TestCheckDesiredState(t *testing.T) { }, { name: "failure: volume is still being expanded", - volumeId: "vol-001", + volumeID: "vol-001", desiredSizeGiB: 500, options: &ModifyDiskOptions{ VolumeType: VolumeTypeGP2, IOPS: 3000, Throughput: 1000, }, - expErr: fmt.Errorf("volume \"vol-001\" is still being expanded to 500 size"), + expErr: errors.New("volume \"vol-001\" is still being expanded to 500 size"), }, { name: "failure: volume is still being modified to iops", - volumeId: "vol-001", + volumeID: "vol-001", desiredSizeGiB: 50, options: &ModifyDiskOptions{ VolumeType: VolumeTypeGP2, IOPS: 4000, Throughput: 1000, }, - expErr: fmt.Errorf("volume \"vol-001\" is still being modified to iops 4000"), + expErr: errors.New("volume \"vol-001\" is still being modified to iops 4000"), }, { name: "failure: volume is still being modifed to type", - volumeId: "vol-001", + volumeID: "vol-001", desiredSizeGiB: 50, options: &ModifyDiskOptions{ VolumeType: VolumeTypeGP3, @@ -646,14 +658,14 @@ func TestCheckDesiredState(t *testing.T) { }, { name: "failure: volume is still being modified to throughput", - volumeId: "vol-001", + volumeID: "vol-001", desiredSizeGiB: 5, options: &ModifyDiskOptions{ VolumeType: VolumeTypeGP2, IOPS: 3000, Throughput: 2000, }, - expErr: fmt.Errorf("volume \"vol-001\" is still being modified to throughput 2000"), + expErr: errors.New("volume \"vol-001\" is still being modified to throughput 2000"), }, } for _, tc := range testCases { @@ -661,7 +673,10 @@ func TestCheckDesiredState(t *testing.T) { defer mockCtrl.Finish() mockEC2 := NewMockEC2API(mockCtrl) c := newCloud(mockEC2) - cloudInstance := c.(*cloud) + cloudInstance, ok := c.(*cloud) + if !ok { + t.Fatalf("could not assert cloudInstance as type cloud, %v", cloudInstance) 
+ } mockEC2.EXPECT().DescribeVolumes(gomock.Any(), gomock.Any()).Return(&ec2.DescribeVolumesOutput{ Volumes: []types.Volume{ { @@ -673,7 +688,7 @@ func TestCheckDesiredState(t *testing.T) { }, }, }, nil) - _, err := cloudInstance.checkDesiredState(context.Background(), tc.volumeId, tc.desiredSizeGiB, tc.options) + _, err := cloudInstance.checkDesiredState(context.Background(), tc.volumeID, tc.desiredSizeGiB, tc.options) if err != nil { if tc.expErr == nil { t.Fatalf("Did not expect to get an error but got %q", err) @@ -689,15 +704,16 @@ func TestCheckDesiredState(t *testing.T) { } func TestBatchDescribeVolumesModifications(t *testing.T) { + t.Parallel() testCases := []struct { name string - volumeIds []string + volumeIDs []string mockFunc func(mockEC2 *MockEC2API, expErr error, volumeModifications []types.VolumeModification) expErr error }{ { name: "success: volumeModification by ID", - volumeIds: []string{"vol-001", "vol-002", "vol-003"}, + volumeIDs: []string{"vol-001", "vol-002", "vol-003"}, mockFunc: func(mockEC2 *MockEC2API, expErr error, volumeModifications []types.VolumeModification) { volumeModificationsOutput := &ec2.DescribeVolumesModificationsOutput{VolumesModifications: volumeModifications} mockEC2.EXPECT().DescribeVolumesModifications(gomock.Any(), gomock.Any()).Return(volumeModificationsOutput, expErr).Times(1) @@ -706,15 +722,15 @@ func TestBatchDescribeVolumesModifications(t *testing.T) { }, { name: "fail: EC2 API generic error", - volumeIds: []string{"vol-001", "vol-002", "vol-003"}, + volumeIDs: []string{"vol-001", "vol-002", "vol-003"}, mockFunc: func(mockEC2 *MockEC2API, expErr error, volumeModifications []types.VolumeModification) { mockEC2.EXPECT().DescribeVolumesModifications(gomock.Any(), gomock.Any()).Return(nil, expErr).Times(1) }, - expErr: fmt.Errorf("generic EC2 API error"), + expErr: errors.New("generic EC2 API error"), }, { name: "fail: invalid request", - volumeIds: []string{""}, + volumeIDs: []string{""}, mockFunc: func(mockEC2 *MockEC2API, expErr error, volumeModifications []types.VolumeModification) { mockEC2.EXPECT().DescribeVolumesModifications(gomock.Any(), gomock.Any()).Return(nil, expErr).Times(0) }, @@ -723,7 +739,6 @@ func TestBatchDescribeVolumesModifications(t *testing.T) { } for _, tc := range testCases { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() mockCtrl := gomock.NewController(t) @@ -731,31 +746,35 @@ func TestBatchDescribeVolumesModifications(t *testing.T) { mockEC2 := NewMockEC2API(mockCtrl) c := newCloud(mockEC2) - cloudInstance := c.(*cloud) + cloudInstance, ok := c.(*cloud) + if !ok { + t.Fatalf("could not assert cloudInstance as type cloud, %v", cloudInstance) + } cloudInstance.bm = newBatcherManager(cloudInstance.ec2) // Setup mocks var volumeModifications []types.VolumeModification - for _, volumeId := range tc.volumeIds { - volumeModifications = append(volumeModifications, types.VolumeModification{VolumeId: aws.String(volumeId)}) + for _, volumeID := range tc.volumeIDs { + volumeModifications = append(volumeModifications, types.VolumeModification{VolumeId: aws.String(volumeID)}) } tc.mockFunc(mockEC2, tc.expErr, volumeModifications) - executeDescribeVolumesModificationsTest(t, cloudInstance, tc.volumeIds, tc.expErr) + executeDescribeVolumesModificationsTest(t, cloudInstance, tc.volumeIDs, tc.expErr) }) } } -func executeDescribeVolumesModificationsTest(t *testing.T, c *cloud, volumeIds []string, expErr error) { +func executeDescribeVolumesModificationsTest(t *testing.T, c *cloud, volumeIDs []string, expErr error) { + 
t.Helper() var wg sync.WaitGroup getRequestForID := func(id string) *ec2.DescribeVolumesModificationsInput { return &ec2.DescribeVolumesModificationsInput{VolumeIds: []string{id}} } - requests := make([]*ec2.DescribeVolumesModificationsInput, 0, len(volumeIds)) - for _, volumeId := range volumeIds { - requests = append(requests, getRequestForID(volumeId)) + requests := make([]*ec2.DescribeVolumesModificationsInput, 0, len(volumeIDs)) + for _, volumeID := range volumeIDs { + requests = append(requests, getRequestForID(volumeID)) } r := make([]chan types.VolumeModification, len(requests)) @@ -799,6 +818,7 @@ func executeDescribeVolumesModificationsTest(t *testing.T, c *cloud, volumeIds [ } func TestCreateDisk(t *testing.T) { + t.Parallel() testCases := []struct { name string volumeName string @@ -1016,8 +1036,8 @@ func TestCreateDisk(t *testing.T) { AvailabilityZone: expZone, }, expCreateVolumeInput: &ec2.CreateVolumeInput{}, - expErr: fmt.Errorf("could not create volume in EC2: CreateVolume generic error"), - expCreateVolumeErr: fmt.Errorf("CreateVolume generic error"), + expErr: errors.New("could not create volume in EC2: CreateVolume generic error"), + expCreateVolumeErr: errors.New("CreateVolume generic error"), }, { name: "fail: ec2.CreateVolume returned snapshot not found error", @@ -1056,8 +1076,8 @@ func TestCreateDisk(t *testing.T) { AvailabilityZone: "", }, expCreateVolumeInput: &ec2.CreateVolumeInput{}, - expErr: fmt.Errorf("timed out waiting for volume to create: DescribeVolumes generic error"), - expDescVolumeErr: fmt.Errorf("DescribeVolumes generic error"), + expErr: errors.New("timed out waiting for volume to create: DescribeVolumes generic error"), + expDescVolumeErr: errors.New("DescribeVolumes generic error"), }, { name: "fail: Volume is not ready to use, volume stuck in creating status and controller context deadline exceeded", @@ -1069,7 +1089,7 @@ func TestCreateDisk(t *testing.T) { AvailabilityZone: "", }, expCreateVolumeInput: &ec2.CreateVolumeInput{}, - expErr: fmt.Errorf("timed out waiting for volume to create: timed out waiting for the condition"), + expErr: errors.New("timed out waiting for volume to create: timed out waiting for the condition"), }, { name: "success: normal from snapshot", @@ -1124,7 +1144,7 @@ func TestCreateDisk(t *testing.T) { AvailabilityZone: defaultZone, }, expCreateVolumeInput: nil, - expErr: fmt.Errorf("invalid StorageClass parameters; specify either IOPS or IOPSPerGb, not both"), + expErr: errors.New("invalid StorageClass parameters; specify either IOPS or IOPSPerGb, not both"), }, { name: "success: small io1 with too high iopsPerGB", @@ -1269,13 +1289,13 @@ func TestCreateDisk(t *testing.T) { VolumeType: "sbg1", }, expCreateVolumeInput: &ec2.CreateVolumeInput{}, - expCreateTagsErr: fmt.Errorf("CreateTags generic error"), + expCreateTagsErr: errors.New("CreateTags generic error"), expDisk: &Disk{ VolumeID: "vol-test", CapacityGiB: 1, AvailabilityZone: snowZone, }, - expErr: fmt.Errorf("could not attach tags to volume: vol-test. CreateTags generic error"), + expErr: errors.New("could not attach tags to volume: vol-test. 
CreateTags generic error"), }, { name: "success: create default volume with throughput", @@ -1330,7 +1350,7 @@ func TestCreateDisk(t *testing.T) { CapacityGiB: 4, AvailabilityZone: defaultZone, }, - expErr: fmt.Errorf("CreateDisk: multi-attach is only supported for io2 volumes"), + expErr: errors.New("CreateDisk: multi-attach is only supported for io2 volumes"), }, { name: "failure: invalid VolumeType", @@ -1349,7 +1369,6 @@ func TestCreateDisk(t *testing.T) { }, } for _, tc := range testCases { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() mockCtrl := gomock.NewController(t) @@ -1432,12 +1451,12 @@ func TestCreateDisk(t *testing.T) { } } -// Test client error IdempotentParameterMismatch by forcing it to progress twice +// Test client error IdempotentParameterMismatch by forcing it to progress twice. func TestCreateDiskClientToken(t *testing.T) { t.Parallel() const volumeName = "test-vol-client-token" - const volumeId = "vol-abcd1234" + const volumeID = "vol-abcd1234" diskOptions := &DiskOptions{ CapacityBytes: util.GiBToBytes(1), Tags: map[string]string{VolumeNameTagKey: volumeName, AwsEbsDriverTagKey: "true"}, @@ -1470,14 +1489,14 @@ func TestCreateDiskClientToken(t *testing.T) { func(_ context.Context, input *ec2.CreateVolumeInput, _ ...func(*ec2.Options)) (*ec2.CreateVolumeOutput, error) { assert.Equal(t, expectedClientToken3, *input.ClientToken) return &ec2.CreateVolumeOutput{ - VolumeId: aws.String(volumeId), + VolumeId: aws.String(volumeID), Size: aws.Int32(util.BytesToGiB(diskOptions.CapacityBytes)), }, nil }), mockEC2.EXPECT().DescribeVolumes(gomock.Any(), gomock.Any()).Return(&ec2.DescribeVolumesOutput{ Volumes: []types.Volume{ { - VolumeId: aws.String(volumeId), + VolumeId: aws.String(volumeID), Size: aws.Int32(util.BytesToGiB(diskOptions.CapacityBytes)), State: types.VolumeState("available"), AvailabilityZone: aws.String(diskOptions.AvailabilityZone), @@ -1515,13 +1534,13 @@ func TestDeleteDisk(t *testing.T) { name: "fail: DeleteVolume returned generic error", volumeID: "vol-test-1234", expResp: false, - expErr: fmt.Errorf("DeleteVolume generic error"), + expErr: errors.New("DeleteVolume generic error"), }, { name: "fail: DeleteVolume returned not found error", volumeID: "vol-test-1234", expResp: false, - expErr: fmt.Errorf("InvalidVolume.NotFound"), + expErr: errors.New("InvalidVolume.NotFound"), }, } @@ -1734,11 +1753,15 @@ func TestAttachDisk(t *testing.T) { mockCtrl := gomock.NewController(t) mockEC2 := NewMockEC2API(mockCtrl) c := newCloud(mockEC2) + cloudInstance, ok := c.(*cloud) + if !ok { + t.Fatalf("could not assert c as type cloud, %v", c) + } ctx := context.Background() - dm := c.(*cloud).dm + deviceManager := cloudInstance.dm - tc.mockFunc(mockEC2, ctx, tc.volumeID, tc.nodeID, tc.nodeID2, tc.path, dm) + tc.mockFunc(mockEC2, ctx, tc.volumeID, tc.nodeID, tc.nodeID2, tc.path, deviceManager) devicePath, err := c.AttachDisk(ctx, tc.volumeID, tc.nodeID) @@ -1869,7 +1892,7 @@ func TestGetDiskByName(t *testing.T) { name: "fail: DescribeVolumes returned generic error", volumeName: "vol-test-1234", volumeCapacity: util.GiBToBytes(1), - expErr: fmt.Errorf("DescribeVolumes generic error"), + expErr: errors.New("DescribeVolumes generic error"), }, } @@ -1976,7 +1999,7 @@ func TestGetDiskByID(t *testing.T) { { name: "fail: DescribeVolumes returned generic error", volumeID: "vol-test-1234", - expErr: fmt.Errorf("DescribeVolumes generic error"), + expErr: errors.New("DescribeVolumes generic error"), }, } @@ -2186,14 +2209,14 @@ func TestEnableFastSnapshotRestores(t 
*testing.T) { }, }}, }, - expErr: fmt.Errorf("failed to create fast snapshot restores for snapshot"), + expErr: errors.New("failed to create fast snapshot restores for snapshot"), }, { name: "fail: error", snapshotID: "", availabilityZones: nil, expOutput: nil, - expErr: fmt.Errorf("EnableFastSnapshotRestores error"), + expErr: errors.New("EnableFastSnapshotRestores error"), }, } @@ -2256,7 +2279,7 @@ func TestAvailabilityZones(t *testing.T) { name: "fail: error", availabilityZone: "", expOutput: nil, - expErr: fmt.Errorf("TestAvailabilityZones error"), + expErr: errors.New("TestAvailabilityZones error"), }, } @@ -2302,7 +2325,7 @@ func TestDeleteSnapshot(t *testing.T) { { name: "fail: delete snapshot return generic error", snapshotName: "snap-test-name", - expErr: fmt.Errorf("DeleteSnapshot generic error"), + expErr: errors.New("DeleteSnapshot generic error"), }, { name: "fail: delete snapshot return not found error", @@ -2483,9 +2506,9 @@ func TestResizeOrModifyDisk(t *testing.T) { name: "fail: volume doesn't exist", volumeID: "vol-test", existingVolume: &types.Volume{}, - existingVolumeError: fmt.Errorf("DescribeVolumes generic error"), + existingVolumeError: errors.New("DescribeVolumes generic error"), reqSizeGiB: 2, - expErr: fmt.Errorf("ResizeDisk generic error"), + expErr: errors.New("ResizeDisk generic error"), }, { name: "failure: volume in modifying state", @@ -2505,7 +2528,7 @@ func TestResizeOrModifyDisk(t *testing.T) { }, }, reqSizeGiB: 2, - expErr: fmt.Errorf("ResizeDisk generic error"), + expErr: errors.New("ResizeDisk generic error"), }, { name: "failure: ModifyVolume returned generic error", @@ -2519,8 +2542,8 @@ func TestResizeOrModifyDisk(t *testing.T) { AvailabilityZone: aws.String(defaultZone), VolumeType: types.VolumeTypeGp2, }, - modifiedVolumeError: fmt.Errorf("GenericErr"), - expErr: fmt.Errorf("GenericErr"), + modifiedVolumeError: errors.New("GenericErr"), + expErr: errors.New("GenericErr"), }, { name: "failure: returned ErrInvalidArgument when ModifyVolume returned InvalidParameterCombination", @@ -2535,11 +2558,11 @@ func TestResizeOrModifyDisk(t *testing.T) { VolumeType: types.VolumeTypeGp2, Size: aws.Int32(1), }, - modifiedVolumeError: fmt.Errorf("InvalidParameterCombination: The parameter iops is not supported for gp2 volumes"), - expErr: fmt.Errorf("InvalidParameterCombination: The parameter iops is not supported for gp2 volumes"), + modifiedVolumeError: errors.New("InvalidParameterCombination: The parameter iops is not supported for gp2 volumes"), + expErr: errors.New("InvalidParameterCombination: The parameter iops is not supported for gp2 volumes"), }, { - name: "failure: returned returned ErrInvalidArgument when ModifyVolume returned UnknownVolumeType", + name: "failure: returned ErrInvalidArgument when ModifyVolume returned UnknownVolumeType", volumeID: "vol-test", modifyDiskOptions: &ModifyDiskOptions{ VolumeType: "GPFake", @@ -2550,8 +2573,8 @@ func TestResizeOrModifyDisk(t *testing.T) { VolumeType: types.VolumeTypeGp2, Size: aws.Int32(1), }, - modifiedVolumeError: fmt.Errorf("UnknownVolumeType: Unknown volume type: GPFake"), - expErr: fmt.Errorf("UnknownVolumeType: Unknown volume type: GPFake"), + modifiedVolumeError: errors.New("UnknownVolumeType: Unknown volume type: GPFake"), + expErr: errors.New("UnknownVolumeType: Unknown volume type: GPFake"), }, { name: "failure: returned ErrInvalidArgument when ModifyVolume returned InvalidParameterValue", @@ -2566,8 +2589,8 @@ func TestResizeOrModifyDisk(t *testing.T) { VolumeType: types.VolumeTypeGp2, Size: 
aws.Int32(1), }, - modifiedVolumeError: fmt.Errorf("InvalidParameterValue: iops value 9999999 is not valid"), - expErr: fmt.Errorf("InvalidParameterValue: iops value 9999999 is not valid"), + modifiedVolumeError: errors.New("InvalidParameterValue: iops value 9999999 is not valid"), + expErr: errors.New("InvalidParameterValue: iops value 9999999 is not valid"), }, { name: "success: does not call ModifyVolume when no modification required", @@ -2653,24 +2676,14 @@ func TestResizeOrModifyDisk(t *testing.T) { } newSize, err := c.ResizeOrModifyDisk(ctx, tc.volumeID, util.GiBToBytes(tc.reqSizeGiB), tc.modifyDiskOptions) - if err != nil { - if tc.expErr == nil { - t.Fatalf("ResizeOrModifyDisk() failed: expected no error, got: %v", err) - } else { - if errors.Is(tc.expErr, ErrInvalidArgument) { - if !errors.Is(err, ErrInvalidArgument) { - t.Fatalf("ResizeOrModifyDisk() failed: expected ErrInvalidArgument, got: %v", err) - } - } - } - } else { - if tc.expErr != nil { - t.Fatal("ResizeOrModifyDisk() failed: expected error, got nothing") - } else { - if tc.reqSizeGiB != newSize { - t.Fatalf("ResizeOrModifyDisk() failed: expected capacity %d, got %d", tc.reqSizeGiB, newSize) - } - } + switch { + case errors.Is(tc.expErr, ErrInvalidArgument): + require.ErrorIs(t, err, ErrInvalidArgument, "ResizeOrModifyDisk() should return ErrInvalidArgument") + case tc.expErr != nil: + require.Error(t, err, "ResizeOrModifyDisk() should return error") + default: + require.NoError(t, err, "ResizeOrModifyDisk() should not return error") + assert.Equal(t, tc.reqSizeGiB, newSize, "ResizeOrModifyDisk() returned unexpected capacity") } mockCtrl.Finish() @@ -2729,7 +2742,7 @@ func TestModifyTags(t *testing.T) { name: "fail: EC2 API generic error TagsToAdd", volumeID: "mod-tag-test-name", negativeCase: true, - expErr: fmt.Errorf("Generic EC2 API error"), + expErr: errors.New("Generic EC2 API error"), modifyTagsOptions: ModifyTagsOptions{ TagsToAdd: validTagsToAddInput, TagsToDelete: emptyTagsToDeleteInput, @@ -2739,7 +2752,7 @@ func TestModifyTags(t *testing.T) { name: "fail: EC2 API generic error TagsToDelete", volumeID: "mod-tag-test-name", negativeCase: true, - expErr: fmt.Errorf("Generic EC2 API error"), + expErr: errors.New("Generic EC2 API error"), modifyTagsOptions: ModifyTagsOptions{ TagsToAdd: emptyTagsToAddInput, TagsToDelete: validTagsToDeleteInput, @@ -2774,15 +2787,11 @@ func TestModifyTags(t *testing.T) { if err != nil { if tc.expErr == nil { t.Fatalf("ModifyTags() failed: expected no error, got: %v", err) - } else { - if !strings.Contains(err.Error(), tc.expErr.Error()) { - t.Fatalf("ModifyTags() failed: expected error %v, got: %v", tc.expErr, err) - } - } - } else { - if tc.expErr != nil { - t.Fatal("ModifyTags() failed: expected error, got nothing") + } else if !strings.Contains(err.Error(), tc.expErr.Error()) { + t.Fatalf("ModifyTags() failed: expected error %v, got: %v", tc.expErr, err) } + } else if tc.expErr != nil { + t.Fatal("ModifyTags() failed: expected error, got nothing") } mockCtrl.Finish() @@ -2965,6 +2974,7 @@ func TestListSnapshots(t *testing.T) { { name: "success: normal", testFunc: func(t *testing.T) { + t.Helper() expSnapshots := []*Snapshot{ { SourceVolumeID: "snap-test-volume1", @@ -3038,6 +3048,7 @@ func TestListSnapshots(t *testing.T) { { name: "success: with volume ID", testFunc: func(t *testing.T) { + t.Helper() sourceVolumeID := "snap-test-volume" expSnapshots := []*Snapshot{ { @@ -3112,10 +3123,11 @@ func TestListSnapshots(t *testing.T) { { name: "success: max results, next token", 
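The TestResizeOrModifyDisk hunk above collapses the nested if/else error checks into a single switch plus testify's require/assert helpers. A small sketch of the same shape, assuming a hypothetical sentinel error and function under test (only the testify calls mirror the diff):

```go
package main

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

var errInvalidArgument = errors.New("invalid argument") // hypothetical sentinel

func resize(req int32) (int32, error) { return req, nil } // hypothetical function under test

func TestResizeSketch(t *testing.T) {
	var expErr error // per-case expected error in the real table test
	got, err := resize(2)

	switch {
	case errors.Is(expErr, errInvalidArgument):
		// Expecting a specific sentinel: the returned error must wrap it.
		require.ErrorIs(t, err, errInvalidArgument)
	case expErr != nil:
		// Expecting some error, but not a particular one.
		require.Error(t, err)
	default:
		require.NoError(t, err)
		assert.Equal(t, int32(2), got)
	}
}
```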
testFunc: func(t *testing.T) { + t.Helper() maxResults := 5 nextTokenValue := "nextTokenValue" var expSnapshots []*Snapshot - for i := 0; i < maxResults*2; i++ { + for i := range maxResults * 2 { expSnapshots = append(expSnapshots, &Snapshot{ SourceVolumeID: "snap-test-volume1", SnapshotID: fmt.Sprintf("snap-test-name%d", i), @@ -3125,7 +3137,7 @@ func TestListSnapshots(t *testing.T) { } var ec2Snapshots []types.Snapshot - for i := 0; i < maxResults*2; i++ { + for i := range maxResults * 2 { ec2Snapshots = append(ec2Snapshots, types.Snapshot{ SnapshotId: aws.String(expSnapshots[i].SnapshotID), VolumeId: aws.String(fmt.Sprintf("snap-test-volume%d", i)), @@ -3184,6 +3196,7 @@ func TestListSnapshots(t *testing.T) { { name: "fail: AWS DescribeSnapshots error", testFunc: func(t *testing.T) { + t.Helper() mockCtl := gomock.NewController(t) defer mockCtl.Finish() mockEC2 := NewMockEC2API(mockCtl) @@ -3201,6 +3214,7 @@ func TestListSnapshots(t *testing.T) { { name: "fail: no snapshots ErrNotFound", testFunc: func(t *testing.T) { + t.Helper() mockCtl := gomock.NewController(t) defer mockCtl.Finish() mockEC2 := NewMockEC2API(mockCtl) diff --git a/pkg/cloud/devicemanager/allocator.go b/pkg/cloud/devicemanager/allocator.go index 683eee5b03..95567396fb 100644 --- a/pkg/cloud/devicemanager/allocator.go +++ b/pkg/cloud/devicemanager/allocator.go @@ -17,7 +17,7 @@ limitations under the License. package devicemanager import ( - "fmt" + "errors" "sync" ) @@ -46,7 +46,7 @@ var _ NameAllocator = &nameAllocator{} // It does this by using a list of legal EBS device names from device_names.go // // likelyBadNames is a map of names that have previously returned an "in use" error when attempting to mount to them -// These names are unlikely to result in a successful mount, and may be permanently unavailable, so use them last +// These names are unlikely to result in a successful mount, and may be permanently unavailable, so use them last. func (d *nameAllocator) GetNext(existingNames ExistingNames, likelyBadNames *sync.Map) (string, error) { for _, name := range deviceNames { _, existing := existingNames[name] @@ -70,5 +70,5 @@ func (d *nameAllocator) GetNext(existingNames ExistingNames, likelyBadNames *syn return finalResortName, nil } - return "", fmt.Errorf("there are no names available") + return "", errors.New("there are no names available") } diff --git a/pkg/cloud/devicemanager/allocator_test.go b/pkg/cloud/devicemanager/allocator_test.go index a0d68003b5..5b581246e0 100644 --- a/pkg/cloud/devicemanager/allocator_test.go +++ b/pkg/cloud/devicemanager/allocator_test.go @@ -90,7 +90,7 @@ func TestNameAllocatorError(t *testing.T) { allocator := nameAllocator{} existingNames := map[string]string{} - for i := 0; i < len(deviceNames); i++ { + for range deviceNames { name, _ := allocator.GetNext(existingNames, new(sync.Map)) existingNames[name] = "" } diff --git a/pkg/cloud/devicemanager/manager.go b/pkg/cloud/devicemanager/manager.go index 76fa703677..85d4d18ec6 100644 --- a/pkg/cloud/devicemanager/manager.go +++ b/pkg/cloud/devicemanager/manager.go @@ -17,6 +17,7 @@ limitations under the License. package devicemanager import ( + "errors" "fmt" "sync" @@ -43,7 +44,7 @@ func (d *Device) Release(force bool) { } } -// Taint marks the device as no longer reusable +// Taint marks the device as no longer reusable. 
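The allocator_test hunk above swaps the classic `for i := 0; i < len(deviceNames); i++` loop for `for range deviceNames`, and the table tests elsewhere in this diff drop the `tc := tc` shadow copy. Both rely on Go 1.22 semantics (range over integers, per-iteration loop variables); a quick illustration, assuming Go 1.22 or newer:

```go
package main

import "fmt"

func main() {
	// Go 1.22+: ranging over an integer counts 0..n-1, replacing the
	// three-clause for loop when only the iteration count matters.
	for i := range 3 {
		fmt.Println("iteration", i)
	}

	// Go 1.22+: the loop variable is re-declared each iteration, so closures
	// passed to t.Run or goroutines no longer need the old `tc := tc` copy.
	funcs := make([]func() int, 0, 3)
	for i := range 3 {
		funcs = append(funcs, func() int { return i })
	}
	for _, f := range funcs {
		fmt.Println(f()) // prints 0, 1, 2 rather than 2, 2, 2
	}
}
```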
func (d *Device) Taint() { d.isTainted = true } @@ -108,7 +109,7 @@ func (d *deviceManager) NewDevice(instance *types.Instance, volumeID string, lik defer d.mux.Unlock() if instance == nil { - return nil, fmt.Errorf("instance is nil") + return nil, errors.New("instance is nil") } // Get device names being attached and already attached to this instance @@ -193,7 +194,7 @@ func (d *deviceManager) release(device *Device) error { } // getDeviceNamesInUse returns the device to volume ID mapping -// the mapping includes both already attached and being attached volumes +// the mapping includes both already attached and being attached volumes. func (d *deviceManager) getDeviceNamesInUse(instance *types.Instance) map[string]string { nodeID := aws.ToString(instance.InstanceId) inUse := map[string]string{} @@ -220,7 +221,7 @@ func (d *deviceManager) getPath(inUse map[string]string, volumeID string) string func getInstanceID(instance *types.Instance) (string, error) { if instance == nil { - return "", fmt.Errorf("can't get ID from a nil instance") + return "", errors.New("can't get ID from a nil instance") } return aws.ToString(instance.InstanceId), nil } diff --git a/pkg/cloud/devicemanager/manager_test.go b/pkg/cloud/devicemanager/manager_test.go index 88b16e24d0..859da2f68f 100644 --- a/pkg/cloud/devicemanager/manager_test.go +++ b/pkg/cloud/devicemanager/manager_test.go @@ -239,14 +239,11 @@ func newFakeInstance(instanceID, volumeID, devicePath string) *types.Instance { } func assertDevice(t *testing.T, d *Device, assigned bool, err error) { + t.Helper() if err != nil { t.Fatalf("Expected no error, got %v", err) } - if d == nil { - t.Fatalf("Expected valid device, got nil") - } - if d.IsAlreadyAssigned != assigned { t.Fatalf("Expected IsAlreadyAssigned to be %v, got %v", assigned, d.IsAlreadyAssigned) } diff --git a/pkg/cloud/handlers.go b/pkg/cloud/handlers.go index 2b82c8a8af..2f725943d7 100644 --- a/pkg/cloud/handlers.go +++ b/pkg/cloud/handlers.go @@ -29,7 +29,7 @@ import ( "k8s.io/klog/v2" ) -// RecordRequestsHandler is added to the Complete chain; called after any request +// RecordRequestsHandler is added to the Complete chain; called after any request. func RecordRequestsMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { return stack.Finalize.Add(middleware.FinalizeMiddlewareFunc("RecordRequestsMiddleware", func(ctx context.Context, input middleware.FinalizeInput, next middleware.FinalizeHandler) (output middleware.FinalizeOutput, metadata middleware.Metadata, err error) { @@ -60,7 +60,7 @@ func RecordRequestsMiddleware() func(*middleware.Stack) error { // LogServerErrorsMiddleware is a middleware that logs server errors received when attempting to contact the AWS API // A specialized middleware is used instead of the SDK's built-in retry logging to allow for customizing the verbosity -// of throttle errors vs server/unknown errors, to prevent flooding the logs with throttle error +// of throttle errors vs server/unknown errors, to prevent flooding the logs with throttle error. 
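The handlers.go hunks here only reword the doc comments, but the functions they describe register Finalize-step middleware on the AWS SDK Go v2 (smithy) stack. A rough sketch of how such a middleware is wired up, assuming the github.com/aws/smithy-go/middleware package; the timing logic is a made-up placeholder, not the driver's metrics code:

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/aws/smithy-go/middleware"
)

// timingMiddleware is a hypothetical Finalize-step middleware in the same
// shape as RecordRequestsMiddleware: it wraps each request and logs how long
// the call took.
func timingMiddleware() func(*middleware.Stack) error {
	return func(stack *middleware.Stack) error {
		return stack.Finalize.Add(middleware.FinalizeMiddlewareFunc("TimingMiddleware",
			func(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (middleware.FinalizeOutput, middleware.Metadata, error) {
				start := time.Now()
				out, md, err := next.HandleFinalize(ctx, in)
				log.Printf("request took %s (err=%v)", time.Since(start), err)
				return out, md, err
			}), middleware.After)
	}
}

func main() {
	// In an SDK client this would typically be appended to the client options,
	// e.g. cfg.APIOptions = append(cfg.APIOptions, timingMiddleware()).
	_ = timingMiddleware()
}
```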
func LogServerErrorsMiddleware() func(*middleware.Stack) error { return func(stack *middleware.Stack) error { return stack.Finalize.Add(middleware.FinalizeMiddlewareFunc("LogServerErrorsMiddleware", func(ctx context.Context, input middleware.FinalizeInput, next middleware.FinalizeHandler) (output middleware.FinalizeOutput, metadata middleware.Metadata, err error) { diff --git a/pkg/cloud/metadata/ec2.go b/pkg/cloud/metadata/ec2.go index a20afd6342..b0c39a6b48 100644 --- a/pkg/cloud/metadata/ec2.go +++ b/pkg/cloud/metadata/ec2.go @@ -16,6 +16,7 @@ package metadata import ( "context" + "errors" "fmt" "io" "strings" @@ -28,13 +29,13 @@ import ( ) const ( - // OutpostArnEndpoint is the ec2 instance metadata endpoint to query to get the outpost arn + // OutpostArnEndpoint is the ec2 instance metadata endpoint to query to get the outpost arn. OutpostArnEndpoint string = "outpost-arn" - // enisEndpoint is the ec2 instance metadata endpoint to query the number of attached ENIs + // enisEndpoint is the ec2 instance metadata endpoint to query the number of attached ENIs. EnisEndpoint string = "network/interfaces/macs" - // blockDevicesEndpoint is the ec2 instance metadata endpoint to query the number of attached block devices + // blockDevicesEndpoint is the ec2 instance metadata endpoint to query the number of attached block devices. BlockDevicesEndpoint string = "block-device-mapping" ) @@ -57,18 +58,18 @@ func EC2MetadataInstanceInfo(svc EC2Metadata, regionFromSession string) (*Metada doc := docOutput.InstanceIdentityDocument if len(doc.InstanceID) == 0 { - return nil, fmt.Errorf("could not get valid EC2 instance ID") + return nil, errors.New("could not get valid EC2 instance ID") } if len(doc.InstanceType) == 0 { - return nil, fmt.Errorf("could not get valid EC2 instance type") + return nil, errors.New("could not get valid EC2 instance type") } if len(doc.Region) == 0 { if len(regionFromSession) != 0 && util.IsSBE(regionFromSession) { doc.Region = regionFromSession } else { - return nil, fmt.Errorf("could not get valid EC2 region") + return nil, errors.New("could not get valid EC2 region") } } @@ -76,7 +77,7 @@ func EC2MetadataInstanceInfo(svc EC2Metadata, regionFromSession string) (*Metada if len(regionFromSession) != 0 && util.IsSBE(regionFromSession) { doc.AvailabilityZone = regionFromSession } else { - return nil, fmt.Errorf("could not get valid EC2 availability zone") + return nil, errors.New("could not get valid EC2 availability zone") } } diff --git a/pkg/cloud/metadata/k8s.go b/pkg/cloud/metadata/k8s.go index c57a220417..22c96f737c 100644 --- a/pkg/cloud/metadata/k8s.go +++ b/pkg/cloud/metadata/k8s.go @@ -55,7 +55,7 @@ func DefaultKubernetesAPIClient(kubeconfig string) KubernetesAPIClient { // it provides the absolute host path to the container volume. 
sandboxMountPoint := os.Getenv("CONTAINER_SANDBOX_MOUNT_POINT") if sandboxMountPoint == "" { - return nil, fmt.Errorf("CONTAINER_SANDBOX_MOUNT_POINT environment variable is not set") + return nil, errors.New("CONTAINER_SANDBOX_MOUNT_POINT environment variable is not set") } tokenFile := filepath.Join(sandboxMountPoint, "var", "run", "secrets", "kubernetes.io", "serviceaccount", "token") @@ -98,7 +98,7 @@ func DefaultKubernetesAPIClient(kubeconfig string) KubernetesAPIClient { func KubernetesAPIInstanceInfo(clientset kubernetes.Interface) (*Metadata, error) { nodeName := os.Getenv("CSI_NODE_NAME") if nodeName == "" { - return nil, fmt.Errorf("CSI_NODE_NAME env var not set") + return nil, errors.New("CSI_NODE_NAME env var not set") } // get node with k8s API @@ -109,7 +109,7 @@ func KubernetesAPIInstanceInfo(clientset kubernetes.Interface) (*Metadata, error providerID := node.Spec.ProviderID if providerID == "" { - return nil, fmt.Errorf("node providerID empty, cannot parse") + return nil, errors.New("node providerID empty, cannot parse") } awsInstanceIDRegex := "s\\.i-[a-z0-9]+|i-[a-z0-9]+$" @@ -117,28 +117,28 @@ func KubernetesAPIInstanceInfo(clientset kubernetes.Interface) (*Metadata, error re := regexp.MustCompile(awsInstanceIDRegex) instanceID := re.FindString(providerID) if instanceID == "" { - return nil, fmt.Errorf("did not find aws instance ID in node providerID string") + return nil, errors.New("did not find aws instance ID in node providerID string") } var instanceType string if val, ok := node.GetLabels()[corev1.LabelInstanceTypeStable]; ok { instanceType = val } else { - return nil, fmt.Errorf("could not retrieve instance type from topology label") + return nil, errors.New("could not retrieve instance type from topology label") } var region string if val, ok := node.GetLabels()[corev1.LabelTopologyRegion]; ok { region = val } else { - return nil, fmt.Errorf("could not retrieve region from topology label") + return nil, errors.New("could not retrieve region from topology label") } var availabilityZone string if val, ok := node.GetLabels()[corev1.LabelTopologyZone]; ok { availabilityZone = val } else { - return nil, fmt.Errorf("could not retrieve AZ from topology label") + return nil, errors.New("could not retrieve AZ from topology label") } instanceInfo := Metadata{ diff --git a/pkg/cloud/metadata/metadata.go b/pkg/cloud/metadata/metadata.go index 1497a9944b..ee644d67c0 100644 --- a/pkg/cloud/metadata/metadata.go +++ b/pkg/cloud/metadata/metadata.go @@ -17,14 +17,14 @@ limitations under the License. package metadata import ( - "fmt" + "errors" "os" "github.com/aws/aws-sdk-go-v2/aws/arn" "k8s.io/klog/v2" ) -// Metadata is info about the ec2 instance on which the driver is running +// Metadata is info about the ec2 instance on which the driver is running. 
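The metadata hunks above (and most of this diff) replace `fmt.Errorf` with `errors.New` wherever the message is a fixed string, keeping `fmt.Errorf` for messages that interpolate values or wrap another error. A minimal illustration of the distinction; the names here are made up for the sketch:

```go
package main

import (
	"errors"
	"fmt"
)

// A fixed message needs no formatting, so errors.New is the idiomatic choice.
var errNoInstanceID = errors.New("could not get valid EC2 instance ID")

func validateInstanceID(id string) error {
	if id == "" {
		return errNoInstanceID
	}
	return nil
}

func describe(id string) error {
	if err := validateInstanceID(id); err != nil {
		// fmt.Errorf remains the right tool when interpolating values or
		// wrapping another error with %w so callers can use errors.Is/As.
		return fmt.Errorf("describing instance %q: %w", id, err)
	}
	return nil
}

func main() {
	err := describe("")
	fmt.Println(errors.Is(err, errNoInstanceID)) // true, thanks to %w wrapping
}
```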
type Metadata struct { InstanceID string InstanceType string @@ -57,7 +57,7 @@ func NewMetadataService(cfg MetadataServiceConfig, region string) (MetadataServi } klog.ErrorS(err, "Retrieving Kubernetes metadata failed") - return nil, fmt.Errorf("IMDS metadata and Kubernetes metadata are both unavailable") + return nil, errors.New("IMDS metadata and Kubernetes metadata are both unavailable") } func retrieveEC2Metadata(ec2MetadataClient EC2MetadataClient, region string) (*Metadata, error) { @@ -83,7 +83,7 @@ func retrieveK8sMetadata(k8sAPIClient KubernetesAPIClient) (*Metadata, error) { return KubernetesAPIInstanceInfo(clientset) } -// Override the region on a Metadata object if it is non-empty +// Override the region on a Metadata object if it is non-empty. func (m *Metadata) overrideRegion(region string) *Metadata { if region != "" { m.Region = region diff --git a/pkg/cloud/metadata/metadata_test.go b/pkg/cloud/metadata/metadata_test.go index d5a4914ea6..23b055498a 100644 --- a/pkg/cloud/metadata/metadata_test.go +++ b/pkg/cloud/metadata/metadata_test.go @@ -19,7 +19,6 @@ package metadata import ( "errors" "io" - "os" "strings" "testing" @@ -103,7 +102,7 @@ func TestNewMetadataService(t *testing.T) { return fake.NewSimpleClientset(node), nil } - os.Setenv("CSI_NODE_NAME", "test-node") + t.Setenv("CSI_NODE_NAME", "test-node") if tc.ec2MetadataError == nil { mockEC2Metadata.EXPECT().GetInstanceIdentityDocument(gomock.Any(), &imds.GetInstanceIdentityDocumentInput{}).Return(&imds.GetInstanceIdentityDocumentOutput{ @@ -525,8 +524,7 @@ func TestKubernetesAPIInstanceInfo(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - os.Setenv("CSI_NODE_NAME", tc.nodeName) - defer os.Unsetenv("CSI_NODE_NAME") + t.Setenv("CSI_NODE_NAME", tc.nodeName) clientset := fake.NewSimpleClientset() if tc.node != nil { diff --git a/pkg/cloud/volume_limits.go b/pkg/cloud/volume_limits.go index 894ea840e0..7b91db018d 100644 --- a/pkg/cloud/volume_limits.go +++ b/pkg/cloud/volume_limits.go @@ -28,6 +28,7 @@ const ( nitroMaxAttachments = 28 ) +//nolint:gochecknoinits // TODO Refactor to avoid using init function to prevent side-effects func init() { // This list of Nitro instance types have a dedicated Amazon EBS volume limit of up to 128 attachments, depending on instance size. // The limit is not shared with other device attachments: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/volume_limits.html#nitro-system-limits @@ -159,7 +160,7 @@ func GetReservedSlotsForInstanceType(it string) int { // https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-store-volumes.html // IMDS does not provide NVMe instance store data; we'll just list all instances here -// g5.48xlarge is not added to this table as it is in the maxVolumeLimits +// g5.48xlarge is not added to this table as it is in the maxVolumeLimits. var nvmeInstanceStoreVolumes = map[string]int{ "c1.medium": 1, "c1.xlarge": 4, @@ -512,7 +513,7 @@ var nvmeInstanceStoreVolumes = map[string]int{ // https://aws.amazon.com/ec2/instance-types // Despite the dl1.24xlarge having Gaudi Accelerators describe instance types considers them GPUs as such that instance type is in this table -// g5.48xlarge is not added to this table as it is in the maxVolumeLimits +// g5.48xlarge is not added to this table as it is in the maxVolumeLimits. 
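The metadata_test.go hunk above moves from os.Setenv plus manual cleanup to t.Setenv, which scopes the environment variable to the test. A tiny sketch of the pattern:

```go
package main

import (
	"os"
	"testing"
)

func TestNodeNameFromEnv(t *testing.T) {
	// t.Setenv sets the variable for this test only and restores the previous
	// value automatically; no defer os.Unsetenv is needed. Note it cannot be
	// used in tests that call t.Parallel().
	t.Setenv("CSI_NODE_NAME", "test-node")

	if got := os.Getenv("CSI_NODE_NAME"); got != "test-node" {
		t.Fatalf("expected CSI_NODE_NAME=test-node, got %q", got)
	}
}
```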
var gpuInstanceGpus = map[string]int{ "dl1.24xlarge": 8, "g3.16xlarge": 4, diff --git a/pkg/coalescer/coalescer.go b/pkg/coalescer/coalescer.go index db4e23f5c3..9fbc85afca 100644 --- a/pkg/coalescer/coalescer.go +++ b/pkg/coalescer/coalescer.go @@ -30,7 +30,7 @@ import ( // // When the delay on the request expires (determined by the time the first request comes in), the merged // input is passed to the execution function, and the result to all waiting callers (those that were -// not rejected during the merge step) +// not rejected during the merge step). type Coalescer[InputType any, ResultType any] interface { // Coalesce is a function to coalesce a given input // key = only requests with this same key will be coalesced (such as volume ID) @@ -45,7 +45,7 @@ type Coalescer[InputType any, ResultType any] interface { // mergeFunction = a function to merge a new input with the existing inputs // (should return an error if the new input cannot be combined with the existing inputs, // otherwise return the new merged input) -// executeFunction = the function to call when the delay expires +// executeFunction = the function to call when the delay expires. func New[InputType any, ResultType any](delay time.Duration, mergeFunction func(input InputType, existing InputType) (InputType, error), executeFunction func(key string, input InputType) (ResultType, error), @@ -63,21 +63,21 @@ func New[InputType any, ResultType any](delay time.Duration, return &c } -// Type to store a result or error in channels +// Type to store a result or error in channels. type result[ResultType any] struct { result ResultType err error } // Type to send inputs from Coalesce() to coalescerThread() via channel -// Includes a return channel for the result +// Includes a return channel for the result. type newInput[InputType any, ResultType any] struct { key string input InputType resultChannel chan result[ResultType] } -// Type to store pending inputs in the input map +// Type to store pending inputs in the input map. type pendingInput[InputType any, ResultType any] struct { input InputType resultChannels []chan result[ResultType] diff --git a/pkg/coalescer/coalescer_test.go b/pkg/coalescer/coalescer_test.go index be40167034..ca705fe906 100644 --- a/pkg/coalescer/coalescer_test.go +++ b/pkg/coalescer/coalescer_test.go @@ -18,39 +18,37 @@ package coalescer import ( "errors" - "fmt" "testing" "time" ) var ( - errFailedToMerge = fmt.Errorf("Failed to merge") - errFailedToExecute = fmt.Errorf("Failed to execute") + errFailedToMerge = errors.New("failed to merge") + errFailedToExecute = errors.New("failed to execute") ) // Merge function used to test the coalescer // For testing purposes, positive numbers are added to the existing input, -// and negative numbers return an error ("fail to merge") +// and negative numbers return an error ("fail to merge"). func mockMerge(input int, existing int) (int, error) { if input < 0 { return 0, errFailedToMerge - } else { - return input + existing, nil } + return input + existing, nil } // Execute function used to test the coalescer // For testing purposes, small numbers (numbers less than 100) successfully execute, -// and large numbers (numbers 100 or greater) fail to execute +// and large numbers (numbers 100 or greater) fail to execute. 
func mockExecute(_ string, input int) (string, error) { if input < 100 { return "success", nil - } else { - return "failure", errFailedToExecute } + return "failure", errFailedToExecute } func TestCoalescer(t *testing.T) { + t.Parallel() testCases := []struct { name string inputs []int @@ -95,11 +93,12 @@ func TestCoalescer(t *testing.T) { for range tc.inputs { err := <-testChannel if err != nil { - if errors.Is(err, errFailedToMerge) { + switch { + case errors.Is(err, errFailedToMerge): mergeFailure = true - } else if errors.Is(err, errFailedToExecute) { + case errors.Is(err, errFailedToExecute): executeFailure = true - } else { + default: t.Fatalf("Unexpected error %v", err) } } diff --git a/pkg/driver/constants.go b/pkg/driver/constants.go index 17d6ab69c8..4f5aa7d6f5 100644 --- a/pkg/driver/constants.go +++ b/pkg/driver/constants.go @@ -18,41 +18,41 @@ package driver import "time" -// constants of keys in PublishContext +// constants of keys in PublishContext. const ( // devicePathKey represents key for device path in PublishContext - // devicePath is the device path where the volume is attached to + // devicePath is the device path where the volume is attached to. DevicePathKey = "devicePath" ) -// constants of keys in VolumeContext +// constants of keys in VolumeContext. const ( // VolumeAttributePartition represents key for partition config in VolumeContext - // this represents the partition number on a device used to mount + // this represents the partition number on a device used to mount. VolumeAttributePartition = "partition" ) -// constants of keys in volume parameters +// constants of keys in volume parameters. const ( - // VolumeTypeKey represents key for volume type + // VolumeTypeKey represents key for volume type. VolumeTypeKey = "type" - // IopsPerGBKey represents key for IOPS per GB + // IopsPerGBKey represents key for IOPS per GB. IopsPerGBKey = "iopspergb" - // AllowAutoIOPSPerGBIncreaseKey represents key for allowing automatic increase of IOPS + // AllowAutoIOPSPerGBIncreaseKey represents key for allowing automatic increase of IOPS. AllowAutoIOPSPerGBIncreaseKey = "allowautoiopspergbincrease" - // Iops represents key for IOPS for volume + // Iops represents key for IOPS for volume. IopsKey = "iops" - // ThroughputKey represents key for throughput + // ThroughputKey represents key for throughput. ThroughputKey = "throughput" - // EncryptedKey represents key for whether filesystem is encrypted + // EncryptedKey represents key for whether filesystem is encrypted. EncryptedKey = "encrypted" - // KmsKeyId represents key for KMS encryption key + // KmsKeyId represents key for KMS encryption key. KmsKeyIDKey = "kmskeyid" // PVCNameKey contains name of the PVC for which is a volume provisioned. @@ -62,58 +62,58 @@ const ( PVCNamespaceKey = "csi.storage.k8s.io/pvc/namespace" // PVNameKey contains name of the final PV that will be used for the dynamically - // provisioned volume + // provisioned volume. PVNameKey = "csi.storage.k8s.io/pv/name" - // VolumeSnapshotNameKey contains name of the snapshot + // VolumeSnapshotNameKey contains name of the snapshot. VolumeSnapshotNameKey = "csi.storage.k8s.io/volumesnapshot/name" - // VolumeSnapshotNamespaceKey contains namespace of the snapshot + // VolumeSnapshotNamespaceKey contains namespace of the snapshot. VolumeSnapshotNamespaceKey = "csi.storage.k8s.io/volumesnapshot/namespace" // VolumeSnapshotCotentNameKey contains name of the VolumeSnapshotContent that is the source - // for the snapshot + // for the snapshot. 
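The coalescer hunks above mostly reword comments, but those comments spell out the package's contract: requests sharing a key that arrive within the delay window are merged and executed once, and every non-rejected caller receives the shared result. A hedged usage sketch; the import path and the exact Coalesce(key, input) signature are assumptions inferred from the interface comment, and the merge/execute callbacks are simplified stand-ins rather than the driver's real ones:

```go
package main

import (
	"fmt"
	"time"

	"github.com/kubernetes-sigs/aws-ebs-csi-driver/pkg/coalescer"
)

func main() {
	// Merge adds requests for the same key together; execute runs once per key
	// after the delay expires.
	c := coalescer.New(100*time.Millisecond,
		func(input, existing int) (int, error) { return input + existing, nil },
		func(key string, input int) (string, error) {
			return fmt.Sprintf("%s: %d GiB total", key, input), nil
		})

	// Two callers asking to grow the same volume inside the delay window are
	// merged into a single execution and both see the same result.
	// (A real test would synchronize the two callers explicitly.)
	go func() { _, _ = c.Coalesce("vol-001", 2) }()
	res, err := c.Coalesce("vol-001", 3)
	fmt.Println(res, err)
}
```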
VolumeSnapshotContentNameKey = "csi.storage.k8s.io/volumesnapshotcontent/name" - // BlockExpressKey increases the iops limit for io2 volumes to the block express limit + // BlockExpressKey increases the iops limit for io2 volumes to the block express limit. BlockExpressKey = "blockexpress" // FSTypeKey configures the file system type that will be formatted during volume creation. FSTypeKey = "csi.storage.k8s.io/fstype" - // BlockSizeKey configures the block size when formatting a volume + // BlockSizeKey configures the block size when formatting a volume. BlockSizeKey = "blocksize" - // InodeSizeKey configures the inode size when formatting a volume + // InodeSizeKey configures the inode size when formatting a volume. InodeSizeKey = "inodesize" - // BytesPerInodeKey configures the `bytes-per-inode` when formatting a volume + // BytesPerInodeKey configures the `bytes-per-inode` when formatting a volume. BytesPerInodeKey = "bytesperinode" - // NumberOfInodesKey configures the `number-of-inodes` when formatting a volume + // NumberOfInodesKey configures the `number-of-inodes` when formatting a volume. NumberOfInodesKey = "numberofinodes" - // Ext4ClusterSizeKey enables the bigalloc option when formatting an ext4 volume + // Ext4ClusterSizeKey enables the bigalloc option when formatting an ext4 volume. Ext4BigAllocKey = "ext4bigalloc" - // Ext4ClusterSizeKey configures the cluster size when formatting an ext4 volume with the bigalloc option enabled + // Ext4ClusterSizeKey configures the cluster size when formatting an ext4 volume with the bigalloc option enabled. Ext4ClusterSizeKey = "ext4clustersize" // TagKeyPrefix contains the prefix of a volume parameter that designates it as - // a tag to be attached to the resource + // a tag to be attached to the resource. TagKeyPrefix = "tagSpecification" - // OutpostArn represents key for outpost's arn + // OutpostArn represents key for outpost's arn. OutpostArnKey = "outpostarn" ) -// constants of keys in snapshot parameters +// constants of keys in snapshot parameters. const ( - // FastSnapShotRestoreAvailabilityZones represents key for fast snapshot restore availability zones + // FastSnapShotRestoreAvailabilityZones represents key for fast snapshot restore availability zones. FastSnapshotRestoreAvailabilityZones = "fastsnapshotrestoreavailabilityzones" ) -// constants for volume tags and their values +// constants for volume tags and their values. const ( // ResourceLifecycleTagPrefix is prefix of tag for provisioned EBS volume that // marks them as owned by the cluster. Used only when --cluster-id is set. @@ -153,29 +153,29 @@ const ( PVNameTag = "kubernetes.io/created-for/pv/name" ) -// constants for default command line flag values +// constants for default command line flag values. const ( DefaultCSIEndpoint = "unix://tmp/csi.sock" DefaultModifyVolumeRequestHandlerTimeout = 2 * time.Second ) -// constants for fstypes +// constants for fstypes. const ( - // FSTypeExt2 represents the ext2 filesystem type + // FSTypeExt2 represents the ext2 filesystem type. FSTypeExt2 = "ext2" - // FSTypeExt3 represents the ext3 filesystem type + // FSTypeExt3 represents the ext3 filesystem type. FSTypeExt3 = "ext3" - // FSTypeExt4 represents the ext4 filesystem type + // FSTypeExt4 represents the ext4 filesystem type. FSTypeExt4 = "ext4" - // FSTypeXfs represents the xfs filesystem type + // FSTypeXfs represents the xfs filesystem type. FSTypeXfs = "xfs" - // FSTypeNtfs represents the ntfs filesystem type + // FSTypeNtfs represents the ntfs filesystem type. 
FSTypeNtfs = "ntfs" ) -// constants for node k8s API use +// constants for node k8s API use. const ( - // AgentNotReadyNodeTaintKey contains the key of taints to be removed on driver startup + // AgentNotReadyNodeTaintKey contains the key of taints to be removed on driver startup. AgentNotReadyNodeTaintKey = "ebs.csi.aws.com/agent-not-ready" ) diff --git a/pkg/driver/controller.go b/pkg/driver/controller.go index 5af42fba46..352cc278c2 100644 --- a/pkg/driver/controller.go +++ b/pkg/driver/controller.go @@ -37,14 +37,14 @@ import ( "k8s.io/klog/v2" ) -// Supported access modes +// Supported access modes. const ( SingleNodeWriter = csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER MultiNodeMultiWriter = csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER ) var ( - // controllerCaps represents the capability of controller service + // controllerCaps represents the capability of controller service. controllerCaps = []csi.ControllerServiceCapability_RPC_Type{ csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME, csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME, @@ -55,9 +55,10 @@ var ( } ) -const isManagedByDriver = "true" +const trueStr = "true" +const isManagedByDriver = trueStr -// ControllerService represents the controller service of CSI driver +// ControllerService represents the controller service of CSI driver. type ControllerService struct { cloud cloud.Cloud inFlight *internal.InFlight @@ -67,7 +68,7 @@ type ControllerService struct { csi.UnimplementedControllerServer } -// NewControllerService creates a new controller service +// NewControllerService creates a new controller service. func NewControllerService(c cloud.Cloud, o *Options) *ControllerService { return &ControllerService{ cloud: c, @@ -141,7 +142,7 @@ func (d *ControllerService) CreateVolume(ctx context.Context, req *csi.CreateVol } iopsPerGB = int32(parseIopsPerGBKey) case AllowAutoIOPSPerGBIncreaseKey: - allowIOPSPerGBIncrease = value == "true" + allowIOPSPerGBIncrease = isTrue(value) case IopsKey: parseIopsKey, parseIopsKeyErr := strconv.ParseInt(value, 10, 32) if parseIopsKeyErr != nil { @@ -155,9 +156,7 @@ func (d *ControllerService) CreateVolume(ctx context.Context, req *csi.CreateVol } throughput = int32(parseThroughput) case EncryptedKey: - if value == "true" { - isEncrypted = true - } + isEncrypted = isTrue(value) case KmsKeyIDKey: kmsKeyID = value case PVCNameKey: @@ -170,9 +169,7 @@ func (d *ControllerService) CreateVolume(ctx context.Context, req *csi.CreateVol volumeTags[PVNameTag] = value tProps.PVName = value case BlockExpressKey: - if value == "true" { - blockExpress = true - } + blockExpress = isTrue(value) case BlockSizeKey: if isAlphanumeric := util.StringIsAlphanumeric(value); !isAlphanumeric { return nil, status.Errorf(codes.InvalidArgument, "Could not parse blockSize (%s): %v", value, err) @@ -194,9 +191,7 @@ func (d *ControllerService) CreateVolume(ctx context.Context, req *csi.CreateVol } numberOfInodes = value case Ext4BigAllocKey: - if value == "true" { - ext4BigAlloc = true - } + ext4BigAlloc = isTrue(value) case Ext4ClusterSizeKey: if isAlphanumeric := util.StringIsAlphanumeric(value); !isAlphanumeric { return nil, status.Errorf(codes.InvalidArgument, "Could not parse ext4ClusterSize (%s): %v", value, err) @@ -255,7 +250,7 @@ func (d *ControllerService) CreateVolume(ctx context.Context, req *csi.CreateVol } } if ext4BigAlloc { - responseCtx[Ext4BigAllocKey] = "true" + responseCtx[Ext4BigAllocKey] = trueStr if err = validateFormattingOption(volCap, Ext4BigAllocKey, 
FileSystemConfigs); err != nil { return nil, err } @@ -489,12 +484,13 @@ func validateControllerUnpublishVolumeRequest(req *csi.ControllerUnpublishVolume func (d *ControllerService) ControllerGetCapabilities(ctx context.Context, req *csi.ControllerGetCapabilitiesRequest) (*csi.ControllerGetCapabilitiesResponse, error) { klog.V(4).InfoS("ControllerGetCapabilities: called", "args", req) - var caps []*csi.ControllerServiceCapability - for _, cap := range controllerCaps { + + caps := make([]*csi.ControllerServiceCapability, 0, len(controllerCaps)) + for _, capability := range controllerCaps { c := &csi.ControllerServiceCapability{ Type: &csi.ControllerServiceCapability_Rpc{ Rpc: &csi.ControllerServiceCapability_RPC{ - Type: cap, + Type: capability, }, }, } @@ -568,8 +564,8 @@ func (d *ControllerService) ControllerExpandVolume(ctx context.Context, req *csi nodeExpansionRequired := true // if this is a raw block device, no expansion should be necessary on the node - cap := req.GetVolumeCapability() - if cap != nil && cap.GetBlock() != nil { + capability := req.GetVolumeCapability() + if capability != nil && capability.GetBlock() != nil { nodeExpansionRequired = false } @@ -639,14 +635,14 @@ func isValidCapability(c *csi.VolumeCapability) bool { } } -func isBlock(cap *csi.VolumeCapability) bool { - _, isBlock := cap.GetAccessType().(*csi.VolumeCapability_Block) - return isBlock +func isBlock(capability *csi.VolumeCapability) bool { + _, isBlk := capability.GetAccessType().(*csi.VolumeCapability_Block) + return isBlk } func isValidVolumeContext(volContext map[string]string) bool { - //There could be multiple volume attributes in the volumeContext map - //Validate here case by case + // There could be multiple volume attributes in the volumeContext map + // Validate here case by case if partition, ok := volContext[VolumeAttributePartition]; ok { partitionInt, err := strconv.ParseInt(partition, 10, 64) if err != nil { @@ -754,9 +750,9 @@ func (d *ControllerService) CreateSnapshot(ctx context.Context, req *csi.CreateS // Check if the availability zone is supported for fast snapshot restore if len(fsrAvailabilityZones) > 0 { - zones, error := d.cloud.AvailabilityZones(ctx) - if error != nil { - klog.ErrorS(error, "failed to get availability zones") + zones, err := d.cloud.AvailabilityZones(ctx) + if err != nil { + klog.ErrorS(err, "failed to get availability zones") } else { klog.V(4).InfoS("Availability Zones", "zone", zones) for _, az := range fsrAvailabilityZones { @@ -975,8 +971,7 @@ func newCreateSnapshotResponse(snapshot *cloud.Snapshot) *csi.CreateSnapshotResp } func newListSnapshotsResponse(cloudResponse *cloud.ListSnapshotsResponse) *csi.ListSnapshotsResponse { - - var entries []*csi.ListSnapshotsResponse_Entry + entries := make([]*csi.ListSnapshotsResponse_Entry, 0, len(cloudResponse.Snapshots)) for _, snapshot := range cloudResponse.Snapshots { snapshotResponseEntry := newListSnapshotsResponseEntry(snapshot) entries = append(entries, snapshotResponseEntry) @@ -1016,20 +1011,19 @@ func getVolSizeBytes(req *csi.CreateVolumeRequest) (int64, error) { return volSizeBytes, nil } -// BuildOutpostArn returns the string representation of the outpost ARN from the given csi.TopologyRequirement.segments +// BuildOutpostArn returns the string representation of the outpost ARN from the given csi.TopologyRequirement.segments. 
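Two hunks above (ControllerGetCapabilities and newListSnapshotsResponse) replace `var xs []T` with `make([]T, 0, len(src))` before an append loop, the usual fix for a prealloc-style lint. A minimal sketch of why the zero-length, known-capacity form is preferred:

```go
package main

import "fmt"

type capability struct{ name string }

func main() {
	src := []string{"CREATE_DELETE_VOLUME", "PUBLISH_UNPUBLISH_VOLUME", "EXPAND_VOLUME"}

	// Length 0 but capacity len(src): the appends below never reallocate,
	// while the slice still starts empty so the loop logic is unchanged.
	caps := make([]*capability, 0, len(src))
	for _, s := range src {
		caps = append(caps, &capability{name: s})
	}
	fmt.Println(len(caps), cap(caps)) // 3 3
}
```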
func BuildOutpostArn(segments map[string]string) string { - - if len(segments[AwsPartitionKey]) <= 0 { + if len(segments[AwsPartitionKey]) == 0 { return "" } - if len(segments[AwsRegionKey]) <= 0 { + if len(segments[AwsRegionKey]) == 0 { return "" } - if len(segments[AwsOutpostIDKey]) <= 0 { + if len(segments[AwsOutpostIDKey]) == 0 { return "" } - if len(segments[AwsAccountIDKey]) <= 0 { + if len(segments[AwsAccountIDKey]) == 0 { return "" } @@ -1043,8 +1037,7 @@ func BuildOutpostArn(segments map[string]string) string { func validateFormattingOption(volumeCapabilities []*csi.VolumeCapability, paramName string, fsConfigs map[string]fileSystemConfig) error { for _, volCap := range volumeCapabilities { - switch volCap.GetAccessType().(type) { - case *csi.VolumeCapability_Block: + if isBlock(volCap) { return status.Error(codes.InvalidArgument, fmt.Sprintf("Cannot use %s with block volume", paramName)) } @@ -1061,3 +1054,7 @@ func validateFormattingOption(volumeCapabilities []*csi.VolumeCapability, paramN return nil } + +func isTrue(value string) bool { + return value == trueStr +} diff --git a/pkg/driver/controller_modify_volume.go b/pkg/driver/controller_modify_volume.go index 09b8e1d4ed..10f40b8c7b 100644 --- a/pkg/driver/controller_modify_volume.go +++ b/pkg/driver/controller_modify_volume.go @@ -35,7 +35,7 @@ import ( const ( ModificationKeyVolumeType = "type" - // Retained for backwards compatibility, but not recommended + // Retained for backwards compatibility, but not recommended. DeprecatedModificationKeyVolumeType = "volumeType" ModificationKeyIOPS = "iops" @@ -90,31 +90,31 @@ func newModifyVolumeCoalescer(c cloud.Cloud, o *Options) coalescer.Coalescer[mod func mergeModifyVolumeRequest(input modifyVolumeRequest, existing modifyVolumeRequest) (modifyVolumeRequest, error) { if input.newSize != 0 { if existing.newSize != 0 && input.newSize != existing.newSize { - return existing, fmt.Errorf("Different size was requested by a previous request. Current: %d, Requested: %d", existing.newSize, input.newSize) + return existing, fmt.Errorf("different size was requested by a previous request. Current: %d, Requested: %d", existing.newSize, input.newSize) } existing.newSize = input.newSize } if input.modifyDiskOptions.IOPS != 0 { if existing.modifyDiskOptions.IOPS != 0 && input.modifyDiskOptions.IOPS != existing.modifyDiskOptions.IOPS { - return existing, fmt.Errorf("Different IOPS was requested by a previous request. Current: %d, Requested: %d", existing.modifyDiskOptions.IOPS, input.modifyDiskOptions.IOPS) + return existing, fmt.Errorf("different IOPS was requested by a previous request. Current: %d, Requested: %d", existing.modifyDiskOptions.IOPS, input.modifyDiskOptions.IOPS) } existing.modifyDiskOptions.IOPS = input.modifyDiskOptions.IOPS } if input.modifyDiskOptions.Throughput != 0 { if existing.modifyDiskOptions.Throughput != 0 && input.modifyDiskOptions.Throughput != existing.modifyDiskOptions.Throughput { - return existing, fmt.Errorf("Different throughput was requested by a previous request. Current: %d, Requested: %d", existing.modifyDiskOptions.Throughput, input.modifyDiskOptions.Throughput) + return existing, fmt.Errorf("different throughput was requested by a previous request. 
Current: %d, Requested: %d", existing.modifyDiskOptions.Throughput, input.modifyDiskOptions.Throughput) } existing.modifyDiskOptions.Throughput = input.modifyDiskOptions.Throughput } if input.modifyDiskOptions.VolumeType != "" { if existing.modifyDiskOptions.VolumeType != "" && input.modifyDiskOptions.VolumeType != existing.modifyDiskOptions.VolumeType { - return existing, fmt.Errorf("Different volume type was requested by a previous request. Current: %s, Requested: %s", existing.modifyDiskOptions.VolumeType, input.modifyDiskOptions.VolumeType) + return existing, fmt.Errorf("different volume type was requested by a previous request. Current: %s, Requested: %s", existing.modifyDiskOptions.VolumeType, input.modifyDiskOptions.VolumeType) } existing.modifyDiskOptions.VolumeType = input.modifyDiskOptions.VolumeType } if len(input.modifyTagsOptions.TagsToAdd) > 0 || len(input.modifyTagsOptions.TagsToDelete) > 0 { if (len(existing.modifyTagsOptions.TagsToAdd) > 0 || len(existing.modifyTagsOptions.TagsToDelete) > 0) && !(reflect.DeepEqual(input.modifyTagsOptions, existing.modifyTagsOptions)) { - return existing, fmt.Errorf("Different tags were requested by a previous request. Current: %v, Requested: %v", existing.modifyTagsOptions, input.modifyTagsOptions) + return existing, fmt.Errorf("different tags were requested by a previous request. Current: %v, Requested: %v", existing.modifyTagsOptions, input.modifyTagsOptions) } existing.modifyTagsOptions = cloud.ModifyTagsOptions{ TagsToAdd: input.modifyTagsOptions.TagsToAdd, @@ -171,13 +171,13 @@ func parseModifyVolumeParameters(params map[string]string) (*modifyVolumeRequest for key, value := range params { switch key { case ModificationKeyIOPS: - iops, err := strconv.Atoi(value) + iops, err := strconv.ParseInt(value, 10, 32) if err != nil { return nil, status.Errorf(codes.InvalidArgument, "Could not parse IOPS: %q", value) } options.modifyDiskOptions.IOPS = int32(iops) case ModificationKeyThroughput: - throughput, err := strconv.Atoi(value) + throughput, err := strconv.ParseInt(value, 10, 32) if err != nil { return nil, status.Errorf(codes.InvalidArgument, "Could not parse throughput: %q", value) } @@ -192,15 +192,16 @@ func parseModifyVolumeParameters(params map[string]string) (*modifyVolumeRequest case ModificationKeyVolumeType: options.modifyDiskOptions.VolumeType = value default: - if strings.HasPrefix(key, ModificationAddTag) { + switch { + case strings.HasPrefix(key, ModificationAddTag): st := strings.SplitN(value, "=", 2) if len(st) < 2 { return nil, status.Errorf(codes.InvalidArgument, "Invalid tag specification: %v", st) } options.modifyTagsOptions.TagsToAdd[st[0]] = st[1] - } else if strings.HasPrefix(key, ModificationDeleteTag) { + case strings.HasPrefix(key, ModificationDeleteTag): options.modifyTagsOptions.TagsToDelete = append(options.modifyTagsOptions.TagsToDelete, value) - } else { + default: return nil, status.Errorf(codes.InvalidArgument, "Invalid mutable parameter key: %s", key) } } diff --git a/pkg/driver/controller_test.go b/pkg/driver/controller_test.go index 63fc630c1a..b027d0a235 100644 --- a/pkg/driver/controller_test.go +++ b/pkg/driver/controller_test.go @@ -45,6 +45,8 @@ const ( expZone = "us-west-2b" expInstanceID = "i-123456789abcdef01" expDevicePath = "/dev/xvda" + + testOutpostARN = "arn:aws:outposts:us-west-2:111111111111:outpost/op-0aaa000a0aaaa00a0" ) func TestCreateVolume(t *testing.T) { @@ -88,7 +90,7 @@ func TestCreateVolume(t *testing.T) { stdVolSize := int64(5 * 1024 * 1024 * 1024) stdCapRange := 
&csi.CapacityRange{RequiredBytes: stdVolSize} stdParams := map[string]string{} - rawOutpostArn := "arn:aws:outposts:us-west-2:111111111111:outpost/op-0aaa000a0aaaa00a0" + rawOutpostArn := testOutpostARN strippedOutpostArn, _ := arn.Parse(strings.ReplaceAll(rawOutpostArn, "outpost/", "")) testCases := []struct { @@ -98,6 +100,7 @@ func TestCreateVolume(t *testing.T) { { name: "success normal", testFunc: func(t *testing.T) { + t.Helper() req := &csi.CreateVolumeRequest{ Name: "random-vol-name", CapacityRange: stdCapRange, @@ -137,6 +140,7 @@ func TestCreateVolume(t *testing.T) { { name: "success outposts", testFunc: func(t *testing.T) { + t.Helper() outpostArn := strippedOutpostArn req := &csi.CreateVolumeRequest{ Name: "test-vol", @@ -227,6 +231,7 @@ func TestCreateVolume(t *testing.T) { { name: "restore snapshot", testFunc: func(t *testing.T) { + t.Helper() req := &csi.CreateVolumeRequest{ Name: "random-vol-name", CapacityRange: stdCapRange, @@ -283,6 +288,7 @@ func TestCreateVolume(t *testing.T) { { name: "restore snapshot, volume already exists", testFunc: func(t *testing.T) { + t.Helper() req := &csi.CreateVolumeRequest{ Name: "random-vol-name", CapacityRange: stdCapRange, @@ -339,6 +345,7 @@ func TestCreateVolume(t *testing.T) { { name: "restore snapshot, volume already exists with different snapshot ID", testFunc: func(t *testing.T) { + t.Helper() req := &csi.CreateVolumeRequest{ Name: "random-vol-name", CapacityRange: stdCapRange, @@ -374,6 +381,7 @@ func TestCreateVolume(t *testing.T) { { name: "fail no name", testFunc: func(t *testing.T) { + t.Helper() req := &csi.CreateVolumeRequest{ Name: "", CapacityRange: stdCapRange, @@ -410,6 +418,7 @@ func TestCreateVolume(t *testing.T) { { name: "success same name and same capacity", testFunc: func(t *testing.T) { + t.Helper() req := &csi.CreateVolumeRequest{ Name: "test-vol", CapacityRange: stdCapRange, @@ -497,6 +506,7 @@ func TestCreateVolume(t *testing.T) { { name: "fail same name and different capacity", testFunc: func(t *testing.T) { + t.Helper() req := &csi.CreateVolumeRequest{ Name: "test-vol", CapacityRange: stdCapRange, @@ -561,6 +571,7 @@ func TestCreateVolume(t *testing.T) { { name: "success no capacity range", testFunc: func(t *testing.T) { + t.Helper() req := &csi.CreateVolumeRequest{ Name: "test-vol", VolumeCapabilities: stdVolCap, @@ -621,6 +632,7 @@ func TestCreateVolume(t *testing.T) { { name: "success with correct round up", testFunc: func(t *testing.T) { + t.Helper() req := &csi.CreateVolumeRequest{ Name: "vol-test", CapacityRange: &csi.CapacityRange{RequiredBytes: 1073741825}, @@ -675,6 +687,7 @@ func TestCreateVolume(t *testing.T) { { name: "success with volume type gp3", testFunc: func(t *testing.T) { + t.Helper() // iops 5000 requires at least 10GB volSize := int64(20 * 1024 * 1024 * 1024) capRange := &csi.CapacityRange{RequiredBytes: volSize} @@ -721,6 +734,7 @@ func TestCreateVolume(t *testing.T) { { name: "success with volume type io1 using iopsPerGB", testFunc: func(t *testing.T) { + t.Helper() req := &csi.CreateVolumeRequest{ Name: "vol-test", CapacityRange: stdCapRange, @@ -763,6 +777,7 @@ func TestCreateVolume(t *testing.T) { { name: "success with volume type io1 using iops", testFunc: func(t *testing.T) { + t.Helper() req := &csi.CreateVolumeRequest{ Name: "vol-test", CapacityRange: stdCapRange, @@ -805,6 +820,7 @@ func TestCreateVolume(t *testing.T) { { name: "success with volume type io2 using iopsPerGB", testFunc: func(t *testing.T) { + t.Helper() req := &csi.CreateVolumeRequest{ Name: "vol-test", 
CapacityRange: stdCapRange, @@ -847,6 +863,7 @@ func TestCreateVolume(t *testing.T) { { name: "success with volume type io2 using iops", testFunc: func(t *testing.T) { + t.Helper() req := &csi.CreateVolumeRequest{ Name: "vol-test", CapacityRange: stdCapRange, @@ -889,6 +906,7 @@ func TestCreateVolume(t *testing.T) { { name: "success with volume type sc1", testFunc: func(t *testing.T) { + t.Helper() req := &csi.CreateVolumeRequest{ Name: "vol-test", CapacityRange: stdCapRange, @@ -930,6 +948,7 @@ func TestCreateVolume(t *testing.T) { { name: "success with volume type standard", testFunc: func(t *testing.T) { + t.Helper() req := &csi.CreateVolumeRequest{ Name: "vol-test", CapacityRange: stdCapRange, @@ -971,6 +990,7 @@ func TestCreateVolume(t *testing.T) { { name: "success with volume encryption", testFunc: func(t *testing.T) { + t.Helper() req := &csi.CreateVolumeRequest{ Name: "vol-test", CapacityRange: stdCapRange, @@ -1012,6 +1032,7 @@ func TestCreateVolume(t *testing.T) { { name: "success with volume encryption with KMS key", testFunc: func(t *testing.T) { + t.Helper() req := &csi.CreateVolumeRequest{ Name: "vol-test", CapacityRange: stdCapRange, @@ -1054,6 +1075,7 @@ func TestCreateVolume(t *testing.T) { { name: "success with mutable parameters", testFunc: func(t *testing.T) { + t.Helper() volSize := int64(20 * 1024 * 1024 * 1024) capRange := &csi.CapacityRange{RequiredBytes: volSize} req := &csi.CreateVolumeRequest{ @@ -1103,6 +1125,7 @@ func TestCreateVolume(t *testing.T) { { name: "fail with invalid volume parameter", testFunc: func(t *testing.T) { + t.Helper() req := &csi.CreateVolumeRequest{ Name: "vol-test", CapacityRange: stdCapRange, @@ -1144,6 +1167,7 @@ func TestCreateVolume(t *testing.T) { { name: "fail with invalid iops parameter", testFunc: func(t *testing.T) { + t.Helper() req := &csi.CreateVolumeRequest{ Name: "vol-test", CapacityRange: stdCapRange, @@ -1184,6 +1208,7 @@ func TestCreateVolume(t *testing.T) { { name: "fail with invalid throughput parameter", testFunc: func(t *testing.T) { + t.Helper() req := &csi.CreateVolumeRequest{ Name: "vol-test", CapacityRange: stdCapRange, @@ -1224,6 +1249,7 @@ func TestCreateVolume(t *testing.T) { { name: "success when volume exists and contains VolumeContext and AccessibleTopology", testFunc: func(t *testing.T) { + t.Helper() req := &csi.CreateVolumeRequest{ Name: "test-vol", CapacityRange: stdCapRange, @@ -1321,6 +1347,7 @@ func TestCreateVolume(t *testing.T) { { name: "success with extra tags", testFunc: func(t *testing.T) { + t.Helper() const ( volumeName = "random-vol-name" extraVolumeTagKey = "extra-tag-key" @@ -1379,6 +1406,7 @@ func TestCreateVolume(t *testing.T) { { name: "success with cluster-id", testFunc: func(t *testing.T) { + t.Helper() const ( volumeName = "random-vol-name" clusterID = "test-cluster-id" @@ -1442,6 +1470,7 @@ func TestCreateVolume(t *testing.T) { { name: "success with legacy tags", testFunc: func(t *testing.T) { + t.Helper() const ( volumeName = "random-vol-name" expectedPVCNameTag = "kubernetes.io/created-for/pvc/name" @@ -1506,6 +1535,7 @@ func TestCreateVolume(t *testing.T) { { name: "fail with invalid volume access modes", testFunc: func(t *testing.T) { + t.Helper() req := &csi.CreateVolumeRequest{ Name: "vol-test", CapacityRange: stdCapRange, @@ -1547,6 +1577,7 @@ func TestCreateVolume(t *testing.T) { { name: "fail with in-flight request", testFunc: func(t *testing.T) { + t.Helper() req := &csi.CreateVolumeRequest{ Name: "random-vol-name", CapacityRange: stdCapRange, @@ -1579,6 +1610,7 @@ func 
TestCreateVolume(t *testing.T) { { name: "Fail with IdempotentParameterMismatch error", testFunc: func(t *testing.T) { + t.Helper() req := &csi.CreateVolumeRequest{ Name: "vol-test", CapacityRange: stdCapRange, @@ -1606,6 +1638,7 @@ func TestCreateVolume(t *testing.T) { { name: "success multi-attach", testFunc: func(t *testing.T) { + t.Helper() req := &csi.CreateVolumeRequest{ Name: "random-vol-name", CapacityRange: stdCapRange, @@ -1645,6 +1678,7 @@ func TestCreateVolume(t *testing.T) { { name: "fail multi-attach - invalid mount capability", testFunc: func(t *testing.T) { + t.Helper() req := &csi.CreateVolumeRequest{ Name: "random-vol-name", CapacityRange: stdCapRange, @@ -1870,6 +1904,7 @@ func TestDeleteVolume(t *testing.T) { { name: "success normal", testFunc: func(t *testing.T) { + t.Helper() req := &csi.DeleteVolumeRequest{ VolumeId: "vol-test", } @@ -1902,6 +1937,7 @@ func TestDeleteVolume(t *testing.T) { { name: "success invalid volume id", testFunc: func(t *testing.T) { + t.Helper() req := &csi.DeleteVolumeRequest{ VolumeId: "invalid-volume-name", } @@ -1934,6 +1970,7 @@ func TestDeleteVolume(t *testing.T) { { name: "fail delete disk", testFunc: func(t *testing.T) { + t.Helper() req := &csi.DeleteVolumeRequest{ VolumeId: "test-vol", } @@ -1943,7 +1980,7 @@ func TestDeleteVolume(t *testing.T) { defer mockCtl.Finish() mockCloud := cloud.NewMockCloud(mockCtl) - mockCloud.EXPECT().DeleteDisk(gomock.Eq(ctx), gomock.Eq(req.GetVolumeId())).Return(false, fmt.Errorf("DeleteDisk could not delete volume")) + mockCloud.EXPECT().DeleteDisk(gomock.Eq(ctx), gomock.Eq(req.GetVolumeId())).Return(false, errors.New("DeleteDisk could not delete volume")) awsDriver := ControllerService{ cloud: mockCloud, inFlight: internal.NewInFlight(), @@ -1970,6 +2007,7 @@ func TestDeleteVolume(t *testing.T) { { name: "fail another request already in-flight", testFunc: func(t *testing.T) { + t.Helper() req := &csi.DeleteVolumeRequest{ VolumeId: "vol-test", } @@ -2085,7 +2123,7 @@ func TestPickAvailabilityZone(t *testing.T) { } func TestGetOutpostArn(t *testing.T) { - expRawOutpostArn := "arn:aws:outposts:us-west-2:111111111111:outpost/op-0aaa000a0aaaa00a0" + expRawOutpostArn := testOutpostARN outpostArn, _ := arn.Parse(strings.ReplaceAll(expRawOutpostArn, "outpost/", "")) testCases := []struct { name string @@ -2162,7 +2200,7 @@ func TestGetOutpostArn(t *testing.T) { } func TestBuildOutpostArn(t *testing.T) { - expRawOutpostArn := "arn:aws:outposts:us-west-2:111111111111:outpost/op-0aaa000a0aaaa00a0" + expRawOutpostArn := testOutpostARN testCases := []struct { name string awsPartition string @@ -2233,6 +2271,7 @@ func TestCreateSnapshot(t *testing.T) { { name: "success normal", testFunc: func(t *testing.T) { + t.Helper() req := &csi.CreateSnapshotRequest{ Name: "test-snapshot", Parameters: nil, @@ -2274,6 +2313,7 @@ func TestCreateSnapshot(t *testing.T) { { name: "success outpost", testFunc: func(t *testing.T) { + t.Helper() req := &csi.CreateSnapshotRequest{ Name: "test-snapshot", Parameters: map[string]string{ @@ -2317,6 +2357,7 @@ func TestCreateSnapshot(t *testing.T) { { name: "success with cluster-id", testFunc: func(t *testing.T) { + t.Helper() const ( snapshotName = "test-snapshot" clusterID = "test-cluster-id" @@ -2376,6 +2417,7 @@ func TestCreateSnapshot(t *testing.T) { { name: "success with extra tags", testFunc: func(t *testing.T) { + t.Helper() const ( snapshotName = "test-snapshot" extraVolumeTagKey = "extra-tag-key" @@ -2433,6 +2475,7 @@ func TestCreateSnapshot(t *testing.T) { { name: "fail no name", 
testFunc: func(t *testing.T) { + t.Helper() req := &csi.CreateSnapshotRequest{ Parameters: nil, SourceVolumeId: "vol-test", @@ -2464,6 +2507,7 @@ func TestCreateSnapshot(t *testing.T) { { name: "fail outpost arn not valid", testFunc: func(t *testing.T) { + t.Helper() req := &csi.CreateSnapshotRequest{ Name: "test-snapshot", Parameters: map[string]string{ @@ -2487,12 +2531,12 @@ func TestCreateSnapshot(t *testing.T) { } _, err := awsDriver.CreateSnapshot(context.Background(), req) checkExpectedErrorCode(t, err, codes.InvalidArgument) - }, }, { name: "fail same name different volume ID", testFunc: func(t *testing.T) { + t.Helper() req := &csi.CreateSnapshotRequest{ Name: "test-snapshot", Parameters: nil, @@ -2560,6 +2604,7 @@ func TestCreateSnapshot(t *testing.T) { { name: "success same name same volume ID", testFunc: func(t *testing.T) { + t.Helper() req := &csi.CreateSnapshotRequest{ Name: "test-snapshot", Parameters: nil, @@ -2612,6 +2657,7 @@ func TestCreateSnapshot(t *testing.T) { { name: "fail with another request in-flight", testFunc: func(t *testing.T) { + t.Helper() req := &csi.CreateSnapshotRequest{ Name: "test-snapshot", Parameters: nil, @@ -2640,6 +2686,7 @@ func TestCreateSnapshot(t *testing.T) { { name: "success with VolumeSnapshotClass tags", testFunc: func(t *testing.T) { + t.Helper() const ( snapshotName = "test-snapshot" extraTagKey = "test-key" @@ -2697,10 +2744,11 @@ func TestCreateSnapshot(t *testing.T) { { name: "success with VolumeSnapshotClass with Name tag and cluster id", testFunc: func(t *testing.T) { + t.Helper() const ( snapshotName = "test-snapshot" nameTagValue = "test-name-tag-value" - clusterId = "test-cluster-id" + clusterID = "test-cluster-id" ) req := &csi.CreateSnapshotRequest{ @@ -2729,7 +2777,7 @@ func TestCreateSnapshot(t *testing.T) { cloud.SnapshotNameTagKey: snapshotName, cloud.AwsEbsDriverTagKey: isManagedByDriver, NameTag: nameTagValue, - ResourceLifecycleTagPrefix + clusterId: ResourceLifecycleOwned, + ResourceLifecycleTagPrefix + clusterID: ResourceLifecycleOwned, }, } @@ -2740,7 +2788,7 @@ func TestCreateSnapshot(t *testing.T) { awsDriver := ControllerService{ cloud: mockCloud, inFlight: internal.NewInFlight(), - options: &Options{KubernetesClusterID: clusterId}, + options: &Options{KubernetesClusterID: clusterID}, } resp, err := awsDriver.CreateSnapshot(context.Background(), req) if err != nil { @@ -2755,6 +2803,7 @@ func TestCreateSnapshot(t *testing.T) { { name: "success with EnableFastSnapshotRestore - normal", testFunc: func(t *testing.T) { + t.Helper() const ( snapshotName = "test-snapshot" ) @@ -2820,6 +2869,7 @@ func TestCreateSnapshot(t *testing.T) { { name: "success with EnableFastSnapshotRestore - failed to get availability zones", testFunc: func(t *testing.T) { + t.Helper() const ( snapshotName = "test-snapshot" ) @@ -2861,7 +2911,7 @@ func TestCreateSnapshot(t *testing.T) { mockCloud := cloud.NewMockCloud(mockCtl) mockCloud.EXPECT().GetSnapshotByName(gomock.Eq(ctx), gomock.Eq(req.GetName())).Return(nil, cloud.ErrNotFound).AnyTimes() - mockCloud.EXPECT().AvailabilityZones(gomock.Eq(ctx)).Return(nil, fmt.Errorf("error describing availability zones")).AnyTimes() + mockCloud.EXPECT().AvailabilityZones(gomock.Eq(ctx)).Return(nil, errors.New("error describing availability zones")).AnyTimes() mockCloud.EXPECT().CreateSnapshot(gomock.Eq(ctx), gomock.Eq(req.GetSourceVolumeId()), gomock.Eq(snapshotOptions)).Return(mockSnapshot, nil).AnyTimes() mockCloud.EXPECT().EnableFastSnapshotRestores(gomock.Eq(ctx), gomock.Eq([]string{"us-east-1a", 
"us-east-1f"}), gomock.Eq(mockSnapshot.SnapshotID)).Return(expOutput, nil).AnyTimes() @@ -2884,6 +2934,7 @@ func TestCreateSnapshot(t *testing.T) { { name: "fail with EnableFastSnapshotRestore - call to enable FSR failed", testFunc: func(t *testing.T) { + t.Helper() const ( snapshotName = "test-snapshot" ) @@ -2928,10 +2979,10 @@ func TestCreateSnapshot(t *testing.T) { mockCloud := cloud.NewMockCloud(mockCtl) mockCloud.EXPECT().GetSnapshotByName(gomock.Eq(ctx), gomock.Eq(req.GetName())).Return(nil, cloud.ErrNotFound).AnyTimes() - mockCloud.EXPECT().AvailabilityZones(gomock.Eq(ctx)).Return(nil, fmt.Errorf("error describing availability zones")).AnyTimes() + mockCloud.EXPECT().AvailabilityZones(gomock.Eq(ctx)).Return(nil, errors.New("error describing availability zones")).AnyTimes() mockCloud.EXPECT().CreateSnapshot(gomock.Eq(ctx), gomock.Eq(req.GetSourceVolumeId()), gomock.Eq(snapshotOptions)).Return(mockSnapshot, nil).AnyTimes() mockCloud.EXPECT().EnableFastSnapshotRestores(gomock.Eq(ctx), gomock.Eq([]string{"us-west-1a", "us-east-1f"}), gomock.Eq(mockSnapshot.SnapshotID)). - Return(expOutput, fmt.Errorf("Failed to create Fast Snapshot Restores")).AnyTimes() + Return(expOutput, errors.New("Failed to create Fast Snapshot Restores")).AnyTimes() mockCloud.EXPECT().DeleteSnapshot(gomock.Eq(ctx), gomock.Eq(mockSnapshot.SnapshotID)).Return(true, nil).AnyTimes() awsDriver := ControllerService{ @@ -2949,6 +3000,7 @@ func TestCreateSnapshot(t *testing.T) { { name: "fail with EnableFastSnapshotRestore - invalid availability zones", testFunc: func(t *testing.T) { + t.Helper() const ( snapshotName = "test-snapshot" ) @@ -2985,6 +3037,7 @@ func TestCreateSnapshot(t *testing.T) { { name: "fail with EnableFastSnapshotRestore", testFunc: func(t *testing.T) { + t.Helper() const ( snapshotName = "test-snapshot" ) @@ -3020,7 +3073,7 @@ func TestCreateSnapshot(t *testing.T) { "us-east-1a": {}, "us-east-1f": {}}, nil).AnyTimes() mockCloud.EXPECT().CreateSnapshot(gomock.Eq(ctx), gomock.Eq(req.GetSourceVolumeId()), gomock.Eq(snapshotOptions)).Return(mockSnapshot, nil).AnyTimes() mockCloud.EXPECT().EnableFastSnapshotRestores(gomock.Eq(ctx), gomock.Eq([]string{"us-east-1a", "us-east-1f"}), - gomock.Eq(mockSnapshot.SnapshotID)).Return(nil, fmt.Errorf("error")).AnyTimes() + gomock.Eq(mockSnapshot.SnapshotID)).Return(nil, errors.New("error")).AnyTimes() mockCloud.EXPECT().DeleteSnapshot(gomock.Eq(ctx), gomock.Eq(mockSnapshot.SnapshotID)).Return(true, nil).AnyTimes() awsDriver := ControllerService{ @@ -3050,6 +3103,7 @@ func TestDeleteSnapshot(t *testing.T) { { name: "success normal", testFunc: func(t *testing.T) { + t.Helper() ctx := context.Background() mockCtl := gomock.NewController(t) @@ -3075,6 +3129,7 @@ func TestDeleteSnapshot(t *testing.T) { { name: "success not found", testFunc: func(t *testing.T) { + t.Helper() ctx := context.Background() mockCtl := gomock.NewController(t) @@ -3100,6 +3155,7 @@ func TestDeleteSnapshot(t *testing.T) { { name: "fail with another request in-flight", testFunc: func(t *testing.T) { + t.Helper() ctx := context.Background() mockCtl := gomock.NewController(t) @@ -3140,6 +3196,7 @@ func TestListSnapshots(t *testing.T) { { name: "success normal", testFunc: func(t *testing.T) { + t.Helper() req := &csi.ListSnapshotsRequest{} mockCloudSnapshotsResponse := &cloud.ListSnapshotsResponse{ Snapshots: []*cloud.Snapshot{ @@ -3185,6 +3242,7 @@ func TestListSnapshots(t *testing.T) { { name: "success no snapshots", testFunc: func(t *testing.T) { + t.Helper() req := &csi.ListSnapshotsRequest{} 
ctx := context.Background() mockCtl := gomock.NewController(t) @@ -3212,6 +3270,7 @@ func TestListSnapshots(t *testing.T) { { name: "success snapshot ID", testFunc: func(t *testing.T) { + t.Helper() req := &csi.ListSnapshotsRequest{ SnapshotId: "snapshot-1", } @@ -3248,6 +3307,7 @@ func TestListSnapshots(t *testing.T) { { name: "success snapshot ID not found", testFunc: func(t *testing.T) { + t.Helper() req := &csi.ListSnapshotsRequest{ SnapshotId: "snapshot-1", } @@ -3278,6 +3338,7 @@ func TestListSnapshots(t *testing.T) { { name: "fail snapshot ID multiple found", testFunc: func(t *testing.T) { + t.Helper() req := &csi.ListSnapshotsRequest{ SnapshotId: "snapshot-1", } @@ -3311,6 +3372,7 @@ func TestListSnapshots(t *testing.T) { { name: "fail 0 < MaxEntries < 5", testFunc: func(t *testing.T) { + t.Helper() req := &csi.ListSnapshotsRequest{ MaxEntries: 4, } @@ -3359,21 +3421,21 @@ func TestControllerPublishVolume(t *testing.T) { testCases := []struct { name string - volumeId string - nodeId string + volumeID string + nodeID string volumeCapability *csi.VolumeCapability - mockAttach func(mockCloud *cloud.MockCloud, ctx context.Context, volumeId string, nodeId string) + mockAttach func(mockCloud *cloud.MockCloud, ctx context.Context, volumeID string, nodeID string) expResp *csi.ControllerPublishVolumeResponse errorCode codes.Code setupFunc func(ControllerService *ControllerService) }{ { name: "AttachDisk successfully with valid volume ID, node ID, and volume capability", - volumeId: "vol-test", - nodeId: expInstanceID, + volumeID: "vol-test", + nodeID: expInstanceID, volumeCapability: stdVolCap, - mockAttach: func(mockCloud *cloud.MockCloud, ctx context.Context, volumeId string, nodeId string) { - mockCloud.EXPECT().AttachDisk(gomock.Eq(ctx), volumeId, gomock.Eq(nodeId)).Return(expDevicePath, nil) + mockAttach: func(mockCloud *cloud.MockCloud, ctx context.Context, volumeID string, nodeID string) { + mockCloud.EXPECT().AttachDisk(gomock.Eq(ctx), volumeID, gomock.Eq(nodeID)).Return(expDevicePath, nil) }, expResp: &csi.ControllerPublishVolumeResponse{ PublishContext: map[string]string{DevicePathKey: expDevicePath}, @@ -3382,11 +3444,11 @@ func TestControllerPublishVolume(t *testing.T) { }, { name: "AttachDisk when volume is already attached to the node", - volumeId: "vol-test", - nodeId: expInstanceID, + volumeID: "vol-test", + nodeID: expInstanceID, volumeCapability: stdVolCap, - mockAttach: func(mockCloud *cloud.MockCloud, ctx context.Context, volumeId string, nodeId string) { - mockCloud.EXPECT().AttachDisk(gomock.Eq(ctx), gomock.Eq(volumeId), gomock.Eq(expInstanceID)).Return(expDevicePath, nil) + mockAttach: func(mockCloud *cloud.MockCloud, ctx context.Context, volumeID string, nodeID string) { + mockCloud.EXPECT().AttachDisk(gomock.Eq(ctx), gomock.Eq(volumeID), gomock.Eq(expInstanceID)).Return(expDevicePath, nil) }, expResp: &csi.ControllerPublishVolumeResponse{ PublishContext: map[string]string{DevicePathKey: expDevicePath}, @@ -3396,28 +3458,28 @@ func TestControllerPublishVolume(t *testing.T) { { name: "Invalid argument error when no VolumeId provided", - volumeId: "", - nodeId: expInstanceID, + volumeID: "", + nodeID: expInstanceID, volumeCapability: stdVolCap, errorCode: codes.InvalidArgument, }, { name: "Invalid argument error when no NodeId provided", - volumeId: "vol-test", - nodeId: "", + volumeID: "vol-test", + nodeID: "", volumeCapability: stdVolCap, errorCode: codes.InvalidArgument, }, { name: "Invalid argument error when no VolumeCapability provided", - volumeId: "vol-test", - 
nodeId: expInstanceID, + volumeID: "vol-test", + nodeID: expInstanceID, errorCode: codes.InvalidArgument, }, { name: "Invalid argument error when invalid VolumeCapability provided", - volumeId: "vol-test", - nodeId: expInstanceID, + volumeID: "vol-test", + nodeID: expInstanceID, volumeCapability: &csi.VolumeCapability{ AccessMode: &csi.VolumeCapability_AccessMode{ Mode: csi.VolumeCapability_AccessMode_UNKNOWN, @@ -3427,40 +3489,40 @@ func TestControllerPublishVolume(t *testing.T) { }, { name: "Internal error when AttachDisk fails", - volumeId: "vol-test", - nodeId: expInstanceID, + volumeID: "vol-test", + nodeID: expInstanceID, volumeCapability: stdVolCap, - mockAttach: func(mockCloud *cloud.MockCloud, ctx context.Context, volumeId string, nodeId string) { - mockCloud.EXPECT().AttachDisk(gomock.Eq(ctx), gomock.Eq(volumeId), gomock.Eq(expInstanceID)).Return("", status.Error(codes.Internal, "test error")) + mockAttach: func(mockCloud *cloud.MockCloud, ctx context.Context, volumeID string, nodeID string) { + mockCloud.EXPECT().AttachDisk(gomock.Eq(ctx), gomock.Eq(volumeID), gomock.Eq(expInstanceID)).Return("", status.Error(codes.Internal, "test error")) }, errorCode: codes.Internal, }, { name: "Fail when node does not exist", - volumeId: "vol-test", - nodeId: expInstanceID, + volumeID: "vol-test", + nodeID: expInstanceID, volumeCapability: stdVolCap, - mockAttach: func(mockCloud *cloud.MockCloud, ctx context.Context, volumeId string, nodeId string) { - mockCloud.EXPECT().AttachDisk(gomock.Eq(ctx), gomock.Eq(volumeId), gomock.Eq(nodeId)).Return("", status.Error(codes.Internal, "test error")) + mockAttach: func(mockCloud *cloud.MockCloud, ctx context.Context, volumeID string, nodeID string) { + mockCloud.EXPECT().AttachDisk(gomock.Eq(ctx), gomock.Eq(volumeID), gomock.Eq(nodeID)).Return("", status.Error(codes.Internal, "test error")) }, errorCode: codes.Internal, }, { name: "Fail when volume does not exist", - volumeId: "vol-test", - nodeId: expInstanceID, + volumeID: "vol-test", + nodeID: expInstanceID, volumeCapability: stdVolCap, - mockAttach: func(mockCloud *cloud.MockCloud, ctx context.Context, volumeId string, nodeId string) { - mockCloud.EXPECT().AttachDisk(gomock.Eq(ctx), gomock.Eq(volumeId), gomock.Eq(expInstanceID)).Return("", status.Error(codes.Internal, "volume not found")) + mockAttach: func(mockCloud *cloud.MockCloud, ctx context.Context, volumeID string, nodeID string) { + mockCloud.EXPECT().AttachDisk(gomock.Eq(ctx), gomock.Eq(volumeID), gomock.Eq(expInstanceID)).Return("", status.Error(codes.Internal, "volume not found")) }, errorCode: codes.Internal, }, { name: "Aborted error when AttachDisk operation already in-flight", - volumeId: "vol-test", - nodeId: expInstanceID, + volumeID: "vol-test", + nodeID: expInstanceID, volumeCapability: stdVolCap, - mockAttach: func(mockCloud *cloud.MockCloud, ctx context.Context, volumeId string, nodeId string) { + mockAttach: func(mockCloud *cloud.MockCloud, ctx context.Context, volumeID string, nodeID string) { }, errorCode: codes.Aborted, setupFunc: func(ControllerService *ControllerService) { @@ -3472,9 +3534,9 @@ func TestControllerPublishVolume(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { req := &csi.ControllerPublishVolumeRequest{ - NodeId: tc.nodeId, + NodeId: tc.nodeID, VolumeCapability: tc.volumeCapability, - VolumeId: tc.volumeId, + VolumeId: tc.volumeID, } ctx := context.Background() @@ -3505,59 +3567,58 @@ func TestControllerPublishVolume(t *testing.T) { func TestControllerUnpublishVolume(t 
*testing.T) { testCases := []struct { name string - volumeId string - nodeId string + volumeID string + nodeID string errorCode codes.Code - mockDetach func(mockCloud *cloud.MockCloud, ctx context.Context, volumeId string, nodeId string) + mockDetach func(mockCloud *cloud.MockCloud, ctx context.Context, volumeID string, nodeID string) expResp *csi.ControllerUnpublishVolumeResponse setupFunc func(driver *ControllerService) }{ { name: "DetachDisk successfully with valid volume ID and node ID", - volumeId: "vol-test", - nodeId: expInstanceID, + volumeID: "vol-test", + nodeID: expInstanceID, errorCode: codes.OK, - mockDetach: func(mockCloud *cloud.MockCloud, ctx context.Context, volumeId string, nodeId string) { - mockCloud.EXPECT().DetachDisk(gomock.Eq(ctx), volumeId, nodeId).Return(nil) - + mockDetach: func(mockCloud *cloud.MockCloud, ctx context.Context, volumeID string, nodeID string) { + mockCloud.EXPECT().DetachDisk(gomock.Eq(ctx), volumeID, nodeID).Return(nil) }, expResp: &csi.ControllerUnpublishVolumeResponse{}, }, { name: "Return success when volume not found during DetachDisk operation", - volumeId: "vol-not-found", - nodeId: expInstanceID, + volumeID: "vol-not-found", + nodeID: expInstanceID, errorCode: codes.OK, - mockDetach: func(mockCloud *cloud.MockCloud, ctx context.Context, volumeId string, nodeId string) { - mockCloud.EXPECT().DetachDisk(gomock.Eq(ctx), volumeId, nodeId).Return(cloud.ErrNotFound) + mockDetach: func(mockCloud *cloud.MockCloud, ctx context.Context, volumeID string, nodeID string) { + mockCloud.EXPECT().DetachDisk(gomock.Eq(ctx), volumeID, nodeID).Return(cloud.ErrNotFound) }, expResp: &csi.ControllerUnpublishVolumeResponse{}, }, { name: "Invalid argument error when no VolumeId provided", - volumeId: "", - nodeId: expInstanceID, + volumeID: "", + nodeID: expInstanceID, errorCode: codes.InvalidArgument, }, { name: "Invalid argument error when no NodeId provided", - volumeId: "vol-test", - nodeId: "", + volumeID: "vol-test", + nodeID: "", errorCode: codes.InvalidArgument, }, { name: "Internal error when DetachDisk operation fails", - volumeId: "vol-test", - nodeId: expInstanceID, + volumeID: "vol-test", + nodeID: expInstanceID, errorCode: codes.Internal, - mockDetach: func(mockCloud *cloud.MockCloud, ctx context.Context, volumeId string, nodeId string) { - mockCloud.EXPECT().DetachDisk(gomock.Eq(ctx), volumeId, nodeId).Return(errors.New("test error")) + mockDetach: func(mockCloud *cloud.MockCloud, ctx context.Context, volumeID string, nodeID string) { + mockCloud.EXPECT().DetachDisk(gomock.Eq(ctx), volumeID, nodeID).Return(errors.New("test error")) }, }, { name: "Aborted error when operation already in-flight", - volumeId: "vol-test", - nodeId: expInstanceID, + volumeID: "vol-test", + nodeID: expInstanceID, errorCode: codes.Aborted, setupFunc: func(driver *ControllerService) { driver.inFlight.Insert("vol-test" + expInstanceID) @@ -3568,8 +3629,8 @@ func TestControllerUnpublishVolume(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { req := &csi.ControllerUnpublishVolumeRequest{ - NodeId: tc.nodeId, - VolumeId: tc.volumeId, + NodeId: tc.nodeID, + VolumeId: tc.volumeID, } ctx := context.Background() @@ -3668,10 +3729,8 @@ func TestControllerExpandVolume(t *testing.T) { if !tc.expError { t.Fatalf("Unexpected error: %v", err) } - } else { - if tc.expError { - t.Fatalf("Expected error from ControllerExpandVolume, got nothing") - } + } else if tc.expError { + t.Fatalf("Expected error from ControllerExpandVolume, got nothing") } sizeGiB := 
util.BytesToGiB(resp.GetCapacityBytes()) @@ -3684,6 +3743,7 @@ func TestControllerExpandVolume(t *testing.T) { } func checkExpectedErrorCode(t *testing.T, err error, expectedCode codes.Code) { + t.Helper() if err == nil { t.Fatalf("Expected operation to fail but got no error") } diff --git a/pkg/driver/driver.go b/pkg/driver/driver.go index af74a919d0..031da14d10 100644 --- a/pkg/driver/driver.go +++ b/pkg/driver/driver.go @@ -52,7 +52,7 @@ const ( AwsRegionKey = "topology." + DriverName + "/region" AwsOutpostIDKey = "topology." + DriverName + "/outpost-id" WellKnownZoneTopologyKey = "topology.kubernetes.io/zone" - // DEPRECATED Use the WellKnownZoneTopologyKey instead + // Deprecated: Use the WellKnownZoneTopologyKey instead. ZoneTopologyKey = "topology." + DriverName + "/zone" OSTopologyKey = "kubernetes.io/os" ) diff --git a/pkg/driver/internal/inflight_test.go b/pkg/driver/internal/inflight_test.go index 4373c59da0..748aea1622 100644 --- a/pkg/driver/internal/inflight_test.go +++ b/pkg/driver/internal/inflight_test.go @@ -21,7 +21,7 @@ import ( ) type testRequest struct { - volumeId string + volumeID string extra string expResp bool delete bool @@ -37,54 +37,53 @@ func TestInFlight(t *testing.T) { requests: []testRequest{ { - volumeId: "random-vol-name", + volumeID: "random-vol-name", expResp: true, }, }, }, { - name: "success adding request with different volumeId", + name: "success adding request with different volumeID", requests: []testRequest{ { - volumeId: "random-vol-foobar", + volumeID: "random-vol-foobar", expResp: true, }, { - volumeId: "random-vol-name-foobar", + volumeID: "random-vol-name-foobar", expResp: true, }, }, }, { - name: "failed adding request with same volumeId", + name: "failed adding request with same volumeID", requests: []testRequest{ { - volumeId: "random-vol-name-foobar", + volumeID: "random-vol-name-foobar", expResp: true, }, { - volumeId: "random-vol-name-foobar", + volumeID: "random-vol-name-foobar", expResp: false, }, }, }, - { name: "success add, delete, add copy", requests: []testRequest{ { - volumeId: "random-vol-name", + volumeID: "random-vol-name", extra: "random-node-id", expResp: true, }, { - volumeId: "random-vol-name", + volumeID: "random-vol-name", extra: "random-node-id", expResp: false, delete: true, }, { - volumeId: "random-vol-name", + volumeID: "random-vol-name", extra: "random-node-id", expResp: true, }, @@ -98,15 +97,14 @@ func TestInFlight(t *testing.T) { for _, r := range tc.requests { var resp bool if r.delete { - db.Delete(r.volumeId) + db.Delete(r.volumeID) } else { - resp = db.Insert(r.volumeId) + resp = db.Insert(r.volumeID) } if r.expResp != resp { t.Fatalf("expected insert to be %+v, got %+v", r.expResp, resp) } } }) - } } diff --git a/pkg/driver/node.go b/pkg/driver/node.go index 750fa6eee8..0c4ab698e0 100644 --- a/pkg/driver/node.go +++ b/pkg/driver/node.go @@ -44,10 +44,10 @@ import ( ) const ( - // default file system type to be used when it is not provided + // default file system type to be used when it is not provided. defaultFsType = FSTypeExt4 - // VolumeOperationAlreadyExists is message fmt returned to CO when there is another in-flight call on the given volumeID + // VolumeOperationAlreadyExists is message fmt returned to CO when there is another in-flight call on the given volumeID. VolumeOperationAlreadyExists = "An operation with the given volume=%q is already in progress" // sbeDeviceVolumeAttachmentLimit refers to the maximum number of volumes that can be attached to an instance on snow. 
@@ -72,9 +72,9 @@ var ( csi.NodeServiceCapability_RPC_GET_VOLUME_STATS, } - // taintRemovalInitialDelay is the initial delay for node taint removal + // taintRemovalInitialDelay is the initial delay for node taint removal. taintRemovalInitialDelay = 1 * time.Second - // taintRemovalBackoff is the exponential backoff configuration for node taint removal + // taintRemovalBackoff is the exponential backoff configuration for node taint removal. taintRemovalBackoff = wait.Backoff{ Duration: 500 * time.Millisecond, Factor: 2, @@ -82,7 +82,7 @@ var ( } ) -// NodeService represents the node service of CSI driver +// NodeService represents the node service of CSI driver. type NodeService struct { metadata metadata.MetadataService mounter mounter.Mounter @@ -91,7 +91,7 @@ type NodeService struct { csi.UnimplementedNodeServer } -// NewNodeService creates a new node service +// NewNodeService creates a new node service. func NewNodeService(o *Options, md metadata.MetadataService, m mounter.Mounter, k kubernetes.Interface) *NodeService { if k != nil { // Remove taint from node to indicate driver startup success @@ -136,8 +136,7 @@ func (d *NodeService) NodeStageVolume(ctx context.Context, req *csi.NodeStageVol } // If the access type is block, do nothing for stage - switch volCap.GetAccessType().(type) { - case *csi.VolumeCapability_Block: + if _, isAccessTypeBlock := volCap.GetAccessType().(*csi.VolumeCapability_Block); isAccessTypeBlock { return &csi.NodeStageVolumeResponse{}, nil } @@ -555,12 +554,11 @@ func (d *NodeService) NodeGetVolumeStats(ctx context.Context, req *csi.NodeGetVo }, }, }, nil - } func (d *NodeService) NodeGetCapabilities(ctx context.Context, req *csi.NodeGetCapabilitiesRequest) (*csi.NodeGetCapabilitiesResponse, error) { klog.V(4).InfoS("NodeGetCapabilities: called", "args", req) - var caps []*csi.NodeServiceCapability + caps := make([]*csi.NodeServiceCapability, 0, len(nodeCaps)) for _, cap := range nodeCaps { c := &csi.NodeServiceCapability{ Type: &csi.NodeServiceCapability_Rpc{ @@ -658,7 +656,7 @@ func (d *NodeService) nodePublishVolumeForBlock(req *csi.NodePublishVolumeReques return status.Errorf(codes.Internal, "Could not create file %q: %v", target, err) } - //Checking if the target file is already mounted with a device. + // Checking if the target file is already mounted with a device. mounted, err := d.isMounted(source, target) if err != nil { return status.Errorf(codes.Internal, "Could not check if %q is mounted: %v", target, err) @@ -690,14 +688,14 @@ func (d *NodeService) isMounted(_ string, target string) (bool, error) { */ notMnt, err := d.mounter.IsLikelyNotMountPoint(target) if err != nil && !os.IsNotExist(err) { - //Checking if the path exists and error is related to Corrupted Mount, in that case, the system could unmount and mount. + // Checking if the path exists and error is related to Corrupted Mount, in that case, the system could unmount and mount. _, pathErr := d.mounter.PathExists(target) if pathErr != nil && d.mounter.IsCorruptedMnt(pathErr) { klog.V(4).InfoS("NodePublishVolume: Target path is a corrupted mount. Trying to unmount.", "target", target) if mntErr := d.mounter.Unpublish(target); mntErr != nil { return false, status.Errorf(codes.Internal, "Unable to unmount the target %q : %v", target, mntErr) } - //After successful unmount, the device is ready to be mounted. + // After successful unmount, the device is ready to be mounted. 
return false, nil } return false, status.Errorf(codes.Internal, "Could not check if %q is a mount point: %v, %v", target, err, pathErr) @@ -736,7 +734,7 @@ func (d *NodeService) nodePublishVolumeForFileSystem(req *csi.NodePublishVolumeR return status.Errorf(codes.Internal, "%s", err.Error()) } - //Checking if the target directory is already mounted with a device. + // Checking if the target directory is already mounted with a device. mounted, err := d.isMounted(source, target) if err != nil { return status.Errorf(codes.Internal, "Could not check if %q is mounted: %v", target, err) @@ -763,9 +761,8 @@ func (d *NodeService) nodePublishVolumeForFileSystem(req *csi.NodePublishVolumeR return nil } -// getVolumesLimit returns the limit of volumes that the node supports +// getVolumesLimit returns the limit of volumes that the node supports. func (d *NodeService) getVolumesLimit() int64 { - if d.options.VolumeAttachLimit >= 0 { return d.options.VolumeAttachLimit } @@ -801,7 +798,7 @@ func (d *NodeService) getVolumesLimit() int64 { availableAttachments = availableAttachments - enis - reservedSlots } } - availableAttachments = availableAttachments - reservedVolumeAttachments + availableAttachments -= reservedVolumeAttachments if availableAttachments <= 0 { availableAttachments = 1 } @@ -809,13 +806,6 @@ func (d *NodeService) getVolumesLimit() int64 { return int64(availableAttachments) } -func min(x, y int) int { - if x <= y { - return x - } - return y -} - // hasMountOption returns a boolean indicating whether the given // slice already contains a mount option. This is used to prevent // passing duplicate option to the mount command. @@ -849,14 +839,14 @@ func collectMountOptions(fsType string, mntFlags []string) []string { return options } -// Struct for JSON patch operations +// Struct for JSON patch operations. type JSONPatch struct { OP string `json:"op,omitempty"` Path string `json:"path,omitempty"` Value interface{} `json:"value"` } -// removeTaintInBackground is a goroutine that retries removeNotReadyTaint with exponential backoff +// removeTaintInBackground is a goroutine that retries removeNotReadyTaint with exponential backoff. func removeTaintInBackground(k8sClient kubernetes.Interface, backoff wait.Backoff, removalFunc func(kubernetes.Interface) error) { backoffErr := wait.ExponentialBackoff(backoff, func() (bool, error) { err := removalFunc(k8sClient) diff --git a/pkg/driver/node_test.go b/pkg/driver/node_test.go index a4da70b3fd..59f6ebab27 100644 --- a/pkg/driver/node_test.go +++ b/pkg/driver/node_test.go @@ -14,13 +14,13 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ +//nolint:forcetypeassert package driver import ( "context" "errors" "fmt" - "os" "reflect" "runtime" "testing" @@ -50,17 +50,12 @@ func TestNewNodeService(t *testing.T) { mockMounter := mounter.NewMockMounter(ctrl) mockKubernetesClient := NewMockKubernetesClient(ctrl) - os.Setenv("AWS_REGION", "us-west-2") - defer os.Unsetenv("AWS_REGION") + t.Setenv("AWS_REGION", "us-west-2") options := &Options{} nodeService := NewNodeService(options, mockMetadataService, mockMounter, mockKubernetesClient) - if nodeService == nil { - t.Fatal("Expected NewNodeService to return a non-nil NodeService") - } - if nodeService.metadata != mockMetadataService { t.Error("Expected NodeService.metadata to be set to the mock MetadataService") } @@ -2360,7 +2355,7 @@ func TestNodeExpandVolume(t *testing.T) { func TestNodeGetVolumeStats(t *testing.T) { testCases := []struct { name string - validVolId bool + validVolID bool validPath bool metricsStatErr bool mounterMock func(mockCtl *gomock.Controller, dir string) *mounter.MockMounter @@ -2368,7 +2363,7 @@ func TestNodeGetVolumeStats(t *testing.T) { }{ { name: "success normal", - validVolId: true, + validVolID: true, validPath: true, mounterMock: func(ctrl *gomock.Controller, dir string) *mounter.MockMounter { m := mounter.NewMockMounter(ctrl) @@ -2382,14 +2377,14 @@ func TestNodeGetVolumeStats(t *testing.T) { }, { name: "invalid_volume_id", - validVolId: false, + validVolID: false, expectedErr: func(dir string) error { return status.Error(codes.InvalidArgument, "NodeGetVolumeStats volume ID was empty") }, }, { name: "invalid_volume_path", - validVolId: true, + validVolID: true, validPath: false, expectedErr: func(dir string) error { return status.Error(codes.InvalidArgument, "NodeGetVolumeStats volume path was empty") @@ -2397,7 +2392,7 @@ func TestNodeGetVolumeStats(t *testing.T) { }, { name: "path_exists_error", - validVolId: true, + validVolID: true, validPath: true, mounterMock: func(ctrl *gomock.Controller, dir string) *mounter.MockMounter { m := mounter.NewMockMounter(ctrl) @@ -2410,7 +2405,7 @@ func TestNodeGetVolumeStats(t *testing.T) { }, { name: "path_does_not_exist", - validVolId: true, + validVolID: true, validPath: true, mounterMock: func(ctrl *gomock.Controller, dir string) *mounter.MockMounter { m := mounter.NewMockMounter(ctrl) @@ -2423,7 +2418,7 @@ func TestNodeGetVolumeStats(t *testing.T) { }, { name: "is_block_device_error", - validVolId: true, + validVolID: true, validPath: true, mounterMock: func(ctrl *gomock.Controller, dir string) *mounter.MockMounter { m := mounter.NewMockMounter(ctrl) @@ -2437,7 +2432,7 @@ func TestNodeGetVolumeStats(t *testing.T) { }, { name: "get_block_size_bytes_error", - validVolId: true, + validVolID: true, validPath: true, mounterMock: func(ctrl *gomock.Controller, dir string) *mounter.MockMounter { m := mounter.NewMockMounter(ctrl) @@ -2452,7 +2447,7 @@ func TestNodeGetVolumeStats(t *testing.T) { }, { name: "success block device", - validVolId: true, + validVolID: true, validPath: true, mounterMock: func(ctrl *gomock.Controller, dir string) *mounter.MockMounter { m := mounter.NewMockMounter(ctrl) @@ -2486,7 +2481,7 @@ func TestNodeGetVolumeStats(t *testing.T) { } req := &csi.NodeGetVolumeStatsRequest{} - if tc.validVolId { + if tc.validVolID { req.VolumeId = "vol-test" } if tc.validPath { @@ -2515,18 +2510,20 @@ func TestRemoveNotReadyTaint(t *testing.T) { { name: "failed to get node", setup: func(t *testing.T, mockCtl *gomock.Controller) func() (kubernetes.Interface, error) { + t.Helper() t.Setenv("CSI_NODE_NAME", 
nodeName) - getNodeMock, _ := getNodeMock(mockCtl, nodeName, nil, fmt.Errorf("Failed to get node!")) + getNodeMock, _ := getNodeMock(mockCtl, nodeName, nil, errors.New("Failed to get node!")) return func() (kubernetes.Interface, error) { return getNodeMock, nil } }, - expResult: fmt.Errorf("Failed to get node!"), + expResult: errors.New("Failed to get node!"), }, { name: "no taints to remove", setup: func(t *testing.T, mockCtl *gomock.Controller) func() (kubernetes.Interface, error) { + t.Helper() t.Setenv("CSI_NODE_NAME", nodeName) getNodeMock, _ := getNodeMock(mockCtl, nodeName, &corev1.Node{}, nil) @@ -2568,6 +2565,7 @@ func TestRemoveNotReadyTaint(t *testing.T) { { name: "failed to patch node", setup: func(t *testing.T, mockCtl *gomock.Controller) func() (kubernetes.Interface, error) { + t.Helper() t.Setenv("CSI_NODE_NAME", nodeName) getNodeMock, mockNode := getNodeMock(mockCtl, nodeName, &corev1.Node{ ObjectMeta: metav1.ObjectMeta{ @@ -2614,18 +2612,19 @@ func TestRemoveNotReadyTaint(t *testing.T) { mockNode.EXPECT(). Patch(gomock.Any(), gomock.Eq(nodeName), gomock.Any(), gomock.Any(), gomock.Any()). - Return(nil, fmt.Errorf("Failed to patch node!")). + Return(nil, errors.New("Failed to patch node!")). Times(1) return func() (kubernetes.Interface, error) { return getNodeMock, nil } }, - expResult: fmt.Errorf("Failed to patch node!"), + expResult: errors.New("Failed to patch node!"), }, { name: "success", setup: func(t *testing.T, mockCtl *gomock.Controller) func() (kubernetes.Interface, error) { + t.Helper() t.Setenv("CSI_NODE_NAME", nodeName) getNodeMock, mockNode := getNodeMock(mockCtl, nodeName, &corev1.Node{ ObjectMeta: metav1.ObjectMeta{ @@ -2684,6 +2683,7 @@ func TestRemoveNotReadyTaint(t *testing.T) { { name: "failed to get CSINode", setup: func(t *testing.T, mockCtl *gomock.Controller) func() (kubernetes.Interface, error) { + t.Helper() t.Setenv("CSI_NODE_NAME", nodeName) getNodeMock, _ := getNodeMock(mockCtl, nodeName, &corev1.Node{ ObjectMeta: metav1.ObjectMeta{ @@ -2707,7 +2707,7 @@ func TestRemoveNotReadyTaint(t *testing.T) { csiNodesMock.EXPECT(). Get(gomock.Any(), gomock.Eq(nodeName), gomock.Any()). - Return(nil, fmt.Errorf("Failed to get CSINode")). + Return(nil, errors.New("Failed to get CSINode")). 
Times(1) return func() (kubernetes.Interface, error) { @@ -2719,6 +2719,7 @@ func TestRemoveNotReadyTaint(t *testing.T) { { name: "allocatable value not set for driver on node", setup: func(t *testing.T, mockCtl *gomock.Controller) func() (kubernetes.Interface, error) { + t.Helper() t.Setenv("CSI_NODE_NAME", nodeName) getNodeMock, _ := getNodeMock(mockCtl, nodeName, &corev1.Node{ ObjectMeta: metav1.ObjectMeta{ @@ -2768,6 +2769,7 @@ func TestRemoveNotReadyTaint(t *testing.T) { { name: "driver not found on node", setup: func(t *testing.T, mockCtl *gomock.Controller) func() (kubernetes.Interface, error) { + t.Helper() t.Setenv("CSI_NODE_NAME", nodeName) getNodeMock, _ := getNodeMock(mockCtl, nodeName, &corev1.Node{ ObjectMeta: metav1.ObjectMeta{ @@ -2840,7 +2842,7 @@ func TestRemoveTaintInBackground(t *testing.T) { if mockRemovalCount == 3 { return nil } else { - return fmt.Errorf("Taint removal failed!") + return errors.New("Taint removal failed!") } } removeTaintInBackground(nil, taintRemovalBackoff, mockRemovalFunc) @@ -2851,7 +2853,7 @@ func TestRemoveTaintInBackground(t *testing.T) { mockRemovalCount := 0 mockRemovalFunc := func(_ kubernetes.Interface) error { mockRemovalCount += 1 - return fmt.Errorf("Taint removal failed!") + return errors.New("Taint removal failed!") } removeTaintInBackground(nil, wait.Backoff{ Steps: 5, diff --git a/pkg/driver/options.go b/pkg/driver/options.go index 415e3ddc34..adf291ad5a 100644 --- a/pkg/driver/options.go +++ b/pkg/driver/options.go @@ -17,7 +17,7 @@ limitations under the License. package driver import ( - "fmt" + "errors" "time" flag "github.com/spf13/pflag" @@ -34,10 +34,10 @@ type Options struct { // #### Server options #### - //Endpoint is the endpoint for the CSI driver server + // Endpoint is the endpoint for the CSI driver server Endpoint string - // HttpEndpoint is the TCP network address where the HTTP server for metrics will listen - HttpEndpoint string + // HTTPEndpoint is the TCP network address where the HTTP server for metrics will listen + HTTPEndpoint string // MetricsCertFile is the location of the certificate for serving the metrics server over HTTPS MetricsCertFile string // MetricsKeyFile is the location of the key for serving the metrics server over HTTPS @@ -52,7 +52,7 @@ type Options struct { ExtraTags map[string]string // ExtraVolumeTags is a map of tags that will be attached to each dynamically provisioned // volume. - // DEPRECATED: Use ExtraTags instead. + // Deprecated: Use ExtraTags instead. ExtraVolumeTags map[string]string // ID of the kubernetes cluster. KubernetesClusterID string @@ -96,7 +96,7 @@ func (o *Options) AddFlags(f *flag.FlagSet) { // Server options f.StringVar(&o.Endpoint, "endpoint", DefaultCSIEndpoint, "Endpoint for the CSI driver server") - f.StringVar(&o.HttpEndpoint, "http-endpoint", "", "The TCP network address where the HTTP server for metrics will listen (example: `:8080`). The default is empty string, which means the server is disabled.") + f.StringVar(&o.HTTPEndpoint, "http-endpoint", "", "The TCP network address where the HTTP server for metrics will listen (example: `:8080`). The default is empty string, which means the server is disabled.") f.StringVar(&o.MetricsCertFile, "metrics-cert-file", "", "The path to a certificate to use for serving the metrics server over HTTPS. If the certificate is signed by a certificate authority, this file should be the concatenation of the server's certificate, any intermediates, and the CA's certificate. 
If this is non-empty, --http-endpoint and --metrics-key-file MUST also be non-empty.") f.StringVar(&o.MetricsKeyFile, "metrics-key-file", "", "The path to a key to use for serving the metrics server over HTTPS. If this is non-empty, --http-endpoint and --metrics-cert-file MUST also be non-empty.") f.BoolVar(&o.EnableOtelTracing, "enable-otel-tracing", false, "To enable opentelemetry tracing for the driver. The tracing is disabled by default. Configure the exporter endpoint with OTEL_EXPORTER_OTLP_ENDPOINT and other env variables, see https://opentelemetry.io/docs/specs/otel/configuration/sdk-environment-variables/#general-sdk-configuration.") @@ -124,17 +124,18 @@ func (o *Options) AddFlags(f *flag.FlagSet) { func (o *Options) Validate() error { if o.Mode == AllMode || o.Mode == NodeMode { if o.VolumeAttachLimit != -1 && o.ReservedVolumeAttachments != -1 { - return fmt.Errorf("only one of --volume-attach-limit and --reserved-volume-attachments may be specified") + return errors.New("only one of --volume-attach-limit and --reserved-volume-attachments may be specified") } } if o.MetricsCertFile != "" || o.MetricsKeyFile != "" { - if o.HttpEndpoint == "" { - return fmt.Errorf("--http-endpoint MUST be specififed when using the metrics server with HTTPS") - } else if o.MetricsCertFile == "" { - return fmt.Errorf("--metrics-cert-file MUST be specififed when using the metrics server with HTTPS") - } else if o.MetricsKeyFile == "" { - return fmt.Errorf("--metrics-key-file MUST be specififed when using the metrics server with HTTPS") + switch { + case o.HTTPEndpoint == "": + return errors.New("--http-endpoint MUST be specified when using the metrics server with HTTPS") + case o.MetricsCertFile == "": + return errors.New("--metrics-cert-file MUST be specified when using the metrics server with HTTPS") + case o.MetricsKeyFile == "": + return errors.New("--metrics-key-file MUST be specified when using the metrics server with HTTPS") } } diff --git a/pkg/driver/options_test.go b/pkg/driver/options_test.go index 38944b7c9f..0cb8271c1f 100644 --- a/pkg/driver/options_test.go +++ b/pkg/driver/options_test.go @@ -77,8 +77,8 @@ func TestAddFlags(t *testing.T) { if o.Endpoint != "custom-endpoint" { t.Errorf("unexpected Endpoint: got %s, want custom-endpoint", o.Endpoint) } - if o.HttpEndpoint != ":8080" { - t.Errorf("unexpected HttpEndpoint: got %s, want :8080", o.HttpEndpoint) + if o.HTTPEndpoint != ":8080" { + t.Errorf("unexpected HTTPEndpoint: got %s, want :8080", o.HTTPEndpoint) } if !o.EnableOtelTracing { t.Error("unexpected EnableOtelTracing: got false, want true") @@ -215,7 +215,7 @@ func TestValidateMetricsHTTPS(t *testing.T) { t.Run(tt.name, func(t *testing.T) { o := &Options{ Mode: ControllerMode, - HttpEndpoint: tt.httpEndpoint, + HTTPEndpoint: tt.httpEndpoint, MetricsCertFile: tt.metricsCertFile, MetricsKeyFile: tt.metricsKeyFile, } diff --git a/pkg/driver/request_coalescing_test.go b/pkg/driver/request_coalescing_test.go index 8e700b7354..2b7b2c2c64 100644 --- a/pkg/driver/request_coalescing_test.go +++ b/pkg/driver/request_coalescing_test.go @@ -18,20 +18,18 @@ package driver import ( "context" - "fmt" - + "errors" "sync" "testing" "time" "github.com/awslabs/volume-modifier-for-k8s/pkg/rpc" "github.com/container-storage-interface/spec/lib/go/csi" + "github.com/golang/mock/gomock" + "github.com/kubernetes-sigs/aws-ebs-csi-driver/pkg/cloud" "github.com/kubernetes-sigs/aws-ebs-csi-driver/pkg/driver/internal" "github.com/kubernetes-sigs/aws-ebs-csi-driver/pkg/util" "k8s.io/klog/v2" - -
"github.com/golang/mock/gomock" - "github.com/kubernetes-sigs/aws-ebs-csi-driver/pkg/cloud" ) type modifyVolumeExecutor func(ctx context.Context, driver ControllerService, name string, params map[string]string) error @@ -53,6 +51,7 @@ func modifierForK8sModifyVolume(ctx context.Context, driver ControllerService, n } func TestVolumeModificationWithCoalescing(t *testing.T) { + t.Parallel() testCases := []struct { name string testFunction func(t *testing.T, executor modifyVolumeExecutor) @@ -84,7 +83,6 @@ func TestVolumeModificationWithCoalescing(t *testing.T) { } for _, tc := range testCases { - tc := tc // Not strictly necessary but required by `go vet` t.Run(tc.name+": volume-modifier-for-k8s", func(t *testing.T) { t.Parallel() tc.testFunction(t, modifierForK8sModifyVolume) @@ -98,6 +96,7 @@ func TestVolumeModificationWithCoalescing(t *testing.T) { // TestBasicRequestCoalescingSuccess tests the success case of coalescing 2 requests from ControllerExpandVolume and a modify function respectively. func testBasicRequestCoalescingSuccess(t *testing.T, executor modifyVolumeExecutor) { + t.Helper() const NewVolumeType = "gp3" const NewSize = 5 * util.GiB volumeID := t.Name() @@ -159,6 +158,7 @@ func testBasicRequestCoalescingSuccess(t *testing.T, executor modifyVolumeExecut // TestRequestFail tests failing requests from ResizeOrModifyDisk failure. func testRequestFail(t *testing.T, executor modifyVolumeExecutor) { + t.Helper() const NewVolumeType = "gp3" const NewSize = 5 * util.GiB volumeID := t.Name() @@ -169,7 +169,7 @@ func testRequestFail(t *testing.T, executor modifyVolumeExecutor) { mockCloud := cloud.NewMockCloud(mockCtl) mockCloud.EXPECT().ResizeOrModifyDisk(gomock.Any(), gomock.Eq(volumeID), gomock.Any(), gomock.Any()).DoAndReturn(func(_ context.Context, volumeID string, newSize int64, options *cloud.ModifyDiskOptions) (int64, error) { klog.InfoS("ResizeOrModifyDisk called", "volumeID", volumeID, "newSize", newSize, "options", options) - return 0, fmt.Errorf("ResizeOrModifyDisk failed") + return 0, errors.New("ResizeOrModifyDisk failed") }) options := &Options{ @@ -218,6 +218,7 @@ func testRequestFail(t *testing.T, executor modifyVolumeExecutor) { // 3) Change volume type to NewVolumeType2 // The expected result is the resizing request succeeds and one of the volume-type requests fails. func testPartialFail(t *testing.T, executor modifyVolumeExecutor) { + t.Helper() const NewVolumeType1 = "gp3" const NewVolumeType2 = "io2" const NewSize = 5 * util.GiB @@ -290,27 +291,29 @@ func testPartialFail(t *testing.T, executor modifyVolumeExecutor) { wg.Wait() - if volumeTypeChosen == NewVolumeType1 { + switch volumeTypeChosen { + case NewVolumeType1: if volumeType1Err { t.Error("Controller chose", NewVolumeType1, "but errored request") } if !volumeType2Error { t.Error("Controller chose", NewVolumeType1, "but returned success to", NewVolumeType2, "request") } - } else if volumeTypeChosen == NewVolumeType2 { + case NewVolumeType2: if volumeType2Error { t.Error("Controller chose", NewVolumeType2, "but errored request") } if !volumeType1Err { t.Error("Controller chose", NewVolumeType2, "but returned success to", NewVolumeType1, "request") } - } else { + default: t.Error("No volume type chosen") } } // TestSequential tests sending 2 requests sequentially. 
func testSequentialRequests(t *testing.T, executor modifyVolumeExecutor) { + t.Helper() const NewVolumeType = "gp3" const NewSize = 5 * util.GiB volumeID := t.Name() @@ -369,6 +372,7 @@ func testSequentialRequests(t *testing.T, executor modifyVolumeExecutor) { // TestDuplicateRequest tests sending multiple same requests roughly in parallel. func testDuplicateRequest(t *testing.T, executor modifyVolumeExecutor) { + t.Helper() const NewSize = 5 * util.GiB volumeID := t.Name() @@ -395,7 +399,7 @@ func testDuplicateRequest(t *testing.T, executor modifyVolumeExecutor) { num := 5 wg.Add(num * 2) - for j := 0; j < num; j++ { + for range num { go wrapTimeout(t, "ControllerExpandVolume timed out", func() { _, err := awsDriver.ControllerExpandVolume(context.Background(), &csi.ControllerExpandVolumeRequest{ VolumeId: volumeID, @@ -422,8 +426,9 @@ func testDuplicateRequest(t *testing.T, executor modifyVolumeExecutor) { wg.Wait() } -// TestResponseReturnTiming tests the caller of request coalescing blocking until receiving response from cloud.ResizeOrModifyDisk +// TestResponseReturnTiming tests the caller of request coalescing blocking until receiving response from cloud.ResizeOrModifyDisk. func testResponseReturnTiming(t *testing.T, executor modifyVolumeExecutor) { + t.Helper() const NewVolumeType = "gp3" const NewSize = 5 * util.GiB var ec2ModifyVolumeFinished = false @@ -491,6 +496,7 @@ func testResponseReturnTiming(t *testing.T, executor modifyVolumeExecutor) { } func wrapTimeout(t *testing.T, failMessage string, execFunc func()) { + t.Helper() timeout := time.After(15 * time.Second) done := make(chan bool) go func() { diff --git a/pkg/driver/validation.go b/pkg/driver/validation.go index 456cbc2dd5..dd7667c40b 100644 --- a/pkg/driver/validation.go +++ b/pkg/driver/validation.go @@ -27,15 +27,15 @@ import ( func ValidateDriverOptions(options *Options) error { if err := validateExtraTags(options.ExtraTags, false); err != nil { - return fmt.Errorf("Invalid extra tags: %w", err) + return fmt.Errorf("invalid extra tags: %w", err) } if err := validateMode(options.Mode); err != nil { - return fmt.Errorf("Invalid mode: %w", err) + return fmt.Errorf("invalid mode: %w", err) } if options.ModifyVolumeRequestHandlerTimeout == 0 && (options.Mode == ControllerMode || options.Mode == AllMode) { - return errors.New("Invalid modifyVolumeRequestHandlerTimeout: Timeout cannot be zero") + return errors.New("invalid modifyVolumeRequestHandlerTimeout: timeout cannot be zero") } return nil @@ -43,32 +43,32 @@ func ValidateDriverOptions(options *Options) error { func validateExtraTags(tags map[string]string, warnOnly bool) error { if len(tags) > cloud.MaxNumTagsPerResource { - return fmt.Errorf("Too many tags (actual: %d, limit: %d)", len(tags), cloud.MaxNumTagsPerResource) + return fmt.Errorf("too many tags (actual: %d, limit: %d)", len(tags), cloud.MaxNumTagsPerResource) } validate := func(k, v string) error { if len(k) > cloud.MaxTagKeyLength { - return fmt.Errorf("Tag key too long (actual: %d, limit: %d)", len(k), cloud.MaxTagKeyLength) + return fmt.Errorf("tag key too long (actual: %d, limit: %d)", len(k), cloud.MaxTagKeyLength) } else if len(k) < cloud.MinTagKeyLength { - return fmt.Errorf("Tag key cannot be empty (min: 1)") + return errors.New("tag key cannot be empty (min: 1)") } if len(v) > cloud.MaxTagValueLength { - return fmt.Errorf("Tag value too long (actual: %d, limit: %d)", len(v), cloud.MaxTagValueLength) + return fmt.Errorf("tag value too long (actual: %d, limit: %d)", len(v), cloud.MaxTagValueLength) 
} if k == cloud.VolumeNameTagKey { - return fmt.Errorf("Tag key '%s' is reserved", cloud.VolumeNameTagKey) + return fmt.Errorf("tag key '%s' is reserved", cloud.VolumeNameTagKey) } if k == cloud.AwsEbsDriverTagKey { - return fmt.Errorf("Tag key '%s' is reserved", cloud.AwsEbsDriverTagKey) + return fmt.Errorf("tag key '%s' is reserved", cloud.AwsEbsDriverTagKey) } if k == cloud.SnapshotNameTagKey { - return fmt.Errorf("Tag key '%s' is reserved", cloud.SnapshotNameTagKey) + return fmt.Errorf("tag key '%s' is reserved", cloud.SnapshotNameTagKey) } if strings.HasPrefix(k, cloud.KubernetesTagKeyPrefix) { - return fmt.Errorf("Tag key prefix '%s' is reserved", cloud.KubernetesTagKeyPrefix) + return fmt.Errorf("tag key prefix '%s' is reserved", cloud.KubernetesTagKeyPrefix) } if strings.HasPrefix(k, cloud.AWSTagKeyPrefix) { - return fmt.Errorf("Tag key prefix '%s' is reserved", cloud.AWSTagKeyPrefix) + return fmt.Errorf("tag key prefix '%s' is reserved", cloud.AWSTagKeyPrefix) } return nil } @@ -88,7 +88,7 @@ func validateExtraTags(tags map[string]string, warnOnly bool) error { func validateMode(mode Mode) error { if mode != AllMode && mode != ControllerMode && mode != NodeMode { - return fmt.Errorf("Mode is not supported (actual: %s, supported: %v)", mode, []Mode{AllMode, ControllerMode, NodeMode}) + return fmt.Errorf("mode is not supported (actual: %s, supported: %v)", mode, []Mode{AllMode, ControllerMode, NodeMode}) } return nil diff --git a/pkg/driver/validation_test.go b/pkg/driver/validation_test.go index 95aa81f16c..c476c231cd 100644 --- a/pkg/driver/validation_test.go +++ b/pkg/driver/validation_test.go @@ -40,7 +40,7 @@ func randomString(n int) string { func randomStringMap(n int) map[string]string { result := map[string]string{} - for i := 0; i < n; i++ { + for i := range n { result[strconv.Itoa(i)] = randomString(10) } return result @@ -64,61 +64,61 @@ func TestValidateExtraTags(t *testing.T) { tags: map[string]string{ randomString(cloud.MaxTagKeyLength + 1): "extra-tag-value", }, - expErr: fmt.Errorf("Tag key too long (actual: %d, limit: %d)", cloud.MaxTagKeyLength+1, cloud.MaxTagKeyLength), + expErr: fmt.Errorf("tag key too long (actual: %d, limit: %d)", cloud.MaxTagKeyLength+1, cloud.MaxTagKeyLength), }, { name: "invaid tag: key is empty", tags: map[string]string{ "": "extra-tag-value", }, - expErr: fmt.Errorf("Tag key cannot be empty (min: 1)"), + expErr: errors.New("tag key cannot be empty (min: 1)"), }, { name: "invalid tag: value too long", tags: map[string]string{ "extra-tag-key": randomString(cloud.MaxTagValueLength + 1), }, - expErr: fmt.Errorf("Tag value too long (actual: %d, limit: %d)", cloud.MaxTagValueLength+1, cloud.MaxTagValueLength), + expErr: fmt.Errorf("tag value too long (actual: %d, limit: %d)", cloud.MaxTagValueLength+1, cloud.MaxTagValueLength), }, { name: "invalid tag: reserved CSI key", tags: map[string]string{ cloud.VolumeNameTagKey: "extra-tag-value", }, - expErr: fmt.Errorf("Tag key '%s' is reserved", cloud.VolumeNameTagKey), + expErr: fmt.Errorf("tag key '%s' is reserved", cloud.VolumeNameTagKey), }, { name: "invalid tag: reserved driver key", tags: map[string]string{ cloud.AwsEbsDriverTagKey: "false", }, - expErr: fmt.Errorf("Tag key '%s' is reserved", cloud.AwsEbsDriverTagKey), + expErr: fmt.Errorf("tag key '%s' is reserved", cloud.AwsEbsDriverTagKey), }, { name: "invaid tag: reserved snapshot key", tags: map[string]string{ cloud.SnapshotNameTagKey: "false", }, - expErr: fmt.Errorf("Tag key '%s' is reserved", cloud.SnapshotNameTagKey), + expErr: 
fmt.Errorf("tag key '%s' is reserved", cloud.SnapshotNameTagKey), }, { name: "invalid tag: reserved Kubernetes key prefix", tags: map[string]string{ cloud.KubernetesTagKeyPrefix + "/cluster": "extra-tag-value", }, - expErr: fmt.Errorf("Tag key prefix '%s' is reserved", cloud.KubernetesTagKeyPrefix), + expErr: fmt.Errorf("tag key prefix '%s' is reserved", cloud.KubernetesTagKeyPrefix), }, { name: "invalid tag: reserved AWS key prefix", tags: map[string]string{ cloud.AWSTagKeyPrefix + "foo": "extra-tag-value", }, - expErr: fmt.Errorf("Tag key prefix '%s' is reserved", cloud.AWSTagKeyPrefix), + expErr: fmt.Errorf("tag key prefix '%s' is reserved", cloud.AWSTagKeyPrefix), }, { name: "invalid tag: too many tags", tags: randomStringMap(cloud.MaxNumTagsPerResource + 1), - expErr: fmt.Errorf("Too many tags (actual: %d, limit: %d)", cloud.MaxNumTagsPerResource+1, cloud.MaxNumTagsPerResource), + expErr: fmt.Errorf("too many tags (actual: %d, limit: %d)", cloud.MaxNumTagsPerResource+1, cloud.MaxNumTagsPerResource), }, } @@ -156,7 +156,7 @@ func TestValidateMode(t *testing.T) { { name: "invalid mode: unknown", mode: Mode("unknown"), - expErr: fmt.Errorf("Mode is not supported (actual: unknown, supported: %v)", []Mode{AllMode, ControllerMode, NodeMode}), + expErr: fmt.Errorf("mode is not supported (actual: unknown, supported: %v)", []Mode{AllMode, ControllerMode, NodeMode}), }, } @@ -188,7 +188,7 @@ func TestValidateDriverOptions(t *testing.T) { name: "fail because validateMode fails", mode: Mode("unknown"), modifyVolumeTimeout: 5 * time.Second, - expErr: fmt.Errorf("Invalid mode: %w", fmt.Errorf("Mode is not supported (actual: unknown, supported: %v)", []Mode{AllMode, ControllerMode, NodeMode})), + expErr: fmt.Errorf("invalid mode: %w", fmt.Errorf("mode is not supported (actual: unknown, supported: %v)", []Mode{AllMode, ControllerMode, NodeMode})), }, { name: "fail because validateExtraVolumeTags fails", @@ -197,13 +197,13 @@ func TestValidateDriverOptions(t *testing.T) { randomString(cloud.MaxTagKeyLength + 1): "extra-tag-value", }, modifyVolumeTimeout: 5 * time.Second, - expErr: fmt.Errorf("Invalid extra tags: %w", fmt.Errorf("Tag key too long (actual: %d, limit: %d)", cloud.MaxTagKeyLength+1, cloud.MaxTagKeyLength)), + expErr: fmt.Errorf("invalid extra tags: %w", fmt.Errorf("tag key too long (actual: %d, limit: %d)", cloud.MaxTagKeyLength+1, cloud.MaxTagKeyLength)), }, { name: "fail because modifyVolumeRequestHandlerTimeout is zero", mode: AllMode, modifyVolumeTimeout: 0, - expErr: errors.New("Invalid modifyVolumeRequestHandlerTimeout: Timeout cannot be zero"), + expErr: errors.New("invalid modifyVolumeRequestHandlerTimeout: timeout cannot be zero"), }, } diff --git a/pkg/driver/version.go b/pkg/driver/version.go index f168fa4e2d..7adb7b9bac 100644 --- a/pkg/driver/version.go +++ b/pkg/driver/version.go @@ -22,7 +22,7 @@ import ( "runtime" ) -// These are set during build time via -ldflags +// These are set during build time via -ldflags. var ( driverVersion string gitCommit string diff --git a/pkg/expiringcache/expiring_cache.go b/pkg/expiringcache/expiring_cache.go index 962301d12d..74cb5c7c1d 100644 --- a/pkg/expiringcache/expiring_cache.go +++ b/pkg/expiringcache/expiring_cache.go @@ -30,7 +30,7 @@ import ( // // From the consumer's perspective, it behaves similarly to a map // KeyType is the type of the object that is used as a key -// ValueType is the type of the object that is stored +// ValueType is the type of the object that is stored. 
type ExpiringCache[KeyType comparable, ValueType any] interface { // Get operates identically to retrieving from a map, returning // the value and/or boolean indicating if the value existed in the map @@ -55,7 +55,7 @@ type expiringCache[KeyType comparable, ValueType any] struct { } // New returns a new ExpiringCache -// for a given KeyType, ValueType, and expiration delay +// for a given KeyType, ValueType, and expiration delay. func New[KeyType comparable, ValueType any](expirationDelay time.Duration) ExpiringCache[KeyType, ValueType] { return &expiringCache[KeyType, ValueType]{ expirationDelay: expirationDelay, diff --git a/pkg/metrics/metrics.go b/pkg/metrics/metrics.go index 83fee738a1..85596ccc90 100644 --- a/pkg/metrics/metrics.go +++ b/pkg/metrics/metrics.go @@ -65,7 +65,12 @@ func (m *metricRecorder) IncreaseCount(name string, labels map[string]string) { return } - metric.(*metrics.CounterVec).With(metrics.Labels(labels)).Inc() + metricAsCounterVec, ok := metric.(*metrics.CounterVec) + if ok { + metricAsCounterVec.With(labels).Inc() + } else { + klog.V(4).InfoS("Could not assert metric as metrics.CounterVec. Metric increase may have been skipped") + } } // ObserveHistogram records the given value in the histogram metric. @@ -82,7 +87,12 @@ func (m *metricRecorder) ObserveHistogram(name string, value float64, labels map return } - metric.(*metrics.HistogramVec).With(metrics.Labels(labels)).Observe(value) + metricAsHistogramVec, ok := metric.(*metrics.HistogramVec) + if ok { + metricAsHistogramVec.With(labels).Observe(value) + } else { + klog.V(4).InfoS("Could not assert metric as metrics.HistogramVec. Metric observation may have been skipped") + } } // InitializeMetricsHandler starts a new HTTP server to expose the metrics. diff --git a/pkg/mounter/mount.go b/pkg/mounter/mount.go index c75bfb4c00..344224522e 100644 --- a/pkg/mounter/mount.go +++ b/pkg/mounter/mount.go @@ -27,7 +27,7 @@ import ( // A mix & match of functions defined in upstream libraries. (FormatAndMount // from struct SafeFormatAndMount, PathExists from an old edition of // mount.Interface). Define it explicitly so that it can be mocked and to -// insulate from oft-changing upstream interfaces/structs +// insulate from oft-changing upstream interfaces/structs. type Mounter interface { mountutils.Interface diff --git a/pkg/mounter/mount_linux.go b/pkg/mounter/mount_linux.go index 7df2219b84..82cce121f1 100644 --- a/pkg/mounter/mount_linux.go +++ b/pkg/mounter/mount_linux.go @@ -20,6 +20,7 @@ limitations under the License. package mounter import ( + "errors" "fmt" "os" "os/exec" @@ -48,14 +49,14 @@ func NewSafeMounter() (*mountutils.SafeFormatAndMount, error) { } func NewSafeMounterV2() (*mountutils.SafeFormatAndMount, error) { - return nil, fmt.Errorf("NewSafeMounterV2 is not supported on this platform") + return nil, errors.New("NewSafeMounterV2 is not supported on this platform") } // FindDevicePath finds path of device and verifies its existence // if the device is not nvme, return the path directly -// if the device is nvme, finds and returns the nvme device path eg. /dev/nvme1n1 +// if the device is nvme, finds and returns the nvme device path eg. /dev/nvme1n1. func (m *NodeMounter) FindDevicePath(devicePath, volumeID, partition, region string) (string, error) { - strippedVolumeName := strings.Replace(volumeID, "-", "", -1) + strippedVolumeName := strings.ReplaceAll(volumeID, "-", "") canonicalDevicePath := "" // If the given path exists, the device MAY be nvme. 
Further, it MAY be a @@ -131,7 +132,7 @@ func (m *NodeMounter) FindDevicePath(devicePath, volumeID, partition, region str } // findNvmeVolume looks for the nvme volume with the specified name -// It follows the symlink (if it exists) and returns the absolute path to the device +// It follows the symlink (if it exists) and returns the absolute path to the device. func findNvmeVolume(findName string) (device string, err error) { p := filepath.Join("/dev/disk/by-id/", findName) stat, err := os.Lstat(p) @@ -162,12 +163,12 @@ func findNvmeVolume(findName string) (device string, err error) { } // execRunner is a helper to inject exec.Comamnd().CombinedOutput() for verifyVolumeSerialMatch -// Tests use a mocked version that does not actually execute any binaries +// Tests use a mocked version that does not actually execute any binaries. func execRunner(name string, arg ...string) ([]byte, error) { return exec.Command(name, arg...).CombinedOutput() } -// verifyVolumeSerialMatch checks the volume serial of the device against the expected volume +// verifyVolumeSerialMatch checks the volume serial of the device against the expected volume. func verifyVolumeSerialMatch(canonicalDevicePath string, strippedVolumeName string, execRunner func(string, ...string) ([]byte, error)) error { // In some rare cases, a race condition can lead to the /dev/disk/by-id/ symlink becoming out of date // See https://github.com/kubernetes-sigs/aws-ebs-csi-driver/issues/1224 for more info @@ -193,7 +194,7 @@ func verifyVolumeSerialMatch(canonicalDevicePath string, strippedVolumeName stri return nil } -// PreparePublishTarget creates the target directory for the volume to be mounted +// PreparePublishTarget creates the target directory for the volume to be mounted. func (m *NodeMounter) PreparePublishTarget(target string) error { klog.V(4).InfoS("NodePublishVolume: creating dir", "target", target) if err := m.MakeDir(target); err != nil { @@ -202,7 +203,7 @@ func (m *NodeMounter) PreparePublishTarget(target string) error { return nil } -// IsBlockDevice checks if the given path is a block device +// IsBlockDevice checks if the given path is a block device. func (m *NodeMounter) IsBlockDevice(fullPath string) (bool, error) { var st unix.Stat_t err := unix.Stat(fullPath, &st) @@ -213,7 +214,7 @@ func (m *NodeMounter) IsBlockDevice(fullPath string) (bool, error) { return (st.Mode & unix.S_IFMT) == unix.S_IFBLK, nil } -// GetBlockSizeBytes gets the size of the disk in bytes +// GetBlockSizeBytes gets the size of the disk in bytes. func (m *NodeMounter) GetBlockSizeBytes(devicePath string) (int64, error) { output, err := m.Exec.Command("blockdev", "--getsize64", devicePath).Output() if err != nil { @@ -227,7 +228,7 @@ func (m *NodeMounter) GetBlockSizeBytes(devicePath string) (int64, error) { return gotSizeBytes, nil } -// appendPartition appends the partition to the device path +// appendPartition appends the partition to the device path. func (m *NodeMounter) appendPartition(devicePath, partition string) string { if partition == "" { return devicePath @@ -245,13 +246,13 @@ func (m NodeMounter) GetDeviceNameFromMount(mountPath string) (string, int, erro return mountutils.GetDeviceNameFromMount(m, mountPath) } -// IsCorruptedMnt return true if err is about corrupted mount point +// IsCorruptedMnt return true if err is about corrupted mount point. 
func (m NodeMounter) IsCorruptedMnt(err error) bool { return mountutils.IsCorruptedMnt(err) } // This function is mirrored in ./sanity_test.go to make sure sanity test covered this block of code -// Please mirror the change to func MakeFile in ./sanity_test.go +// Please mirror the change to func MakeFile in ./sanity_test.go. func (m *NodeMounter) MakeFile(path string) error { f, err := os.OpenFile(path, os.O_CREATE, os.FileMode(0644)) if err != nil { @@ -266,7 +267,7 @@ func (m *NodeMounter) MakeFile(path string) error { } // This function is mirrored in ./sanity_test.go to make sure sanity test covered this block of code -// Please mirror the change to func MakeFile in ./sanity_test.go +// Please mirror the change to func MakeFile in ./sanity_test.go. func (m *NodeMounter) MakeDir(path string) error { err := os.MkdirAll(path, os.FileMode(0755)) if err != nil { @@ -278,28 +279,28 @@ func (m *NodeMounter) MakeDir(path string) error { } // This function is mirrored in ./sanity_test.go to make sure sanity test covered this block of code -// Please mirror the change to func MakeFile in ./sanity_test.go +// Please mirror the change to func MakeFile in ./sanity_test.go. func (m *NodeMounter) PathExists(path string) (bool, error) { return mountutils.PathExists(path) } -// Resize resizes the filesystem of the given devicePath +// Resize resizes the filesystem of the given devicePath. func (m *NodeMounter) Resize(devicePath, deviceMountPath string) (bool, error) { return mountutils.NewResizeFs(m.Exec).Resize(devicePath, deviceMountPath) } -// NeedResize checks if the filesystem of the given devicePath needs to be resized +// NeedResize checks if the filesystem of the given devicePath needs to be resized. func (m *NodeMounter) NeedResize(devicePath string, deviceMountPath string) (bool, error) { return mountutils.NewResizeFs(m.Exec).NeedResize(devicePath, deviceMountPath) } -// Unpublish unmounts the given path +// Unpublish unmounts the given path. func (m *NodeMounter) Unpublish(path string) error { // On linux, unpublish and unstage both perform an unmount return m.Unstage(path) } -// Unstage unmounts the given path +// Unstage unmounts the given path. func (m *NodeMounter) Unstage(path string) error { err := mountutils.CleanupMountPoint(path, m, false) // Ignore the error when it contains "not mounted", because that indicates the diff --git a/pkg/mounter/mount_linux_test.go b/pkg/mounter/mount_linux_test.go index c36826642f..ebb4f70b46 100644 --- a/pkg/mounter/mount_linux_test.go +++ b/pkg/mounter/mount_linux_test.go @@ -27,7 +27,6 @@ import ( "github.com/stretchr/testify/assert" "k8s.io/mount-utils" - utilexec "k8s.io/utils/exec" fakeexec "k8s.io/utils/exec/testing" ) @@ -138,7 +137,6 @@ func TestMakeFile(t *testing.T) { if exists, err := mountObj.PathExists(targetPath); !exists { t.Fatalf("Expect no error but got: %v", err) } - } func TestPathExists(t *testing.T) { @@ -165,7 +163,6 @@ func TestPathExists(t *testing.T) { if exists { t.Fatalf("Expected file %s to not exist", targetPath) } - } func TestGetDeviceName(t *testing.T) { @@ -186,7 +183,6 @@ func TestGetDeviceName(t *testing.T) { if _, _, err := mountObj.GetDeviceNameFromMount(targetPath); err != nil { t.Fatalf("Expect no error but got: %v", err) } - } func TestFindDevicePath(t *testing.T) { diff --git a/pkg/mounter/mount_unsupported.go b/pkg/mounter/mount_unsupported.go index 394f72df2d..2637a01079 100644 --- a/pkg/mounter/mount_unsupported.go +++ b/pkg/mounter/mount_unsupported.go @@ -20,7 +20,8 @@ limitations under the License. 
package mounter import ( - "fmt" + "errors" + mountutils "k8s.io/mount-utils" ) @@ -34,35 +35,31 @@ run driver e2e tests. */ func NewSafeMounter() (*mountutils.SafeFormatAndMount, error) { - return nil, fmt.Errorf("NewSafeMounter is not supported on this platform") + return nil, errors.New("NewSafeMounter is not supported on this platform") } func NewSafeMounterV2() (*mountutils.SafeFormatAndMount, error) { - return nil, fmt.Errorf("NewSafeMounterV2 is not supported on this platform") + return nil, errors.New("NewSafeMounterV2 is not supported on this platform") } func (m *NodeMounter) FindDevicePath(devicePath, volumeID, partition, region string) (string, error) { - return stubMessage, fmt.Errorf(stubMessage) + return stubMessage, errors.New(stubMessage) } func (m *NodeMounter) PreparePublishTarget(target string) error { - return fmt.Errorf(stubMessage) + return errors.New(stubMessage) } func (m *NodeMounter) IsBlockDevice(fullPath string) (bool, error) { - return false, fmt.Errorf(stubMessage) + return false, errors.New(stubMessage) } func (m *NodeMounter) GetBlockSizeBytes(devicePath string) (int64, error) { - return 1, fmt.Errorf(stubMessage) -} - -func (m *NodeMounter) appendPartition(devicePath, partition string) string { - return stubMessage + return 1, errors.New(stubMessage) } func (m NodeMounter) GetDeviceNameFromMount(mountPath string) (string, int, error) { - return stubMessage, 0, fmt.Errorf(stubMessage) + return stubMessage, 0, errors.New(stubMessage) } func (m NodeMounter) IsCorruptedMnt(err error) bool { @@ -70,29 +67,29 @@ func (m NodeMounter) IsCorruptedMnt(err error) bool { } func (m *NodeMounter) MakeFile(path string) error { - return fmt.Errorf(stubMessage) + return errors.New(stubMessage) } func (m *NodeMounter) MakeDir(path string) error { - return fmt.Errorf(stubMessage) + return errors.New(stubMessage) } func (m *NodeMounter) PathExists(path string) (bool, error) { - return false, fmt.Errorf(stubMessage) + return false, errors.New(stubMessage) } func (m *NodeMounter) Resize(devicePath, deviceMountPath string) (bool, error) { - return false, fmt.Errorf(stubMessage) + return false, errors.New(stubMessage) } func (m *NodeMounter) NeedResize(devicePath string, deviceMountPath string) (bool, error) { - return false, fmt.Errorf(stubMessage) + return false, errors.New(stubMessage) } func (m *NodeMounter) Unpublish(path string) error { - return fmt.Errorf(stubMessage) + return errors.New(stubMessage) } func (m *NodeMounter) Unstage(path string) error { - return fmt.Errorf(stubMessage) + return errors.New(stubMessage) } diff --git a/pkg/mounter/mount_windows.go b/pkg/mounter/mount_windows.go index afb4f41151..6883cf4edf 100644 --- a/pkg/mounter/mount_windows.go +++ b/pkg/mounter/mount_windows.go @@ -20,6 +20,7 @@ limitations under the License. 
package mounter import ( + "errors" "fmt" "regexp" @@ -31,7 +32,7 @@ import ( ) var ( - ErrUnsupportedMounter = fmt.Errorf("unsupported mounter type") + ErrUnsupportedMounter = errors.New("unsupported mounter type") ) func (m NodeMounter) FindDevicePath(devicePath, volumeID, _, _ string) (string, error) { diff --git a/pkg/mounter/mount_windows_proxyV1.go b/pkg/mounter/mount_windows_proxyV1.go index 71cdf93cad..af6cbc225f 100644 --- a/pkg/mounter/mount_windows_proxyV1.go +++ b/pkg/mounter/mount_windows_proxyV1.go @@ -162,7 +162,7 @@ func (mounter *CSIProxyMounter) WriteVolumeCache(target string) { } func (mounter *CSIProxyMounter) List() ([]mountutils.MountPoint, error) { - return []mountutils.MountPoint{}, fmt.Errorf("List not implemented for CSIProxyMounter") + return []mountutils.MountPoint{}, errors.New("List not implemented for CSIProxyMounter") } func (mounter *CSIProxyMounter) IsMountPointMatch(mp mountutils.MountPoint, dir string) bool { @@ -203,11 +203,11 @@ func (mounter *CSIProxyMounter) IsLikelyNotMountPoint(path string) (bool, error) } func (mounter *CSIProxyMounter) PathIsDevice(pathname string) (bool, error) { - return false, fmt.Errorf("PathIsDevice not implemented for CSIProxyMounter") + return false, errors.New("PathIsDevice not implemented for CSIProxyMounter") } func (mounter *CSIProxyMounter) DeviceOpened(pathname string) (bool, error) { - return false, fmt.Errorf("DeviceOpened not implemented for CSIProxyMounter") + return false, errors.New("DeviceOpened not implemented for CSIProxyMounter") } // GetDeviceNameFromMount returns the disk number for a mount path. @@ -230,11 +230,11 @@ func (mounter *CSIProxyMounter) GetDeviceNameFromMount(mountPath, _ string) (str } func (mounter *CSIProxyMounter) MakeRShared(path string) error { - return fmt.Errorf("MakeRShared not implemented for CSIProxyMounter") + return errors.New("MakeRShared not implemented for CSIProxyMounter") } func (mounter *CSIProxyMounter) MakeFile(pathname string) error { - return fmt.Errorf("MakeFile not implemented for CSIProxyMounter") + return errors.New("MakeFile not implemented for CSIProxyMounter") } // MakeDir - Creates a directory. The CSI proxy takes in context information. 
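Aside: the conversions above (and in mount_unsupported.go) follow the usual Go guidance of building constant error messages with errors.New and reserving fmt.Errorf for cases that actually format arguments or wrap with %w; passing a variable such as stubMessage to fmt.Errorf also risks misinterpreting any % verbs in it. A minimal sketch of that split, with errNotSupported, step, and doWork as illustrative names only, not code from this repository:

package main

import (
	"errors"
	"fmt"
)

// Constant message: errors.New is enough and avoids treating the text as a format string.
var errNotSupported = errors.New("not supported on this platform")

func step() error { return errNotSupported }

func doWork() error {
	if err := step(); err != nil {
		// fmt.Errorf stays in the picture only for formatting or wrapping (%w).
		return fmt.Errorf("doWork failed: %w", err)
	}
	return nil
}

func main() {
	fmt.Println(doWork())
}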
@@ -266,35 +266,35 @@ func (mounter *CSIProxyMounter) ExistsPath(path string) (bool, error) { } func (mounter *CSIProxyMounter) EvalHostSymlinks(pathname string) (string, error) { - return "", fmt.Errorf("EvalHostSymlinks is not implemented for CSIProxyMounter") + return "", errors.New("EvalHostSymlinks is not implemented for CSIProxyMounter") } func (mounter *CSIProxyMounter) GetMountRefs(pathname string) ([]string, error) { - return []string{}, fmt.Errorf("GetMountRefs is not implemented for CSIProxyMounter") + return []string{}, errors.New("GetMountRefs is not implemented for CSIProxyMounter") } func (mounter *CSIProxyMounter) GetFSGroup(pathname string) (int64, error) { - return -1, fmt.Errorf("GetFSGroup is not implemented for CSIProxyMounter") + return -1, errors.New("GetFSGroup is not implemented for CSIProxyMounter") } func (mounter *CSIProxyMounter) GetSELinuxSupport(pathname string) (bool, error) { - return false, fmt.Errorf("GetSELinuxSupport is not implemented for CSIProxyMounter") + return false, errors.New("GetSELinuxSupport is not implemented for CSIProxyMounter") } func (mounter *CSIProxyMounter) GetMode(pathname string) (os.FileMode, error) { - return 0, fmt.Errorf("GetMode is not implemented for CSIProxyMounter") + return 0, errors.New("GetMode is not implemented for CSIProxyMounter") } func (mounter *CSIProxyMounter) MountSensitive(source string, target string, fstype string, options []string, sensitiveOptions []string) error { - return fmt.Errorf("MountSensitive is not implemented for CSIProxyMounter") + return errors.New("MountSensitive is not implemented for CSIProxyMounter") } func (mounter *CSIProxyMounter) MountSensitiveWithoutSystemd(source string, target string, fstype string, options []string, sensitiveOptions []string) error { - return fmt.Errorf("MountSensitiveWithoutSystemd is not implemented for CSIProxyMounter") + return errors.New("MountSensitiveWithoutSystemd is not implemented for CSIProxyMounter") } func (mounter *CSIProxyMounter) MountSensitiveWithoutSystemdWithMountFlags(source string, target string, fstype string, options []string, sensitiveOptions []string, mountFlags []string) error { - return fmt.Errorf("MountSensitiveWithoutSystemdWithMountFlags is not implemented for CSIProxyMounter") + return errors.New("MountSensitiveWithoutSystemdWithMountFlags is not implemented for CSIProxyMounter") } // Rescan would trigger an update storage cache via the CSI proxy. 
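Aside: the metrics.go hunk earlier in this patch replaces bare type assertions, which panic when the dynamic type does not match, with the comma-ok form plus a logged fallback. A minimal sketch of the same pattern; registry, counterVec, and increase are illustrative stand-ins rather than the driver's real types:

package main

import "fmt"

type counterVec struct{ n int }

func (c *counterVec) Inc() { c.n++ }

func increase(registry map[string]interface{}, name string) {
	metric, ok := registry[name]
	if !ok {
		return
	}
	// metric.(*counterVec).Inc() would panic if the stored value had another type;
	// the comma-ok form degrades to a log line and skips the update instead.
	if cv, ok := metric.(*counterVec); ok {
		cv.Inc()
	} else {
		fmt.Printf("metric %q is not a *counterVec; increase skipped\n", name)
	}
}

func main() {
	reg := map[string]interface{}{"requests_total": &counterVec{}, "latency": "not-a-counter"}
	increase(reg, "requests_total")
	increase(reg, "latency") // logs and skips rather than panicking
}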
diff --git a/pkg/mounter/mount_windows_proxyV2.go b/pkg/mounter/mount_windows_proxyV2.go index 468ee219f0..bc340650b9 100644 --- a/pkg/mounter/mount_windows_proxyV2.go +++ b/pkg/mounter/mount_windows_proxyV2.go @@ -161,7 +161,7 @@ func (mounter *CSIProxyMounterV2) WriteVolumeCache(target string) { } func (mounter *CSIProxyMounterV2) List() ([]mountutils.MountPoint, error) { - return []mountutils.MountPoint{}, fmt.Errorf("List not implemented for CSIProxyMounter") + return []mountutils.MountPoint{}, errors.New("List not implemented for CSIProxyMounter") } func (mounter *CSIProxyMounterV2) IsMountPointMatch(mp mountutils.MountPoint, dir string) bool { @@ -202,11 +202,11 @@ func (mounter *CSIProxyMounterV2) IsLikelyNotMountPoint(path string) (bool, erro } func (mounter *CSIProxyMounterV2) PathIsDevice(pathname string) (bool, error) { - return false, fmt.Errorf("PathIsDevice not implemented for CSIProxyMounter") + return false, errors.New("PathIsDevice not implemented for CSIProxyMounter") } func (mounter *CSIProxyMounterV2) DeviceOpened(pathname string) (bool, error) { - return false, fmt.Errorf("DeviceOpened not implemented for CSIProxyMounter") + return false, errors.New("DeviceOpened not implemented for CSIProxyMounter") } // GetDeviceNameFromMount returns the disk number for a mount path. @@ -229,11 +229,11 @@ func (mounter *CSIProxyMounterV2) GetDeviceNameFromMount(mountPath, _ string) (s } func (mounter *CSIProxyMounterV2) MakeRShared(path string) error { - return fmt.Errorf("MakeRShared not implemented for CSIProxyMounter") + return errors.New("MakeRShared not implemented for CSIProxyMounter") } func (mounter *CSIProxyMounterV2) MakeFile(pathname string) error { - return fmt.Errorf("MakeFile not implemented for CSIProxyMounter") + return errors.New("MakeFile not implemented for CSIProxyMounter") } // MakeDir - Creates a directory. The CSI proxy takes in context information. 
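Aside: the test changes near the top of this patch lean on three small idioms: t.Helper() so failures are attributed to the calling test rather than the helper, range-over-int loops (Go 1.22+) when the loop index is unused, and strings.ReplaceAll in place of strings.Replace(..., -1). A minimal sketch assuming Go 1.22 or newer; assertNoDashes and TestIDs are hypothetical names, not helpers from this repository:

package example

import (
	"strings"
	"testing"
)

func assertNoDashes(t *testing.T, id string) {
	t.Helper() // a failure here is reported at the caller's line
	if strings.ReplaceAll(id, "-", "") != id {
		t.Fatalf("unexpected '-' in %q", id)
	}
}

func TestIDs(t *testing.T) {
	const num = 5
	for range num { // replaces `for i := 0; i < num; i++` when i is unused
		assertNoDashes(t, "vol123")
	}
}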
@@ -265,35 +265,35 @@ func (mounter *CSIProxyMounterV2) ExistsPath(path string) (bool, error) { } func (mounter *CSIProxyMounterV2) EvalHostSymlinks(pathname string) (string, error) { - return "", fmt.Errorf("EvalHostSymlinks is not implemented for CSIProxyMounter") + return "", errors.New("EvalHostSymlinks is not implemented for CSIProxyMounter") } func (mounter *CSIProxyMounterV2) GetMountRefs(pathname string) ([]string, error) { - return []string{}, fmt.Errorf("GetMountRefs is not implemented for CSIProxyMounter") + return []string{}, errors.New("GetMountRefs is not implemented for CSIProxyMounter") } func (mounter *CSIProxyMounterV2) GetFSGroup(pathname string) (int64, error) { - return -1, fmt.Errorf("GetFSGroup is not implemented for CSIProxyMounter") + return -1, errors.New("GetFSGroup is not implemented for CSIProxyMounter") } func (mounter *CSIProxyMounterV2) GetSELinuxSupport(pathname string) (bool, error) { - return false, fmt.Errorf("GetSELinuxSupport is not implemented for CSIProxyMounter") + return false, errors.New("GetSELinuxSupport is not implemented for CSIProxyMounter") } func (mounter *CSIProxyMounterV2) GetMode(pathname string) (os.FileMode, error) { - return 0, fmt.Errorf("GetMode is not implemented for CSIProxyMounter") + return 0, errors.New("GetMode is not implemented for CSIProxyMounter") } func (mounter *CSIProxyMounterV2) MountSensitive(source string, target string, fstype string, options []string, sensitiveOptions []string) error { - return fmt.Errorf("MountSensitive is not implemented for CSIProxyMounter") + return errors.New("MountSensitive is not implemented for CSIProxyMounter") } func (mounter *CSIProxyMounterV2) MountSensitiveWithoutSystemd(source string, target string, fstype string, options []string, sensitiveOptions []string) error { - return fmt.Errorf("MountSensitiveWithoutSystemd is not implemented for CSIProxyMounter") + return errors.New("MountSensitiveWithoutSystemd is not implemented for CSIProxyMounter") } func (mounter *CSIProxyMounterV2) MountSensitiveWithoutSystemdWithMountFlags(source string, target string, fstype string, options []string, sensitiveOptions []string, mountFlags []string) error { - return fmt.Errorf("MountSensitiveWithoutSystemdWithMountFlags is not implemented for CSIProxyMounter") + return errors.New("MountSensitiveWithoutSystemdWithMountFlags is not implemented for CSIProxyMounter") } // Rescan would trigger an update storage cache via the CSI proxy. diff --git a/pkg/util/template/funcs.go b/pkg/util/template/funcs.go index 9b1b868cc2..3195138455 100644 --- a/pkg/util/template/funcs.go +++ b/pkg/util/template/funcs.go @@ -15,26 +15,27 @@ package template import ( + "errors" "fmt" "html/template" "strings" ) -// Disable functions -func html(args ...interface{}) (string, error) { - return "", fmt.Errorf("cannot call 'html' function") +// Disable functions. 
+func html(...interface{}) (string, error) { + return "", errors.New("cannot call 'html' function") } -func js(args ...interface{}) (string, error) { - return "", fmt.Errorf("cannot call 'js' function") +func js(...interface{}) (string, error) { + return "", errors.New("cannot call 'js' function") } -func call(args ...interface{}) (string, error) { - return "", fmt.Errorf("cannot call 'call' function") +func call(...interface{}) (string, error) { + return "", errors.New("cannot call 'call' function") } -func urlquery(args ...interface{}) (string, error) { - return "", fmt.Errorf("cannot call 'urlquery' function") +func urlquery(...interface{}) (string, error) { + return "", errors.New("cannot call 'urlquery' function") } func contains(arg1, arg2 string) bool { diff --git a/pkg/util/template/template.go b/pkg/util/template/template.go index c8b07c3a0a..ca7f21bb61 100644 --- a/pkg/util/template/template.go +++ b/pkg/util/template/template.go @@ -44,7 +44,7 @@ func Evaluate(tm []string, props interface{}, warnOnly bool) (map[string]string, key, value := st[0], st[1] - t := template.New("tmpl").Funcs(template.FuncMap(newFuncMap())) + t := template.New("tmpl").Funcs(newFuncMap()) val, err := execTemplate(value, props, t) if err != nil { if warnOnly { diff --git a/pkg/util/template/template_test.go b/pkg/util/template/template_test.go index a292e9a685..d866acb413 100644 --- a/pkg/util/template/template_test.go +++ b/pkg/util/template/template_test.go @@ -202,7 +202,6 @@ func TestEvaluate(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - props := &PVProps{ PVCName: tc.pvcName, PVCNamespace: tc.pvcNamespace, @@ -343,7 +342,6 @@ func TestEvaluateVolumeSnapshotTemplate(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - props := &VolumeSnapshotProps{ VolumeSnapshotName: tc.volumeSnapshotName, VolumeSnapshotNamespace: tc.volumeSnapshotNamespace, diff --git a/pkg/util/util.go b/pkg/util/util.go index 3e0c5a6082..346dde5d5a 100644 --- a/pkg/util/util.go +++ b/pkg/util/util.go @@ -41,32 +41,34 @@ var ( isMACAddressRegex = regexp.MustCompile(`([0-9A-Fa-f]{2}[:-]){5}([0-9A-Fa-f]{2})`) ) -// RoundUpBytes rounds up the volume size in bytes up to multiplications of GiB +// RoundUpBytes rounds up the volume size in bytes up to multiplications of GiB. func RoundUpBytes(volumeSizeBytes int64) int64 { return roundUpSize(volumeSizeBytes, GiB) * GiB } // RoundUpGiB rounds up the volume size in bytes upto multiplications of GiB -// in the unit of GiB +// in the unit of GiB. func RoundUpGiB(volumeSizeBytes int64) (int32, error) { result := roundUpSize(volumeSizeBytes, GiB) if result > int64(math.MaxInt32) { return 0, fmt.Errorf("rounded up size exceeds maximum value of int32: %d", result) } + //nolint:gosec // Integer overflow handled return int32(result), nil } -// BytesToGiB converts Bytes to GiB +// BytesToGiB converts Bytes to GiB. func BytesToGiB(volumeSizeBytes int64) int32 { result := volumeSizeBytes / GiB if result > int64(math.MaxInt32) { // Handle overflow return math.MaxInt32 } + //nolint:gosec // Integer overflow handled return int32(result) } -// GiBToBytes converts GiB to Bytes +// GiBToBytes converts GiB to Bytes. 
func GiBToBytes(volumeSizeGiB int32) int64 { return int64(volumeSizeGiB) * GiB } @@ -134,20 +136,20 @@ func IsSBE(region string) bool { return region == "snow" } -// StringIsAlphanumeric returns true if a given string contains only English letters or numbers +// StringIsAlphanumeric returns true if a given string contains only English letters or numbers. func StringIsAlphanumeric(s string) bool { return isAlphanumericRegex(s) } -// CountMACAddresses returns the amount of MAC addresses within a string +// CountMACAddresses returns the amount of MAC addresses within a string. func CountMACAddresses(s string) int { matches := isMACAddressRegex.FindAllStringIndex(s, -1) return len(matches) } -// NormalizeWindowsPath normalizes a Windows path +// NormalizeWindowsPath normalizes a Windows path. func NormalizeWindowsPath(path string) string { - normalizedPath := strings.Replace(path, "/", "\\", -1) + normalizedPath := strings.ReplaceAll(path, "/", "\\") if strings.HasPrefix(normalizedPath, "\\") { normalizedPath = "c:" + normalizedPath } diff --git a/pkg/util/util_test.go b/pkg/util/util_test.go index c5a71f3ca6..2d9f6cf760 100644 --- a/pkg/util/util_test.go +++ b/pkg/util/util_test.go @@ -20,13 +20,12 @@ limitations under the License. package util import ( - "fmt" + "errors" "reflect" "testing" - "github.com/stretchr/testify/assert" - csi "github.com/container-storage-interface/spec/lib/go/csi" + "github.com/stretchr/testify/assert" ) func TestRoundUpBytes(t *testing.T) { @@ -107,7 +106,7 @@ func TestParseEndpoint(t *testing.T) { { name: "invalid endpoint", endpoint: "http://127.0.0.1", - expErr: fmt.Errorf("unsupported protocol: http"), + expErr: errors.New("unsupported protocol: http"), }, } @@ -119,7 +118,6 @@ func TestParseEndpoint(t *testing.T) { if err.Error() != tc.expErr.Error() { t.Fatalf("Expecting err: expected %v, got %v", tc.expErr, err) } - } else { if err != nil { t.Fatalf("err is not nil. got: %v", err) @@ -134,7 +132,6 @@ func TestParseEndpoint(t *testing.T) { } }) } - } func TestGetAccessModes(t *testing.T) { diff --git a/tests/e2e/driver/driver.go b/tests/e2e/driver/driver.go index b3faa749c4..0fff49879c 100644 --- a/tests/e2e/driver/driver.go +++ b/tests/e2e/driver/driver.go @@ -32,13 +32,13 @@ type PVTestDriver interface { VolumeSnapshotTestDriver } -// DynamicPVTestDriver represents an interface for a CSI driver that supports DynamicPV +// DynamicPVTestDriver represents an interface for a CSI driver that supports DynamicPV. type DynamicPVTestDriver interface { // GetDynamicProvisionStorageClass returns a StorageClass dynamic provision Persistent Volume GetDynamicProvisionStorageClass(parameters map[string]string, mountOptions []string, reclaimPolicy *v1.PersistentVolumeReclaimPolicy, volumeExpansion *bool, bindingMode *storagev1.VolumeBindingMode, allowedTopologyValues []string, namespace string) *storagev1.StorageClass } -// PreProvisionedVolumeTestDriver represents an interface for a CSI driver that supports pre-provisioned volume +// PreProvisionedVolumeTestDriver represents an interface for a CSI driver that supports pre-provisioned volume. 
type PreProvisionedVolumeTestDriver interface { // GetPersistentVolume returns a PersistentVolume with pre-provisioned volumeHandle GetPersistentVolume(volumeID string, fsType string, size string, reclaimPolicy *v1.PersistentVolumeReclaimPolicy, namespace string, accessMode v1.PersistentVolumeAccessMode, volumeMode v1.PersistentVolumeMode) *v1.PersistentVolume diff --git a/tests/e2e/driver/ebs_csi_driver.go b/tests/e2e/driver/ebs_csi_driver.go index ee0a038068..efab4571e3 100644 --- a/tests/e2e/driver/ebs_csi_driver.go +++ b/tests/e2e/driver/ebs_csi_driver.go @@ -29,12 +29,12 @@ const ( True = "true" ) -// Implement DynamicPVTestDriver interface +// Implement DynamicPVTestDriver interface. type ebsCSIDriver struct { driverName string } -// InitEbsCSIDriver returns ebsCSIDriver that implements DynamicPVTestDriver interface +// InitEbsCSIDriver returns ebsCSIDriver that implements DynamicPVTestDriver interface. func InitEbsCSIDriver() PVTestDriver { return &ebsCSIDriver{ driverName: ebscsidriver.DriverName, @@ -92,7 +92,7 @@ func (d *ebsCSIDriver) GetPersistentVolume(volumeID string, fsType string, size Spec: v1.PersistentVolumeSpec{ AccessModes: []v1.PersistentVolumeAccessMode{accessMode}, Capacity: v1.ResourceList{ - v1.ResourceName(v1.ResourceStorage): resource.MustParse(size), + v1.ResourceStorage: resource.MustParse(size), }, PersistentVolumeReclaimPolicy: pvReclaimPolicy, PersistentVolumeSource: v1.PersistentVolumeSource{ @@ -107,7 +107,7 @@ func (d *ebsCSIDriver) GetPersistentVolume(volumeID string, fsType string, size } } -// MinimumSizeForVolumeType returns the minimum disk size for each volumeType +// MinimumSizeForVolumeType returns the minimum disk size for each volumeType. func MinimumSizeForVolumeType(volumeType string) string { switch volumeType { case "st1", "sc1": diff --git a/tests/e2e/dynamic_provisioning.go b/tests/e2e/dynamic_provisioning.go index c39de5b51c..bf17eb25dd 100644 --- a/tests/e2e/dynamic_provisioning.go +++ b/tests/e2e/dynamic_provisioning.go @@ -20,21 +20,19 @@ import ( "os" "strings" + awscloud "github.com/kubernetes-sigs/aws-ebs-csi-driver/pkg/cloud" + ebscsidriver "github.com/kubernetes-sigs/aws-ebs-csi-driver/pkg/driver" + "github.com/kubernetes-sigs/aws-ebs-csi-driver/tests/e2e/driver" + "github.com/kubernetes-sigs/aws-ebs-csi-driver/tests/e2e/testsuites" . 
"github.com/onsi/ginkgo/v2" v1 "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" - clientset "k8s.io/client-go/kubernetes" - restclientset "k8s.io/client-go/rest" - "k8s.io/kubernetes/test/e2e/framework" - - "github.com/kubernetes-sigs/aws-ebs-csi-driver/tests/e2e/driver" - "github.com/kubernetes-sigs/aws-ebs-csi-driver/tests/e2e/testsuites" - - awscloud "github.com/kubernetes-sigs/aws-ebs-csi-driver/pkg/cloud" - ebscsidriver "github.com/kubernetes-sigs/aws-ebs-csi-driver/pkg/driver" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer" + clientset "k8s.io/client-go/kubernetes" + restclientset "k8s.io/client-go/rest" + "k8s.io/kubernetes/test/e2e/framework" admissionapi "k8s.io/pod-security-admission/api" ) diff --git a/tests/e2e/format_options.go b/tests/e2e/format_options.go index 440cac8b04..b098192856 100644 --- a/tests/e2e/format_options.go +++ b/tests/e2e/format_options.go @@ -16,6 +16,7 @@ package e2e import ( "fmt" + ebscsidriver "github.com/kubernetes-sigs/aws-ebs-csi-driver/pkg/driver" "github.com/kubernetes-sigs/aws-ebs-csi-driver/tests/e2e/driver" "github.com/kubernetes-sigs/aws-ebs-csi-driver/tests/e2e/testsuites" @@ -90,7 +91,6 @@ var _ = Describe("[ebs-csi-e2e] [single-az] [format-options] Formatting a volume Context(fmt.Sprintf("using an %s filesystem", fsType), func() { for testedParameter, formatOptionTestCase := range formatOptionTests { - formatOptionTestCase := formatOptionTestCase if fsTypeDoesNotSupportFormatOptionParameter(fsType, testedParameter) { continue } diff --git a/tests/e2e/modify_volume.go b/tests/e2e/modify_volume.go index 4083319ad1..b3fb0ec3b8 100644 --- a/tests/e2e/modify_volume.go +++ b/tests/e2e/modify_volume.go @@ -143,7 +143,6 @@ var _ = Describe("[ebs-csi-e2e] [single-az] [modify-volume] Modifying a PVC", fu }) for testName, modifyVolumeTest := range modifyVolumeTests { - modifyVolumeTest := modifyVolumeTest Context(testName, func() { It("will modify associated PV and EBS Volume via volume-modifier-for-k8s", func() { if modifyVolumeTest.ExternalResizerOnly { diff --git a/tests/e2e/pre_provsioning.go b/tests/e2e/pre_provsioning.go index cd77e6e6ca..a95e974561 100644 --- a/tests/e2e/pre_provsioning.go +++ b/tests/e2e/pre_provsioning.go @@ -22,15 +22,14 @@ import ( "strings" "time" - ebscsidriver "github.com/kubernetes-sigs/aws-ebs-csi-driver/pkg/driver" - k8srestclient "k8s.io/client-go/rest" - awscloud "github.com/kubernetes-sigs/aws-ebs-csi-driver/pkg/cloud" + ebscsidriver "github.com/kubernetes-sigs/aws-ebs-csi-driver/pkg/driver" "github.com/kubernetes-sigs/aws-ebs-csi-driver/tests/e2e/driver" "github.com/kubernetes-sigs/aws-ebs-csi-driver/tests/e2e/testsuites" . "github.com/onsi/ginkgo/v2" v1 "k8s.io/api/core/v1" clientset "k8s.io/client-go/kubernetes" + k8srestclient "k8s.io/client-go/rest" "k8s.io/kubernetes/test/e2e/framework" admissionapi "k8s.io/pod-security-admission/api" ) @@ -49,7 +48,7 @@ var ( defaultDiskSizeBytes int64 = defaultDiskSize * 1024 * 1024 * 1024 ) -// Requires env AWS_AVAILABILITY_ZONES a comma separated list of AZs to be set +// Requires env AWS_AVAILABILITY_ZONES a comma separated list of AZs to be set. 
var _ = Describe("[ebs-csi-e2e] [single-az] Pre-Provisioned", func() { f := framework.NewDefaultFramework("ebs") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged diff --git a/tests/e2e/requires_aws_api.go b/tests/e2e/requires_aws_api.go index 2bb03435c6..c5a5696514 100644 --- a/tests/e2e/requires_aws_api.go +++ b/tests/e2e/requires_aws_api.go @@ -18,31 +18,28 @@ import ( "context" "fmt" - "github.com/google/uuid" - "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/config" "github.com/aws/aws-sdk-go-v2/service/ec2" "github.com/aws/aws-sdk-go-v2/service/ec2/types" + "github.com/google/uuid" + volumesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" + awscloud "github.com/kubernetes-sigs/aws-ebs-csi-driver/pkg/cloud" + ebscsidriver "github.com/kubernetes-sigs/aws-ebs-csi-driver/pkg/driver" "github.com/kubernetes-sigs/aws-ebs-csi-driver/tests/e2e/driver" "github.com/kubernetes-sigs/aws-ebs-csi-driver/tests/e2e/testsuites" - "k8s.io/kubernetes/test/e2e/framework" - - volumesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" . "github.com/onsi/ginkgo/v2" v1 "k8s.io/api/core/v1" clientset "k8s.io/client-go/kubernetes" restclientset "k8s.io/client-go/rest" + "k8s.io/kubernetes/test/e2e/framework" admissionapi "k8s.io/pod-security-admission/api" - - awscloud "github.com/kubernetes-sigs/aws-ebs-csi-driver/pkg/cloud" - ebscsidriver "github.com/kubernetes-sigs/aws-ebs-csi-driver/pkg/driver" ) const testTagNamePrefix = "testTag" const testTagValue = "3.1415926" -// generateTagName appends a random uuid to tag name to prevent clashes on parallel e2e test runs on shared cluster +// generateTagName appends a random uuid to tag name to prevent clashes on parallel e2e test runs on shared cluster. func generateTagName() string { return testTagNamePrefix + uuid.NewString()[:8] } diff --git a/tests/e2e/suite_test.go b/tests/e2e/suite_test.go index 209c8216ad..6668c451ea 100644 --- a/tests/e2e/suite_test.go +++ b/tests/e2e/suite_test.go @@ -25,13 +25,13 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - "k8s.io/kubernetes/test/e2e/framework" frameworkconfig "k8s.io/kubernetes/test/e2e/framework/config" ) const kubeconfigEnvVar = "KUBECONFIG" +//nolint:gochecknoinits func init() { rand.New(rand.NewSource(time.Now().UnixNano())) testing.Init() diff --git a/tests/e2e/testsuites/dynamically_provisioned_cmd_volume_tester.go b/tests/e2e/testsuites/dynamically_provisioned_cmd_volume_tester.go index 49f69ed004..f776e2f289 100644 --- a/tests/e2e/testsuites/dynamically_provisioned_cmd_volume_tester.go +++ b/tests/e2e/testsuites/dynamically_provisioned_cmd_volume_tester.go @@ -16,15 +16,14 @@ package testsuites import ( "github.com/kubernetes-sigs/aws-ebs-csi-driver/tests/e2e/driver" + . "github.com/onsi/ginkgo/v2" v1 "k8s.io/api/core/v1" clientset "k8s.io/client-go/kubernetes" - - . "github.com/onsi/ginkgo/v2" ) // DynamicallyProvisionedCmdVolumeTest will provision required StorageClass(es), PVC(s) and Pod(s) // Waiting for the PV provisioner to create a new PV -// Testing if the Pod(s) Cmd is run with a 0 exit code +// Testing if the Pod(s) Cmd is run with a 0 exit code. 
type DynamicallyProvisionedCmdVolumeTest struct { CSIDriver driver.DynamicPVTestDriver Pods []PodDetails diff --git a/tests/e2e/testsuites/dynamically_provisioned_collocated_pod_tester.go b/tests/e2e/testsuites/dynamically_provisioned_collocated_pod_tester.go index d8e5b77f2b..d4bd17ea17 100644 --- a/tests/e2e/testsuites/dynamically_provisioned_collocated_pod_tester.go +++ b/tests/e2e/testsuites/dynamically_provisioned_collocated_pod_tester.go @@ -16,16 +16,14 @@ package testsuites import ( "github.com/kubernetes-sigs/aws-ebs-csi-driver/tests/e2e/driver" - + . "github.com/onsi/ginkgo/v2" v1 "k8s.io/api/core/v1" clientset "k8s.io/client-go/kubernetes" - - . "github.com/onsi/ginkgo/v2" ) // DynamicallyProvisionedCollocatedPodTest will provision required StorageClass(es), PVC(s) and Pod(s) // Waiting for the PV provisioner to create a new PV -// Testing if multiple Pod(s) can write simultaneously +// Testing if multiple Pod(s) can write simultaneously. type DynamicallyProvisionedCollocatedPodTest struct { CSIDriver driver.DynamicPVTestDriver Pods []PodDetails @@ -52,5 +50,4 @@ func (t *DynamicallyProvisionedCollocatedPodTest) Run(client clientset.Interface tpod.WaitForRunning() nodeName = tpod.pod.Spec.NodeName } - } diff --git a/tests/e2e/testsuites/dynamically_provisioned_delete_pod_tester.go b/tests/e2e/testsuites/dynamically_provisioned_delete_pod_tester.go index 20502c43b7..88ce4355f8 100644 --- a/tests/e2e/testsuites/dynamically_provisioned_delete_pod_tester.go +++ b/tests/e2e/testsuites/dynamically_provisioned_delete_pod_tester.go @@ -23,7 +23,7 @@ import ( // DynamicallyProvisionedDeletePodTest will provision required StorageClass and Deployment // Testing if the Pod can write and read to mounted volumes -// Deleting a pod, and again testing if the Pod can write and read to mounted volumes +// Deleting a pod, and again testing if the Pod can write and read to mounted volumes. type DynamicallyProvisionedDeletePodTest struct { CSIDriver driver.DynamicPVTestDriver Pod PodDetails diff --git a/tests/e2e/testsuites/dynamically_provisioned_read_only_volume_tester.go b/tests/e2e/testsuites/dynamically_provisioned_read_only_volume_tester.go index 4cf48c944c..0ada41e7ef 100644 --- a/tests/e2e/testsuites/dynamically_provisioned_read_only_volume_tester.go +++ b/tests/e2e/testsuites/dynamically_provisioned_read_only_volume_tester.go @@ -18,20 +18,18 @@ import ( "fmt" "github.com/kubernetes-sigs/aws-ebs-csi-driver/tests/e2e/driver" - v1 "k8s.io/api/core/v1" - "k8s.io/kubernetes/test/e2e/framework" - - clientset "k8s.io/client-go/kubernetes" - . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" + v1 "k8s.io/api/core/v1" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/framework" ) const expectedReadOnlyLog = "Read-only file system" // DynamicallyProvisionedReadOnlyVolumeTest will provision required StorageClass(es), PVC(s) and Pod(s) // Waiting for the PV provisioner to create a new PV -// Testing that the Pod(s) cannot write to the volume when mounted +// Testing that the Pod(s) cannot write to the volume when mounted. 
type DynamicallyProvisionedReadOnlyVolumeTest struct { CSIDriver driver.DynamicPVTestDriver Pods []PodDetails diff --git a/tests/e2e/testsuites/dynamically_provisioned_reclaim_policy_tester.go b/tests/e2e/testsuites/dynamically_provisioned_reclaim_policy_tester.go index e58e1210ae..fb94bbe7d2 100644 --- a/tests/e2e/testsuites/dynamically_provisioned_reclaim_policy_tester.go +++ b/tests/e2e/testsuites/dynamically_provisioned_reclaim_policy_tester.go @@ -17,13 +17,12 @@ package testsuites import ( "github.com/kubernetes-sigs/aws-ebs-csi-driver/pkg/cloud" "github.com/kubernetes-sigs/aws-ebs-csi-driver/tests/e2e/driver" - - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" clientset "k8s.io/client-go/kubernetes" ) // DynamicallyProvisionedReclaimPolicyTest will provision required PV(s) and PVC(s) -// Testing the correct behavior for different reclaimPolicies +// Testing the correct behavior for different reclaimPolicies. type DynamicallyProvisionedReclaimPolicyTest struct { CSIDriver driver.DynamicPVTestDriver Volumes []VolumeDetails diff --git a/tests/e2e/testsuites/dynamically_provisioned_topology_aware_volume_tester.go b/tests/e2e/testsuites/dynamically_provisioned_topology_aware_volume_tester.go index b1d32078f4..e4d24ee8e6 100644 --- a/tests/e2e/testsuites/dynamically_provisioned_topology_aware_volume_tester.go +++ b/tests/e2e/testsuites/dynamically_provisioned_topology_aware_volume_tester.go @@ -18,17 +18,15 @@ import ( "fmt" "github.com/kubernetes-sigs/aws-ebs-csi-driver/tests/e2e/driver" - + . "github.com/onsi/ginkgo/v2" v1 "k8s.io/api/core/v1" clientset "k8s.io/client-go/kubernetes" - - . "github.com/onsi/ginkgo/v2" ) // DynamicallyProvisionedTopologyAwareVolumeTest will provision required StorageClass(es), PVC(s) and Pod(s) // Waiting for the PV provisioner to create a new PV // Testing if the Pod(s) can write and read to mounted volumes -// Validate PVs have expected PV nodeAffinity +// Validate PVs have expected PV nodeAffinity. type DynamicallyProvisionedTopologyAwareVolumeTest struct { CSIDriver driver.DynamicPVTestDriver Pods []PodDetails diff --git a/tests/e2e/testsuites/dynamically_provisioned_volume_snapshot_tester.go b/tests/e2e/testsuites/dynamically_provisioned_volume_snapshot_tester.go index ef2b3f7c5a..e28405f875 100644 --- a/tests/e2e/testsuites/dynamically_provisioned_volume_snapshot_tester.go +++ b/tests/e2e/testsuites/dynamically_provisioned_volume_snapshot_tester.go @@ -15,14 +15,12 @@ limitations under the License. package testsuites import ( - "github.com/kubernetes-sigs/aws-ebs-csi-driver/tests/e2e/driver" - volumesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" + "github.com/kubernetes-sigs/aws-ebs-csi-driver/tests/e2e/driver" + . "github.com/onsi/ginkgo/v2" v1 "k8s.io/api/core/v1" clientset "k8s.io/client-go/kubernetes" restclientset "k8s.io/client-go/rest" - - . "github.com/onsi/ginkgo/v2" ) // DynamicallyProvisionedVolumeSnapshotTest will provision required StorageClass(es),VolumeSnapshotClass(es), PVC(s) and Pod(s) @@ -30,7 +28,7 @@ import ( // Testing if the Pod(s) can write and read to mounted volumes // Create a snapshot, validate the data is still on the disk, and then write and read to it again // And finally delete the snapshot -// This test only supports a single volume +// This test only supports a single volume. 
type DynamicallyProvisionedVolumeSnapshotTest struct { CSIDriver driver.PVTestDriver Pod PodDetails diff --git a/tests/e2e/testsuites/e2e_utils.go b/tests/e2e/testsuites/e2e_utils.go index 31129eb624..0c5a03d8c4 100644 --- a/tests/e2e/testsuites/e2e_utils.go +++ b/tests/e2e/testsuites/e2e_utils.go @@ -37,7 +37,7 @@ const ( DefaultModificationTimeout = 3 * time.Minute DefaultResizeTimout = 1 * time.Minute - DefaultK8sApiPollingInterval = 5 * time.Second + DefaultK8sAPIPollingInterval = 5 * time.Second Iops = "iops" Throughput = "throughput" @@ -51,22 +51,22 @@ var DefaultGeneratedVolumeMount = VolumeMountDetails{ MountPathGenerate: "/mnt/test-", } -// PodCmdWriteToVolume returns pod command that would write to mounted volume +// PodCmdWriteToVolume returns pod command that would write to mounted volume. func PodCmdWriteToVolume(volumeMountPath string) string { return fmt.Sprintf("echo 'hello world' >> %s/data && grep 'hello world' %s/data && sync", volumeMountPath, volumeMountPath) } -// PodCmdContinuousWrite returns pod command that would continuously write to mounted volume +// PodCmdContinuousWrite returns pod command that would continuously write to mounted volume. func PodCmdContinuousWrite(volumeMountPath string) string { return fmt.Sprintf("while true; do echo \"$(date -u)\" >> /%s/out.txt; sleep 5; done", volumeMountPath) } -// PodCmdGrepVolumeData returns pod command that would check that a volume was written to by PodCmdWriteToVolume +// PodCmdGrepVolumeData returns pod command that would check that a volume was written to by PodCmdWriteToVolume. func PodCmdGrepVolumeData(volumeMountPath string) string { return fmt.Sprintf("grep 'hello world' %s/data", volumeMountPath) } -// IncreasePvcObjectStorage increases `storage` of a K8s PVC object by specified Gigabytes +// IncreasePvcObjectStorage increases `storage` of a K8s PVC object by specified Gigabytes. func IncreasePvcObjectStorage(pvc *v1.PersistentVolumeClaim, sizeIncreaseGi int32) resource.Quantity { pvcSize := pvc.Spec.Resources.Requests["storage"] delta := resource.Quantity{} @@ -76,7 +76,7 @@ func IncreasePvcObjectStorage(pvc *v1.PersistentVolumeClaim, sizeIncreaseGi int3 return pvcSize } -// WaitForPvToResize waiting for pvc size to be resized to desired size +// WaitForPvToResize waiting for pvc size to be resized to desired size. func WaitForPvToResize(c clientset.Interface, ns *v1.Namespace, pvName string, desiredSize resource.Quantity, timeout time.Duration, interval time.Duration) error { framework.Logf("waiting up to %v for pv resize in namespace %q to be complete", timeout, ns.Name) for start := time.Now(); time.Since(start) < timeout; time.Sleep(interval) { @@ -90,7 +90,7 @@ func WaitForPvToResize(c clientset.Interface, ns *v1.Namespace, pvName string, d return fmt.Errorf("gave up after waiting %v for pv %q to complete resizing", timeout, pvName) } -// ResizeTestPvc increases size of given `TestPersistentVolumeClaim` by specified Gigabytes +// ResizeTestPvc increases size of given `TestPersistentVolumeClaim` by specified Gigabytes. 
func ResizeTestPvc(client clientset.Interface, namespace *v1.Namespace, testPvc *TestPersistentVolumeClaim, sizeIncreaseGi int32) (updatedSize resource.Quantity) { framework.Logf("getting pvc name: %v", testPvc.persistentVolumeClaim.Name) pvc, _ := client.CoreV1().PersistentVolumeClaims(namespace.Name).Get(context.TODO(), testPvc.persistentVolumeClaim.Name, metav1.GetOptions{}) @@ -105,19 +105,19 @@ func ResizeTestPvc(client clientset.Interface, namespace *v1.Namespace, testPvc updatedSize = updatedPvc.Spec.Resources.Requests["storage"] framework.Logf("checking the resizing PV result") - err = WaitForPvToResize(client, namespace, updatedPvc.Spec.VolumeName, updatedSize, DefaultResizeTimout, DefaultK8sApiPollingInterval) + err = WaitForPvToResize(client, namespace, updatedPvc.Spec.VolumeName, updatedSize, DefaultResizeTimout, DefaultK8sAPIPollingInterval) framework.ExpectNoError(err) return updatedSize } -// AnnotatePvc annotates supplied k8s pvc object with supplied annotations +// AnnotatePvc annotates supplied k8s pvc object with supplied annotations. func AnnotatePvc(pvc *v1.PersistentVolumeClaim, annotations map[string]string) { for annotation, value := range annotations { pvc.Annotations[annotation] = value } } -// CheckPvAnnotations checks whether supplied k8s pv object contains supplied annotations +// CheckPvAnnotations checks whether supplied k8s pv object contains supplied annotations. func CheckPvAnnotations(pv *v1.PersistentVolume, annotations map[string]string) bool { for annotation, value := range annotations { if pv.Annotations[annotation] != value { @@ -127,7 +127,7 @@ func CheckPvAnnotations(pv *v1.PersistentVolume, annotations map[string]string) return true } -// WaitForPvToModify waiting for PV to be modified +// WaitForPvToModify waiting for PV to be modified. func WaitForPvToModify(c clientset.Interface, ns *v1.Namespace, pvName string, expectedAnnotations map[string]string, timeout time.Duration, interval time.Duration) error { framework.Logf("waiting up to %v for pv in namespace %q to be modified", timeout, ns.Name) @@ -142,7 +142,7 @@ func WaitForPvToModify(c clientset.Interface, ns *v1.Namespace, pvName string, e return fmt.Errorf("gave up after waiting %v for pv %q to complete modifying", timeout, pvName) } -// WaitForVacToApplyToPv waits for a PV's VAC to match the PVC's VAC +// WaitForVacToApplyToPv waits for a PV's VAC to match the PVC's VAC. 
func WaitForVacToApplyToPv(c clientset.Interface, ns *v1.Namespace, pvName string, expectedVac string, timeout time.Duration, interval time.Duration) error { framework.Logf("waiting up to %v for pv in namespace %q to be modified via VAC", timeout, ns.Name) diff --git a/tests/e2e/testsuites/modify_volume_tester.go b/tests/e2e/testsuites/modify_volume_tester.go index 63454d0aaf..38ee37461b 100644 --- a/tests/e2e/testsuites/modify_volume_tester.go +++ b/tests/e2e/testsuites/modify_volume_tester.go @@ -102,13 +102,13 @@ func (modifyVolumeTest *ModifyVolumeTest) Run(c clientset.Interface, ns *v1.Name By("wait for and confirm pv modification") if testType == VolumeModifierForK8s { - err = WaitForPvToModify(c, ns, testVolume.persistentVolume.Name, parametersWithPrefix, DefaultModificationTimeout, DefaultK8sApiPollingInterval) + err = WaitForPvToModify(c, ns, testVolume.persistentVolume.Name, parametersWithPrefix, DefaultModificationTimeout, DefaultK8sAPIPollingInterval) } else if testType == ExternalResizer { - err = WaitForVacToApplyToPv(c, ns, testVolume.persistentVolume.Name, *modifyingPvc.Spec.VolumeAttributesClassName, DefaultModificationTimeout, DefaultK8sApiPollingInterval) + err = WaitForVacToApplyToPv(c, ns, testVolume.persistentVolume.Name, *modifyingPvc.Spec.VolumeAttributesClassName, DefaultModificationTimeout, DefaultK8sAPIPollingInterval) } framework.ExpectNoError(err, fmt.Sprintf("fail to modify pv(%s): %v", modifyingPvc.Name, err)) if modifyVolumeTest.ShouldResizeVolume { - err = WaitForPvToResize(c, ns, testVolume.persistentVolume.Name, updatedPvcSize, DefaultResizeTimout, DefaultK8sApiPollingInterval) + err = WaitForPvToResize(c, ns, testVolume.persistentVolume.Name, updatedPvcSize, DefaultResizeTimout, DefaultK8sAPIPollingInterval) framework.ExpectNoError(err, fmt.Sprintf("fail to resize pv(%s): %v", modifyingPvc.Name, err)) } } diff --git a/tests/e2e/testsuites/pre_provisioned_read_only_volume_tester.go b/tests/e2e/testsuites/pre_provisioned_read_only_volume_tester.go index 8fa5ec2963..2db452a027 100644 --- a/tests/e2e/testsuites/pre_provisioned_read_only_volume_tester.go +++ b/tests/e2e/testsuites/pre_provisioned_read_only_volume_tester.go @@ -18,16 +18,15 @@ import ( "fmt" "github.com/kubernetes-sigs/aws-ebs-csi-driver/tests/e2e/driver" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" v1 "k8s.io/api/core/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" - - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" ) // PreProvisionedReadOnlyVolumeTest will provision required PV(s), PVC(s) and Pod(s) -// Testing that the Pod(s) cannot write to the volume when mounted +// Testing that the Pod(s) cannot write to the volume when mounted. 
type PreProvisionedReadOnlyVolumeTest struct { CSIDriver driver.PreProvisionedVolumeTestDriver Pods []PodDetails diff --git a/tests/e2e/testsuites/pre_provisioned_reclaim_policy_tester.go b/tests/e2e/testsuites/pre_provisioned_reclaim_policy_tester.go index 8fd7793a19..c3e0b7b5ac 100644 --- a/tests/e2e/testsuites/pre_provisioned_reclaim_policy_tester.go +++ b/tests/e2e/testsuites/pre_provisioned_reclaim_policy_tester.go @@ -16,13 +16,12 @@ package testsuites import ( "github.com/kubernetes-sigs/aws-ebs-csi-driver/tests/e2e/driver" - - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" clientset "k8s.io/client-go/kubernetes" ) // PreProvisionedReclaimPolicyTest will provision required PV(s) and PVC(s) -// Testing the correct behavior for different reclaimPolicies +// Testing the correct behavior for different reclaimPolicies. type PreProvisionedReclaimPolicyTest struct { CSIDriver driver.PreProvisionedVolumeTestDriver Volumes []VolumeDetails diff --git a/tests/e2e/testsuites/pre_provisioned_snapshot_volume_tester.go b/tests/e2e/testsuites/pre_provisioned_snapshot_volume_tester.go index 81f10cd738..fcc72f5a22 100644 --- a/tests/e2e/testsuites/pre_provisioned_snapshot_volume_tester.go +++ b/tests/e2e/testsuites/pre_provisioned_snapshot_volume_tester.go @@ -15,16 +15,15 @@ limitations under the License. package testsuites import ( - "fmt" + "errors" "github.com/kubernetes-sigs/aws-ebs-csi-driver/tests/e2e/driver" + . "github.com/onsi/ginkgo/v2" v1 "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" clientset "k8s.io/client-go/kubernetes" k8srestclient "k8s.io/client-go/rest" "k8s.io/kubernetes/test/e2e/framework" - - . "github.com/onsi/ginkgo/v2" ) type PreProvisionedVolumeSnapshotTest struct { @@ -32,20 +31,18 @@ type PreProvisionedVolumeSnapshotTest struct { Pod PodDetails } -func (t *PreProvisionedVolumeSnapshotTest) Run(client clientset.Interface, restclient k8srestclient.Interface, namespace *v1.Namespace, snapshotId string) { - +func (t *PreProvisionedVolumeSnapshotTest) Run(client clientset.Interface, restclient k8srestclient.Interface, namespace *v1.Namespace, snapshotID string) { By("taking snapshots") tvsc, cleanup := CreateVolumeSnapshotClass(restclient, namespace, t.CSIDriver, nil) defer cleanup() - tvolumeSnapshotContent := tvsc.CreateStaticVolumeSnapshotContent(snapshotId) + tvolumeSnapshotContent := tvsc.CreateStaticVolumeSnapshotContent(snapshotID) tvs := tvsc.CreateStaticVolumeSnapshot(tvolumeSnapshotContent) defer tvsc.DeleteVolumeSnapshotContent(tvolumeSnapshotContent) defer tvsc.DeleteSnapshot(tvs) if len(t.Pod.Volumes) < 1 { - err := fmt.Errorf("Volume is not setup for testing pod, exit. ") - framework.ExpectNoError(err) + framework.ExpectNoError(errors.New("volume is not setup for testing pod, exit")) } volume := t.Pod.Volumes[0] diff --git a/tests/e2e/testsuites/pre_provisioned_volume_tester.go b/tests/e2e/testsuites/pre_provisioned_volume_tester.go index b3967fe72d..56ec2cd119 100644 --- a/tests/e2e/testsuites/pre_provisioned_volume_tester.go +++ b/tests/e2e/testsuites/pre_provisioned_volume_tester.go @@ -16,14 +16,13 @@ package testsuites import ( "github.com/kubernetes-sigs/aws-ebs-csi-driver/tests/e2e/driver" + . "github.com/onsi/ginkgo/v2" v1 "k8s.io/api/core/v1" clientset "k8s.io/client-go/kubernetes" - - . "github.com/onsi/ginkgo/v2" ) // PreProvisionedVolumeTest will provision required PV(s), PVC(s) and Pod(s) -// Testing if the Pod(s) can write and read to mounted volumes +// Testing if the Pod(s) can write and read to mounted volumes. 
 type PreProvisionedVolumeTest struct {
 	CSIDriver driver.PreProvisionedVolumeTestDriver
 	Pods      []PodDetails
diff --git a/tests/e2e/testsuites/specs.go b/tests/e2e/testsuites/specs.go
index 20a7d679a0..8aa03d72e8 100644
--- a/tests/e2e/testsuites/specs.go
+++ b/tests/e2e/testsuites/specs.go
@@ -18,13 +18,11 @@ import (
 	"fmt"
 
 	"github.com/kubernetes-sigs/aws-ebs-csi-driver/tests/e2e/driver"
-
+	. "github.com/onsi/ginkgo/v2"
 	v1 "k8s.io/api/core/v1"
 	storagev1 "k8s.io/api/storage/v1"
 	clientset "k8s.io/client-go/kubernetes"
 	restclientset "k8s.io/client-go/rest"
-
-	. "github.com/onsi/ginkgo/v2"
 )
 
 type PodDetails struct {
diff --git a/tests/e2e/testsuites/testsuites.go b/tests/e2e/testsuites/testsuites.go
index d4ac74feca..32ffe0f43b 100644
--- a/tests/e2e/testsuites/testsuites.go
+++ b/tests/e2e/testsuites/testsuites.go
@@ -21,7 +21,6 @@ import (
 	"time"
 
 	"github.com/aws/aws-sdk-go-v2/aws"
-
 	volumesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1"
 	snapshotclientset "github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned"
 	awscloud "github.com/kubernetes-sigs/aws-ebs-csi-driver/pkg/cloud"
@@ -48,7 +47,7 @@ const (
 	execTimeout = 10 * time.Second
 	// Some pods can take much longer to get ready due to volume attach/detach latency.
 	slowPodStartTimeout = 15 * time.Minute
-	// Description that will printed during tests
+	// Description that will be printed during tests.
 	failedConditionDescription = "Error status code"
 
 	volumeSnapshotNameStatic = "volume-snapshot-tester"
@@ -151,8 +150,8 @@ func (t *TestVolumeSnapshotClass) CreateStaticVolumeSnapshot(vsc *volumesnapshot
 	return snapshotObj
 }
 
-func (t *TestVolumeSnapshotClass) CreateStaticVolumeSnapshotContent(snapshotId string) *volumesnapshotv1.VolumeSnapshotContent {
-	By("creating a VolumeSnapshotContent from snapshotId: " + snapshotId)
+func (t *TestVolumeSnapshotClass) CreateStaticVolumeSnapshotContent(snapshotID string) *volumesnapshotv1.VolumeSnapshotContent {
+	By("creating a VolumeSnapshotContent from snapshotID: " + snapshotID)
 	snapshotContent := &volumesnapshotv1.VolumeSnapshotContent{
 		TypeMeta: metav1.TypeMeta{
 			Kind: VolumeSnapshotContentKind,
@@ -172,7 +171,7 @@ func (t *TestVolumeSnapshotClass) CreateStaticVolumeSnapshotContent(snapshotId s
 			},
 			Driver: "ebs.csi.aws.com",
 			Source: volumesnapshotv1.VolumeSnapshotContentSource{
-				SnapshotHandle: aws.String(snapshotId),
+				SnapshotHandle: aws.String(snapshotID),
 			},
 		},
 	}
@@ -185,7 +184,6 @@ func (t *TestVolumeSnapshotClass) UpdateStaticVolumeSnapshotContent(volumeSnapsh
 	volumeSnapshotContent.Spec.VolumeSnapshotRef.Name = volumeSnapshot.Name
 	_, err := snapshotclientset.New(t.client).SnapshotV1().VolumeSnapshotContents().Update(context.Background(), volumeSnapshotContent, metav1.UpdateOptions{})
 	framework.ExpectNoError(err)
-
 }
 func (t *TestVolumeSnapshotClass) ReadyToUse(snapshot *volumesnapshotv1.VolumeSnapshot) {
 	By("waiting for VolumeSnapshot to be ready to use - " + snapshot.Name)
@@ -345,11 +343,11 @@ func (t *TestPersistentVolumeClaim) ValidateProvisionedPersistentVolume() {
 	framework.ExpectNoError(err)
 
 	// Check sizes
-	expectedCapacity := t.requestedPersistentVolumeClaim.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
-	claimCapacity := t.persistentVolumeClaim.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
+	expectedCapacity := t.requestedPersistentVolumeClaim.Spec.Resources.Requests[v1.ResourceStorage]
+	claimCapacity := t.persistentVolumeClaim.Spec.Resources.Requests[v1.ResourceStorage]
 	Expect(claimCapacity.Value()).To(Equal(expectedCapacity.Value()), "claimCapacity is not equal to requestedCapacity")
-	pvCapacity := t.persistentVolume.Spec.Capacity[v1.ResourceName(v1.ResourceStorage)]
+	pvCapacity := t.persistentVolume.Spec.Capacity[v1.ResourceStorage]
 	Expect(pvCapacity.Value()).To(Equal(expectedCapacity.Value()), "pvCapacity is not equal to requestedCapacity")
 
 	// Check PV properties
@@ -389,7 +387,6 @@ func (t *TestPersistentVolumeClaim) ValidateProvisionedPersistentVolume() {
 			for _, v := range t.persistentVolume.Spec.NodeAffinity.Required.NodeSelectorTerms[0].MatchExpressions[0].Values {
 				Expect(t.storageClass.AllowedTopologies[0].MatchLabelExpressions[0].Values).To(ContainElement(v))
 			}
-
 		}
 	}
 }
@@ -425,7 +422,7 @@ func generatePVC(namespace, storageClassName, claimSize string, volumeMode v1.Pe
 		},
 		Resources: v1.VolumeResourceRequirements{
 			Requests: v1.ResourceList{
-				v1.ResourceName(v1.ResourceStorage): resource.MustParse(claimSize),
+				v1.ResourceStorage: resource.MustParse(claimSize),
 			},
 		},
 		VolumeMode: &volumeMode,
@@ -604,16 +601,16 @@ func (t *TestDeployment) Logs() ([]byte, error) {
 }
 
 // waitForPersistentVolumeClaimDeleted waits for a PersistentVolumeClaim to be removed from the system until timeout occurs, whichever comes first.
-func waitForPersistentVolumeClaimDeleted(c clientset.Interface, ns string, pvcName string, Poll, timeout time.Duration) error {
+func waitForPersistentVolumeClaimDeleted(c clientset.Interface, ns string, pvcName string, poll, timeout time.Duration) error {
 	framework.Logf("Waiting up to %v for PersistentVolumeClaim %s to be removed", timeout, pvcName)
-	for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
+	for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
 		_, err := c.CoreV1().PersistentVolumeClaims(ns).Get(context.Background(), pvcName, metav1.GetOptions{})
 		if err != nil {
 			if apierrs.IsNotFound(err) {
 				framework.Logf("Claim %q in namespace %q doesn't exist in the system", pvcName, ns)
 				return nil
 			}
-			framework.Logf("Failed to get claim %q in namespace %q, retrying in %v. Error: %v", pvcName, ns, Poll, err)
+			framework.Logf("Failed to get claim %q in namespace %q, retrying in %v. Error: %v", pvcName, ns, poll, err)
 		}
 	}
 	return fmt.Errorf("PersistentVolumeClaim %s is not removed from the system within %v", pvcName, timeout)
@@ -668,7 +665,7 @@ func (t *TestPod) WaitForRunning() {
 }
 
 // Ideally this would be in "k8s.io/kubernetes/test/e2e/framework"
-// Similar to framework.WaitForPodSuccessInNamespace
+// Similar to framework.WaitForPodSuccessInNamespace.
 var podFailedCondition = func(pod *v1.Pod) (bool, error) {
 	switch pod.Status.Phase {
 	case v1.PodFailed: