From 2fc45c36db44aac477f63d7ec3d0596da3c3dd43 Mon Sep 17 00:00:00 2001 From: Jiri Mencak Date: Thu, 12 Dec 2024 18:04:01 +0100 Subject: [PATCH] Linter fixes The golangci-lint configuration ".golangci.yaml" was set up in a way that produced a lot of false positives while also missing a lot of real issues. Fix both the linter configuration and the previously missed issues that found their way into the codebase. --- .ci-operator.yaml | 2 +- .golangci.yaml | 21 +++- Dockerfile.rhel9 | 4 +- Makefile | 2 +- cmd/gather-sysinfo/gather-sysinfo_test.go | 3 +- .../performanceprofile/performance_test.go | 6 +- .../v2/performanceprofile_validation.go | 4 +- .../v2/performanceprofile_validation_test.go | 52 +++++------ pkg/operator/status.go | 13 +-- .../kubeletconfig/kubeletconfig_test.go | 22 +++-- .../machineconfig/machineconfig_test.go | 6 +- .../components/tuned/tuned_test.go | 90 +++++++++---------- ...erformanceprofile_controller_suite_test.go | 9 +- .../performanceprofile_controller_test.go | 12 +-- .../profilecreator/helper.go | 3 - .../profilecreator/profilecreator_test.go | 3 +- .../utils/testing/testing.go | 12 +-- pkg/tuned/cmd/render/cmd.go | 4 +- test/e2e/basic/custom_node_labels.go | 5 +- test/e2e/basic/custom_pod_labels.go | 5 +- test/e2e/basic/default_irq_smp_affinity.go | 5 +- test/e2e/basic/modules.go | 6 +- test/e2e/basic/netdev_set_channels.go | 7 +- test/e2e/basic/rollback.go | 8 +- test/e2e/basic/sysctl_d_override.go | 10 ++- test/e2e/basic/tuned_builtin_expand.go | 7 +- test/e2e/basic/tuned_errors_and_recovery.go | 9 +- test/e2e/deferred/basic.go | 5 +- test/e2e/deferred/non_regression.go | 3 +- test/e2e/deferred/operator_test.go | 2 - test/e2e/deferred/restart.go | 3 +- test/e2e/deferred/updates.go | 3 +- .../1_render_command/render_test.go | 2 +- .../functests/0_config/config.go | 16 ++-- .../10_performance_ppc/10_ppc_suite_test.go | 2 +- .../functests/10_performance_ppc/ppc.go | 13 ++- .../11_mixedcpus/11_mixedcpus_suite_test.go | 2 +- .../functests/11_mixedcpus/mixedcpus.go | 13 ++- .../functests/12_hypershift/hypershift.go | 4 +- .../functests/1_performance/cpu_management.go | 12 +-- .../functests/1_performance/hugepages.go | 2 +- .../functests/1_performance/netqueues.go | 13 +-- .../functests/1_performance/performance.go | 43 +++------ .../functests/1_performance/rt-kernel.go | 2 +- .../test_suite_performance_test.go | 3 +- .../2_performance_update/memorymanager.go | 27 ++++-- .../test_suite_performance_update_test.go | 3 +- .../2_performance_update/updating_profile.go | 43 +++++---- .../functests/4_latency/latency.go | 4 +- .../4_latency/test_suite_latency_test.go | 3 +- .../5_latency_testing_suite_test.go | 2 +- .../5_latency_testing/latency_testing.go | 14 ++- .../test_suite_mustgather_test.go | 2 +- .../7_performance_kubelet_node/cgroups.go | 25 ++++-- .../7_performance_kubelet_node/kubelet.go | 9 +- ...est_suite_performance_kubelet_node_test.go | 3 +- ...st_suite_performance_workloadhints_test.go | 3 +- .../workloadhints.go | 89 +++++++++--------- .../functests/9_reboot/devices.go | 4 +- .../9_reboot/test_suite_reboot_test.go | 3 +- .../functests/utils/cgroup/v2/v2.go | 2 +- .../functests/utils/consts.go | 2 +- .../functests/utils/infrastructure/vm.go | 2 +- .../functests/utils/mcps/mcps.go | 2 - .../utils/node_inspector/inspector.go | 8 +- .../functests/utils/nodes/nodes.go | 18 ++-- .../utils/profilesupdate/profile_update.go | 2 +- test/e2e/reboots/kernel_parameter_add_rm.go | 11 ++- test/e2e/reboots/operator_test.go | 3 +- .../reboots/sno/kernel_parameter_add_rm.go | 7 +-
test/e2e/reboots/sno/operator_test.go | 1 + test/e2e/reboots/sno/stalld.go | 6 +- test/e2e/reboots/stalld.go | 10 ++- test/e2e/util/util.go | 7 +- 74 files changed, 429 insertions(+), 359 deletions(-) diff --git a/.ci-operator.yaml b/.ci-operator.yaml index 64887a08b4..7c15f83e3e 100644 --- a/.ci-operator.yaml +++ b/.ci-operator.yaml @@ -1,4 +1,4 @@ build_root_image: name: release namespace: openshift - tag: rhel-9-release-golang-1.22-openshift-4.18 + tag: rhel-9-release-golang-1.23-openshift-4.19 diff --git a/.golangci.yaml b/.golangci.yaml index 473644234c..04323855a2 100644 --- a/.golangci.yaml +++ b/.golangci.yaml @@ -1,16 +1,19 @@ run: timeout: 5m - tests: false - skip-dirs: - - vendor - - test modules-download-mode: vendor +issues: + exclude-dirs: + - vendor + exclude-rules: + # Temporarily disable the deprecation warnings until we rewrite the code based on deprecated functionality. + - linters: + - staticcheck + text: "SA1019: .* deprecated" linters: disable-all: true enable: - errcheck - bodyclose - - exportloopref - gosimple - govet - ineffassign @@ -25,8 +28,14 @@ linters: - wastedassign - whitespace linters-settings: + errcheck: + # report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`; + # default is false: such cases aren't reported by default. + check-blank: false misspell: - locale: US + # Do not set locale explicitly, the default is to use a neutral variety of English. + # Setting locale to US will cause correcting the British spelling of 'colour' to 'color'. + # locale: US ignore-words: - NTO - nto diff --git a/Dockerfile.rhel9 b/Dockerfile.rhel9 index 5bc30df680..b3808bf605 100644 --- a/Dockerfile.rhel9 +++ b/Dockerfile.rhel9 @@ -1,9 +1,9 @@ -FROM registry.ci.openshift.org/ocp/builder:rhel-9-golang-1.22-openshift-4.18 AS builder +FROM registry.ci.openshift.org/ocp/builder:rhel-9-golang-1.23-openshift-4.19 AS builder WORKDIR /go/src/github.com/openshift/cluster-node-tuning-operator COPY . . 
RUN make build -FROM registry.ci.openshift.org/ocp/4.18:base-rhel9 +FROM registry.ci.openshift.org/ocp/4.19:base-rhel9 COPY --from=builder /go/src/github.com/openshift/cluster-node-tuning-operator/_output/cluster-node-tuning-operator /usr/bin/ COPY --from=builder /go/src/github.com/openshift/cluster-node-tuning-operator/_output/performance-profile-creator /usr/bin/ COPY --from=builder /go/src/github.com/openshift/cluster-node-tuning-operator/_output/gather-sysinfo /usr/bin/ diff --git a/Makefile b/Makefile index 6d46c9b23e..09f8f61bed 100644 --- a/Makefile +++ b/Makefile @@ -55,7 +55,7 @@ PAO_CRD_APIS :=$(addprefix ./$(API_TYPES_DIR)/performanceprofile/,v2 v1 v1alpha1 PAO_E2E_SUITES := $(shell hack/list-test-bin.sh) # golangci-lint variables -GOLANGCI_LINT_VERSION=1.54.2 +GOLANGCI_LINT_VERSION=1.62.2 GOLANGCI_LINT_BIN=$(OUT_DIR)/golangci-lint GOLANGCI_LINT_VERSION_TAG=v${GOLANGCI_LINT_VERSION} diff --git a/cmd/gather-sysinfo/gather-sysinfo_test.go b/cmd/gather-sysinfo/gather-sysinfo_test.go index 0924259716..82f2f30490 100644 --- a/cmd/gather-sysinfo/gather-sysinfo_test.go +++ b/cmd/gather-sysinfo/gather-sysinfo_test.go @@ -1,7 +1,6 @@ package main import ( - "io/ioutil" "os" "strings" "testing" @@ -63,7 +62,7 @@ func TestCollectMachineInfo(t *testing.T) { t.Errorf("Collection of machine info failed: %v", err) } - content, err := ioutil.ReadFile(destFile) + content, err := os.ReadFile(destFile) if err != nil { t.Errorf("Reading of generated output failed: %v", err) } diff --git a/pkg/apis/performanceprofile/performance_test.go b/pkg/apis/performanceprofile/performance_test.go index 2e511090eb..651aa6aaab 100644 --- a/pkg/apis/performanceprofile/performance_test.go +++ b/pkg/apis/performanceprofile/performance_test.go @@ -1,7 +1,7 @@ package performance import ( - "io/ioutil" + "os" "strings" "github.com/RHsyseng/operator-utils/pkg/validation" @@ -53,7 +53,7 @@ var _ = Describe("PerformanceProfile CR(D) Schema", func() { // getSchema reads in & returns CRD schema file as openAPIV3Schema{} for validation usage. 
// See references operator-utils/validation/schema & go-openapi/spec/schema func getSchema(crdPath string) (validation.Schema, error) { - bytes, err := ioutil.ReadFile(crdPath) + bytes, err := os.ReadFile(crdPath) if err != nil { return nil, err } @@ -66,7 +66,7 @@ func getSchema(crdPath string) (validation.Schema, error) { // getCR unmarshals a *_cr.yaml file and returns the representing struct func getCR(crPath string) (map[string]interface{}, error) { - bytes, err := ioutil.ReadFile(crPath) + bytes, err := os.ReadFile(crPath) if err != nil { return nil, err } diff --git a/pkg/apis/performanceprofile/v2/performanceprofile_validation.go b/pkg/apis/performanceprofile/v2/performanceprofile_validation.go index c71a75ca11..53a5103a6e 100644 --- a/pkg/apis/performanceprofile/v2/performanceprofile_validation.go +++ b/pkg/apis/performanceprofile/v2/performanceprofile_validation.go @@ -216,11 +216,11 @@ func validateNoIntersectionExists(lists *components.CPULists, allErrs field.Erro func (r *PerformanceProfile) validateSelectors() field.ErrorList { var allErrs field.ErrorList - if r.Spec.MachineConfigLabel != nil && len(r.Spec.MachineConfigLabel) > 1 { + if len(r.Spec.MachineConfigLabel) > 1 { allErrs = append(allErrs, field.Invalid(field.NewPath("spec.machineConfigLabel"), r.Spec.MachineConfigLabel, "you should provide only 1 MachineConfigLabel")) } - if r.Spec.MachineConfigPoolSelector != nil && len(r.Spec.MachineConfigPoolSelector) > 1 { + if len(r.Spec.MachineConfigPoolSelector) > 1 { allErrs = append(allErrs, field.Invalid(field.NewPath("spec.machineConfigPoolSelector"), r.Spec.MachineConfigLabel, "you should provide only 1 MachineConfigPoolSelector")) } diff --git a/pkg/apis/performanceprofile/v2/performanceprofile_validation_test.go b/pkg/apis/performanceprofile/v2/performanceprofile_validation_test.go index a2336783bd..76f1854ba5 100644 --- a/pkg/apis/performanceprofile/v2/performanceprofile_validation_test.go +++ b/pkg/apis/performanceprofile/v2/performanceprofile_validation_test.go @@ -12,7 +12,7 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) const ( @@ -126,13 +126,13 @@ func NewPerformanceProfile(name string) *PerformanceProfile { }, }, RealTimeKernel: &RealTimeKernel{ - Enabled: pointer.Bool(true), + Enabled: ptr.To(true), }, NUMA: &NUMA{ TopologyPolicy: &numaPolicy, }, Net: &Net{ - UserLevelNetworking: pointer.Bool(true), + UserLevelNetworking: ptr.To(true), Devices: []Device{ { InterfaceName: &netDeviceName, @@ -381,7 +381,7 @@ var _ = Describe("PerformanceProfile", func() { setValidNodeSelector(profile) errors = profile.validateSelectors() - Expect(profile.validateSelectors()).To(BeEmpty(), "should not have validation errors when machine config selector nil") + Expect(errors).To(BeEmpty(), "should not have validation errors when machine config selector nil") }) It("should should have 0 or 1 MachineConfigPoolSelector labels", func() { @@ -397,7 +397,7 @@ var _ = Describe("PerformanceProfile", func() { setValidNodeSelector(profile) errors = profile.validateSelectors() - Expect(profile.validateSelectors()).To(BeEmpty(), "should not have validation errors when machine config pool selector nil") + Expect(errors).To(BeEmpty(), "should not have validation errors when machine config pool selector nil") }) It("should have sensible NodeSelector in case MachineConfigLabel or MachineConfigPoolSelector is empty", func() { @@ -583,7 +583,7 @@ var _ = 
Describe("PerformanceProfile", func() { profile.Spec.HugePages.Pages = append(profile.Spec.HugePages.Pages, HugePage{ Count: 128, - Node: pointer.Int32(0), + Node: ptr.To(int32(0)), Size: "14M", }) errors := profile.validateHugePages(nodes) @@ -604,7 +604,7 @@ var _ = Describe("PerformanceProfile", func() { profile.Spec.HugePages.Pages = append(profile.Spec.HugePages.Pages, HugePage{ Count: 128, - Node: pointer.Int32(0), + Node: ptr.To(int32(0)), Size: "14M", }) errors := profile.validateHugePages(nodes) @@ -635,7 +635,7 @@ var _ = Describe("PerformanceProfile", func() { profile.Spec.HugePages.Pages = append(profile.Spec.HugePages.Pages, HugePage{ Count: 128, - Node: pointer.Int32(0), + Node: ptr.To(int32(0)), Size: "14M", }) @@ -661,12 +661,12 @@ var _ = Describe("PerformanceProfile", func() { profile.Spec.HugePages.Pages = append(profile.Spec.HugePages.Pages, HugePage{ Count: 128, Size: hugepagesSize1G, - Node: pointer.Int32(0), + Node: ptr.To(int32(0)), }) profile.Spec.HugePages.Pages = append(profile.Spec.HugePages.Pages, HugePage{ Count: 64, Size: hugepagesSize1G, - Node: pointer.Int32(0), + Node: ptr.To(int32(0)), }) errors := profile.validateHugePages(nodes) Expect(errors).NotTo(BeEmpty()) @@ -729,22 +729,22 @@ var _ = Describe("PerformanceProfile", func() { It("should raise the validation syntax errors", func() { invalidVendor := "123" invalidDevice := "0x12345" - profile.Spec.Net.Devices[0].InterfaceName = pointer.String("") - profile.Spec.Net.Devices[0].VendorID = pointer.String(invalidVendor) - profile.Spec.Net.Devices[0].DeviceID = pointer.String(invalidDevice) + profile.Spec.Net.Devices[0].InterfaceName = ptr.To("") + profile.Spec.Net.Devices[0].VendorID = ptr.To(invalidVendor) + profile.Spec.Net.Devices[0].DeviceID = ptr.To(invalidDevice) errors := profile.validateNet() Expect(len(errors)).To(Equal(3)) - Expect(errors[0].Error()).To(ContainSubstring(fmt.Sprintf("device name cannot be empty"))) + Expect(errors[0].Error()).To(ContainSubstring("device name cannot be empty")) Expect(errors[1].Error()).To(ContainSubstring(fmt.Sprintf("device vendor ID %s has an invalid format. Vendor ID should be represented as 0x<4 hexadecimal digits> (16 bit representation)", invalidVendor))) Expect(errors[2].Error()).To(ContainSubstring(fmt.Sprintf("device model ID %s has an invalid format. 
Model ID should be represented as 0x<4 hexadecimal digits> (16 bit representation)", invalidDevice))) }) It("should raise the validation errors for missing fields", func() { profile.Spec.Net.Devices[0].VendorID = nil - profile.Spec.Net.Devices[0].DeviceID = pointer.String("0x1") + profile.Spec.Net.Devices[0].DeviceID = ptr.To("0x1") errors := profile.validateNet() Expect(errors).NotTo(BeEmpty()) - Expect(errors[0].Error()).To(ContainSubstring(fmt.Sprintf("device model ID can not be used without specifying the device vendor ID."))) + Expect(errors[0].Error()).To(ContainSubstring("device model ID can not be used without specifying the device vendor ID.")) }) }) @@ -752,10 +752,10 @@ var _ = Describe("PerformanceProfile", func() { When("realtime kernel is enabled and realtime workload hint is explicitly disabled", func() { It("should raise validation error", func() { profile.Spec.WorkloadHints = &WorkloadHints{ - RealTime: pointer.Bool(false), + RealTime: ptr.To(false), } profile.Spec.RealTimeKernel = &RealTimeKernel{ - Enabled: pointer.Bool(true), + Enabled: ptr.To(true), } errors := profile.validateWorkloadHints() Expect(errors).NotTo(BeEmpty()) @@ -765,8 +765,8 @@ var _ = Describe("PerformanceProfile", func() { When("HighPowerConsumption hint is enabled and PerPodPowerManagement hint is enabled", func() { It("should raise validation error", func() { profile.Spec.WorkloadHints = &WorkloadHints{ - HighPowerConsumption: pointer.Bool(true), - PerPodPowerManagement: pointer.Bool(true), + HighPowerConsumption: ptr.To(true), + PerPodPowerManagement: ptr.To(true), } errors := profile.validateWorkloadHints() Expect(errors).NotTo(BeEmpty()) @@ -776,7 +776,7 @@ var _ = Describe("PerformanceProfile", func() { When("MixedCPUs hint is enabled but no shared CPUs are specified", func() { It("should raise validation error", func() { profile.Spec.WorkloadHints = &WorkloadHints{ - MixedCpus: pointer.Bool(true), + MixedCpus: ptr.To(true), } errors := profile.validateWorkloadHints() Expect(errors).NotTo(BeEmpty()) @@ -807,17 +807,17 @@ var _ = Describe("PerformanceProfile", func() { profile.Spec.HugePages.DefaultHugePagesSize = &incorrectDefaultSize profile.Spec.WorkloadHints = &WorkloadHints{ - RealTime: pointer.Bool(false), + RealTime: ptr.To(false), } profile.Spec.RealTimeKernel = &RealTimeKernel{ - Enabled: pointer.Bool(true), + Enabled: ptr.To(true), } invalidVendor := "123" invalidDevice := "0x12345" - profile.Spec.Net.Devices[0].InterfaceName = pointer.String("") - profile.Spec.Net.Devices[0].VendorID = pointer.String(invalidVendor) - profile.Spec.Net.Devices[0].DeviceID = pointer.String(invalidDevice) + profile.Spec.Net.Devices[0].InterfaceName = ptr.To("") + profile.Spec.Net.Devices[0].VendorID = ptr.To(invalidVendor) + profile.Spec.Net.Devices[0].DeviceID = ptr.To(invalidDevice) errors := profile.ValidateBasicFields() diff --git a/pkg/operator/status.go b/pkg/operator/status.go index 682533c35a..ea81bf00c5 100644 --- a/pkg/operator/status.go +++ b/pkg/operator/status.go @@ -2,6 +2,7 @@ package operator import ( "context" + "errors" "fmt" "os" @@ -9,7 +10,7 @@ import ( operatorv1 "github.com/openshift/api/operator/v1" operatorv1helpers "github.com/openshift/library-go/pkg/operator/v1helpers" corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/klog/v2" @@ -80,7 +81,7 @@ func (c *Controller) syncOperatorStatus(tuned *tunedv1.Tuned) error { func (c 
*Controller) getOrCreateOperatorStatus() (*configv1.ClusterOperator, error) { co, err := c.listers.ClusterOperators.Get(tunedv1.TunedClusterOperatorResourceName) if err != nil { - if errors.IsNotFound(err) { + if apierrors.IsNotFound(err) { // Cluster operator not found, create it co = &configv1.ClusterOperator{ObjectMeta: metav1.ObjectMeta{Name: tunedv1.TunedClusterOperatorResourceName}} co, err = c.clients.ConfigV1Client.ClusterOperators().Create(context.TODO(), co, metav1.CreateOptions{}) @@ -205,7 +206,7 @@ func (c *Controller) computeStatus(tuned *tunedv1.Tuned, conditions []configv1.C ds, err := c.listers.DaemonSets.Get(dsMf.Name) if err != nil { // There was a problem fetching Tuned daemonset - if errors.IsNotFound(err) { + if apierrors.IsNotFound(err) { // Tuned daemonset has not been created yet if len(conditions) == 0 { // This looks like a fresh install => initialize @@ -238,7 +239,7 @@ func (c *Controller) computeStatus(tuned *tunedv1.Tuned, conditions []configv1.C } else { if ds.Status.ObservedGeneration != ds.Generation { // Do not base the conditions on stale information - return conditions, "", fmt.Errorf(errGenerationMismatch) + return conditions, "", errors.New(errGenerationMismatch) } dsReleaseVersion := c.getDaemonSetReleaseVersion(ds) @@ -303,14 +304,14 @@ func (c *Controller) computeStatus(tuned *tunedv1.Tuned, conditions []configv1.C } if numDegradedProfiles > 0 { - klog.Infof(fmt.Sprintf("%v/%v Profiles failed to be applied", numDegradedProfiles, len(profileList))) + klog.Infof("%v/%v Profiles failed to be applied", numDegradedProfiles, len(profileList)) availableCondition.Reason = "ProfileDegraded" availableCondition.Message = fmt.Sprintf("%v/%v Profiles failed to be applied", numDegradedProfiles, len(profileList)) } numConflict := c.numProfilesWithBootcmdlineConflict(profileList) if numConflict > 0 { - klog.Infof(fmt.Sprintf("%v/%v Profiles with bootcmdline conflict", numConflict, len(profileList))) + klog.Infof("%v/%v Profiles with bootcmdline conflict", numConflict, len(profileList)) degradedCondition.Status = configv1.ConditionTrue degradedCondition.Reason = "ProfileConflict" degradedCondition.Message = fmt.Sprintf("%v/%v Profiles with bootcmdline conflict", numConflict, len(profileList)) diff --git a/pkg/performanceprofile/controller/performanceprofile/components/kubeletconfig/kubeletconfig_test.go b/pkg/performanceprofile/controller/performanceprofile/components/kubeletconfig/kubeletconfig_test.go index 6a6b20a491..bf090e857d 100644 --- a/pkg/performanceprofile/controller/performanceprofile/components/kubeletconfig/kubeletconfig_test.go +++ b/pkg/performanceprofile/controller/performanceprofile/components/kubeletconfig/kubeletconfig_test.go @@ -7,7 +7,7 @@ import ( . 
"github.com/onsi/gomega" kubeletconfigv1beta1 "k8s.io/kubelet/config/v1beta1" "k8s.io/kubernetes/pkg/kubelet/eviction" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/yaml" "github.com/openshift/cluster-node-tuning-operator/pkg/performanceprofile/controller/performanceprofile/components" @@ -43,7 +43,7 @@ var _ = Describe("Kubelet Config", func() { Context("with topology manager restricted policy", func() { It("should have the memory manager related parameters", func() { profile := testutils.NewPerformanceProfile("test") - profile.Spec.NUMA.TopologyPolicy = pointer.String(kubeletconfigv1beta1.RestrictedTopologyManagerPolicy) + profile.Spec.NUMA.TopologyPolicy = ptr.To(kubeletconfigv1beta1.RestrictedTopologyManagerPolicy) selectorKey, selectorValue := components.GetFirstKeyAndValue(profile.Spec.MachineConfigPoolSelector) kc, err := New(profile, &components.KubeletConfigOptions{MachineConfigPoolSelector: map[string]string{selectorKey: selectorValue}}) Expect(err).ToNot(HaveOccurred()) @@ -58,7 +58,7 @@ var _ = Describe("Kubelet Config", func() { It("should not have the cpumanager policy options set", func() { profile := testutils.NewPerformanceProfile("test") - profile.Spec.NUMA.TopologyPolicy = pointer.String(kubeletconfigv1beta1.RestrictedTopologyManagerPolicy) + profile.Spec.NUMA.TopologyPolicy = ptr.To(kubeletconfigv1beta1.RestrictedTopologyManagerPolicy) selectorKey, selectorValue := components.GetFirstKeyAndValue(profile.Spec.MachineConfigPoolSelector) kc, err := New(profile, &components.KubeletConfigOptions{MachineConfigPoolSelector: map[string]string{selectorKey: selectorValue}}) Expect(err).ToNot(HaveOccurred()) @@ -75,7 +75,7 @@ var _ = Describe("Kubelet Config", func() { Context("with topology manager best-effort policy", func() { It("should not have the memory manager related parameters", func() { profile := testutils.NewPerformanceProfile("test") - profile.Spec.NUMA.TopologyPolicy = pointer.String(kubeletconfigv1beta1.BestEffortTopologyManagerPolicy) + profile.Spec.NUMA.TopologyPolicy = ptr.To(kubeletconfigv1beta1.BestEffortTopologyManagerPolicy) selectorKey, selectorValue := components.GetFirstKeyAndValue(profile.Spec.MachineConfigPoolSelector) kc, err := New(profile, &components.KubeletConfigOptions{MachineConfigPoolSelector: map[string]string{selectorKey: selectorValue}}) Expect(err).ToNot(HaveOccurred()) @@ -97,6 +97,7 @@ var _ = Describe("Kubelet Config", func() { } selectorKey, selectorValue := components.GetFirstKeyAndValue(profile.Spec.MachineConfigPoolSelector) kc, err := New(profile, &components.KubeletConfigOptions{MachineConfigPoolSelector: map[string]string{selectorKey: selectorValue}}) + Expect(err).ToNot(HaveOccurred()) y, err := yaml.Marshal(kc) Expect(err).ToNot(HaveOccurred()) @@ -113,6 +114,7 @@ var _ = Describe("Kubelet Config", func() { } selectorKey, selectorValue := components.GetFirstKeyAndValue(profile.Spec.MachineConfigPoolSelector) kc, err := New(profile, &components.KubeletConfigOptions{MachineConfigPoolSelector: map[string]string{selectorKey: selectorValue}}) + Expect(err).ToNot(HaveOccurred()) y, err := yaml.Marshal(kc) Expect(err).ToNot(HaveOccurred()) @@ -128,6 +130,7 @@ var _ = Describe("Kubelet Config", func() { } selectorKey, selectorValue := components.GetFirstKeyAndValue(profile.Spec.MachineConfigPoolSelector) kc, err := New(profile, &components.KubeletConfigOptions{MachineConfigPoolSelector: map[string]string{selectorKey: selectorValue}}) + Expect(err).ToNot(HaveOccurred()) y, err := yaml.Marshal(kc) 
Expect(err).ToNot(HaveOccurred()) @@ -144,6 +147,7 @@ var _ = Describe("Kubelet Config", func() { } selectorKey, selectorValue := components.GetFirstKeyAndValue(profile.Spec.MachineConfigPoolSelector) kc, err := New(profile, &components.KubeletConfigOptions{MachineConfigPoolSelector: map[string]string{selectorKey: selectorValue}}) + Expect(err).ToNot(HaveOccurred()) y, err := yaml.Marshal(kc) Expect(err).ToNot(HaveOccurred()) @@ -159,15 +163,16 @@ var _ = Describe("Kubelet Config", func() { profile := testutils.NewPerformanceProfile("test") selectorKey, selectorValue := components.GetFirstKeyAndValue(profile.Spec.MachineConfigPoolSelector) kc, err := New(profile, &components.KubeletConfigOptions{MachineConfigPoolSelector: map[string]string{selectorKey: selectorValue}}) + Expect(err).ToNot(HaveOccurred()) y, err := yaml.Marshal(kc) Expect(err).ToNot(HaveOccurred()) manifest := string(y) - memoryAvaialable := "memory.available: " + string(eviction.DefaultEvictionHard[evictionHardMemoryAvailable]) - nodefsAvailable := "nodefs.available: " + string(eviction.DefaultEvictionHard[evictionHardNodefsAvaialble]) - imagefsAvailable := "imagefs.available: " + string(eviction.DefaultEvictionHard[evictionHardImagefsAvailable]) - nodefsInodesFree := "nodefs.inodesFree: " + string(eviction.DefaultEvictionHard[evictionHardNodefsInodesFree]) + memoryAvaialable := "memory.available: " + eviction.DefaultEvictionHard[evictionHardMemoryAvailable] + nodefsAvailable := "nodefs.available: " + eviction.DefaultEvictionHard[evictionHardNodefsAvaialble] + imagefsAvailable := "imagefs.available: " + eviction.DefaultEvictionHard[evictionHardImagefsAvailable] + nodefsInodesFree := "nodefs.inodesFree: " + eviction.DefaultEvictionHard[evictionHardNodefsInodesFree] Expect(manifest).To(ContainSubstring(memoryAvaialable)) Expect(manifest).To(ContainSubstring(nodefsAvailable)) @@ -182,6 +187,7 @@ var _ = Describe("Kubelet Config", func() { } selectorKey, selectorValue := components.GetFirstKeyAndValue(profile.Spec.MachineConfigPoolSelector) kc, err := New(profile, &components.KubeletConfigOptions{MachineConfigPoolSelector: map[string]string{selectorKey: selectorValue}}) + Expect(err).ToNot(HaveOccurred()) data, err := yaml.Marshal(kc) Expect(err).ToNot(HaveOccurred()) diff --git a/pkg/performanceprofile/controller/performanceprofile/components/machineconfig/machineconfig_test.go b/pkg/performanceprofile/controller/performanceprofile/components/machineconfig/machineconfig_test.go index 96cc7ac548..7c10176f5d 100644 --- a/pkg/performanceprofile/controller/performanceprofile/components/machineconfig/machineconfig_test.go +++ b/pkg/performanceprofile/controller/performanceprofile/components/machineconfig/machineconfig_test.go @@ -7,7 +7,7 @@ import ( "fmt" "regexp" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" igntypes "github.com/coreos/ignition/v2/config/v3_2/types" . 
"github.com/onsi/ginkgo/v2" @@ -87,7 +87,7 @@ var _ = Describe("Machine Config", func() { Context("machine config creation ", func() { It("should create machine config with valid assets", func() { profile := testutils.NewPerformanceProfile("test") - profile.Spec.HugePages.Pages[0].Node = pointer.Int32(0) + profile.Spec.HugePages.Pages[0].Node = ptr.To(int32(0)) _, err := New(profile, &components.MachineConfigOptions{}) Expect(err).ToNot(HaveOccurred()) @@ -99,7 +99,7 @@ var _ = Describe("Machine Config", func() { BeforeEach(func() { profile := testutils.NewPerformanceProfile("test") - profile.Spec.HugePages.Pages[0].Node = pointer.Int32(0) + profile.Spec.HugePages.Pages[0].Node = ptr.To(int32(0)) labelKey, labelValue := components.GetFirstKeyAndValue(profile.Spec.MachineConfigLabel) mc, err := New(profile, &components.MachineConfigOptions{}) diff --git a/pkg/performanceprofile/controller/performanceprofile/components/tuned/tuned_test.go b/pkg/performanceprofile/controller/performanceprofile/components/tuned/tuned_test.go index 2b875dece7..2499afe13f 100644 --- a/pkg/performanceprofile/controller/performanceprofile/components/tuned/tuned_test.go +++ b/pkg/performanceprofile/controller/performanceprofile/components/tuned/tuned_test.go @@ -14,7 +14,7 @@ import ( "sigs.k8s.io/yaml" cpuset "k8s.io/utils/cpuset" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) const expectedMatchSelector = ` @@ -25,7 +25,6 @@ const expectedMatchSelector = ` var ( cmdlineAdditionalArgs = "+audit=0 processor.max_cstate=1 idle=poll intel_idle.max_cstate=0" cmdlineAmdHighPowerConsumption = "processor.max_cstate=1" - cmdlineAmdPstateActive = "amd_pstate=active" cmdlineAmdPstateAutomatic = "amd_pstate=guided" cmdlineAmdPstatePassive = "amd_pstate=passive" cmdlineCPUsPartitioning = "+nohz=on rcu_nocbs=${isolated_cores} tuned.non_isolcpus=${not_isolated_cpumask} systemd.cpu_affinity=${not_isolated_cores_expanded}" @@ -33,7 +32,6 @@ var ( cmdlineHugepages = "+ default_hugepagesz=1G hugepagesz=1G hugepages=4" cmdlineIdlePoll = "idle=poll" cmdlineIntelHighPowerConsumption = "processor.max_cstate=1 intel_idle.max_cstate=0" - cmdlineIntelPstateActive = "intel_pstate=active" cmdlineIntelPstateAutomatic = "intel_pstate=${f:intel_recommended_pstate}" cmdlineIntelPstatePassive = "intel_pstate=passive" cmdlineMultipleHugePages = "+ default_hugepagesz=1G hugepagesz=1G hugepages=4 hugepagesz=2M hugepages=128" @@ -172,7 +170,7 @@ var _ = Describe("Tuned", func() { When("realtime hint disabled", func() { It("should not contain realtime related parameters", func() { - profile.Spec.WorkloadHints = &performancev2.WorkloadHints{RealTime: pointer.Bool(false)} + profile.Spec.WorkloadHints = &performancev2.WorkloadHints{RealTime: ptr.To(false)} tunedData := getTunedStructuredData(profile, components.ProfileNamePerformance) service, err := tunedData.GetSection("service") Expect(err).ToNot(HaveOccurred()) @@ -197,7 +195,7 @@ var _ = Describe("Tuned", func() { When("realtime hint enabled", func() { It("should contain realtime related parameters", func() { - profile.Spec.WorkloadHints = &performancev2.WorkloadHints{RealTime: pointer.Bool(true)} + profile.Spec.WorkloadHints = &performancev2.WorkloadHints{RealTime: ptr.To(true)} tunedData := getTunedStructuredData(profile, components.ProfileNamePerformance) service, err := tunedData.GetSection("service") Expect(err).ToNot(HaveOccurred()) @@ -217,7 +215,7 @@ var _ = Describe("Tuned", func() { Context("high power consumption hint enabled", func() { When("default realtime workload settings", func() { 
It("should not contain high power consumption related parameters", func() { - profile.Spec.WorkloadHints = &performancev2.WorkloadHints{HighPowerConsumption: pointer.Bool(true)} + profile.Spec.WorkloadHints = &performancev2.WorkloadHints{HighPowerConsumption: ptr.To(true)} tunedData := getTunedStructuredData(profile, components.ProfileNamePerformance) bootLoader, err := tunedData.GetSection("bootloader") Expect(err).ToNot(HaveOccurred()) @@ -228,8 +226,8 @@ var _ = Describe("Tuned", func() { When("realtime workload enabled", func() { It("should not contain idle=poll cmdline", func() { profile.Spec.WorkloadHints = &performancev2.WorkloadHints{ - HighPowerConsumption: pointer.Bool(true), - RealTime: pointer.Bool(true), + HighPowerConsumption: ptr.To(true), + RealTime: ptr.To(true), } tunedData := getTunedStructuredData(profile, components.ProfileNamePerformance) bootLoader, err := tunedData.GetSection("bootloader") @@ -241,8 +239,8 @@ var _ = Describe("Tuned", func() { When("perPodPowerManagement Hint to true", func() { It("should fail as PerPodPowerManagement and HighPowerConsumption can not be set to true", func() { profile.Spec.WorkloadHints = &performancev2.WorkloadHints{ - HighPowerConsumption: pointer.Bool(true), - PerPodPowerManagement: pointer.Bool(true), + HighPowerConsumption: ptr.To(true), + PerPodPowerManagement: ptr.To(true), } _, err := NewNodePerformance(profile) Expect(err.Error()).To(ContainSubstring("Invalid WorkloadHints configuration: HighPowerConsumption is true and PerPodPowerManagement is true")) @@ -252,8 +250,8 @@ var _ = Describe("Tuned", func() { When("perPodPowerManagement Hint is false realTime Hint false", func() { It("should not contain perPodPowerManagement related parameters", func() { - profile.Spec.WorkloadHints.PerPodPowerManagement = pointer.Bool(false) - profile.Spec.WorkloadHints.RealTime = pointer.Bool(false) + profile.Spec.WorkloadHints.PerPodPowerManagement = ptr.To(false) + profile.Spec.WorkloadHints.RealTime = ptr.To(false) tunedData := getTunedStructuredData(profile, components.ProfileNamePerformance) cpuSection, err := tunedData.GetSection("cpu") Expect(err).ToNot(HaveOccurred()) @@ -265,8 +263,8 @@ var _ = Describe("Tuned", func() { When("perPodPowerManagement Hint is false realTime Hint true", func() { It("should not contain perPodPowerManagement related parameters", func() { - profile.Spec.WorkloadHints.PerPodPowerManagement = pointer.Bool(false) - profile.Spec.WorkloadHints.RealTime = pointer.Bool(true) + profile.Spec.WorkloadHints.PerPodPowerManagement = ptr.To(false) + profile.Spec.WorkloadHints.RealTime = ptr.To(true) tunedData := getTunedStructuredData(profile, components.ProfileNamePerformance) cpuSection, err := tunedData.GetSection("cpu") Expect(err).ToNot(HaveOccurred()) @@ -278,7 +276,7 @@ var _ = Describe("Tuned", func() { When("perPodPowerManagement Hint to true", func() { It("should contain perPodPowerManagement related parameters", func() { - profile.Spec.WorkloadHints.PerPodPowerManagement = pointer.Bool(true) + profile.Spec.WorkloadHints.PerPodPowerManagement = ptr.To(true) tunedData := getTunedStructuredData(profile, components.ProfileNamePerformance) cpuSection, err := tunedData.GetSection("cpu") Expect(err).ToNot(HaveOccurred()) @@ -289,7 +287,7 @@ var _ = Describe("Tuned", func() { }) It("should generate yaml with expected parameters for Isolated balancing disabled", func() { - profile.Spec.CPU.BalanceIsolated = pointer.Bool(false) + profile.Spec.CPU.BalanceIsolated = ptr.To(false) tunedData := 
getTunedStructuredData(profile, components.ProfileNamePerformance) bootLoader, err := tunedData.GetSection("bootloader") Expect(err).ToNot(HaveOccurred()) @@ -297,7 +295,7 @@ var _ = Describe("Tuned", func() { }) It("should generate yaml with expected parameters for Isolated balancing enabled", func() { - profile.Spec.CPU.BalanceIsolated = pointer.Bool(true) + profile.Spec.CPU.BalanceIsolated = ptr.To(true) tunedData := getTunedStructuredData(profile, components.ProfileNamePerformance) bootLoader, err := tunedData.GetSection("bootloader") Expect(err).ToNot(HaveOccurred()) @@ -330,7 +328,7 @@ var _ = Describe("Tuned", func() { Expect(strings.Count(manifest, "hugepagesz=")).To(BeNumerically("==", 2)) Expect(strings.Count(manifest, "hugepages=")).To(BeNumerically("==", 1)) - profile.Spec.HugePages.Pages[0].Node = pointer.Int32(1) + profile.Spec.HugePages.Pages[0].Node = ptr.To(int32(1)) tunedData = getTunedStructuredData(profile, components.ProfileNamePerformance) bootloader, err = tunedData.GetSection("bootloader") @@ -346,7 +344,7 @@ var _ = Describe("Tuned", func() { profile.Spec.HugePages.Pages = append(profile.Spec.HugePages.Pages, performancev2.HugePage{ Size: components.HugepagesSize2M, Count: 128, - Node: pointer.Int32(0), + Node: ptr.To(int32(0)), }) tunedData := getTunedStructuredData(profile, components.ProfileNamePerformance) @@ -385,7 +383,7 @@ var _ = Describe("Tuned", func() { profile.Spec.HugePages.Pages = append(profile.Spec.HugePages.Pages, performancev2.HugePage{ Size: components.HugepagesSize2M, Count: 128, - Node: pointer.Int32(0), + Node: ptr.To(int32(0)), }) profile.Spec.HugePages.Pages = append(profile.Spec.HugePages.Pages, performancev2.HugePage{ Size: components.HugepagesSize2M, @@ -409,7 +407,7 @@ var _ = Describe("Tuned", func() { profile.Spec.HugePages.Pages = append(profile.Spec.HugePages.Pages, performancev2.HugePage{ Size: components.HugepagesSize2M, Count: 128, - Node: pointer.Int32(0), + Node: ptr.To(int32(0)), }) tunedData := getTunedStructuredData(profile, components.ProfileNamePerformance) @@ -425,7 +423,7 @@ var _ = Describe("Tuned", func() { Context("with default net device queues (all devices set)", func() { It("should set the default netqueues count to reserved CPUs count", func() { profile.Spec.Net = &performancev2.Net{ - UserLevelNetworking: pointer.Bool(true), + UserLevelNetworking: ptr.To(true), } manifest := getTunedManifest(profile) reservedSet, err := cpuset.Parse(string(*profile.Spec.CPU.Reserved)) @@ -440,7 +438,7 @@ var _ = Describe("Tuned", func() { devicesUdevRegex := "\\^INTERFACE=" + strings.Replace(netDeviceName, "*", "\\.\\*", -1) profile.Spec.Net = &performancev2.Net{ - UserLevelNetworking: pointer.Bool(true), + UserLevelNetworking: ptr.To(true), Devices: []performancev2.Device{ { InterfaceName: &netDeviceName, @@ -461,7 +459,7 @@ var _ = Describe("Tuned", func() { devicesUdevRegex := "\\^INTERFACE=\\(\\?!" 
+ strings.Replace(netDeviceName, "*", "\\.\\*", -1) + "\\)" profile.Spec.Net = &performancev2.Net{ - UserLevelNetworking: pointer.Bool(true), + UserLevelNetworking: ptr.To(true), Devices: []performancev2.Device{ { InterfaceName: &netDeviceNameInverted, @@ -480,7 +478,7 @@ var _ = Describe("Tuned", func() { devicesUdevRegex := "\\^ID_VENDOR_ID=" + netDeviceVendorID profile.Spec.Net = &performancev2.Net{ - UserLevelNetworking: pointer.Bool(true), + UserLevelNetworking: ptr.To(true), Devices: []performancev2.Device{ { VendorID: &netDeviceVendorID, @@ -500,7 +498,7 @@ var _ = Describe("Tuned", func() { devicesUdevRegex := `\^ID_MODEL_ID=` + netDeviceModelID + `\[\\\\s\\\\S]\*\^ID_VENDOR_ID=` + netDeviceVendorID profile.Spec.Net = &performancev2.Net{ - UserLevelNetworking: pointer.Bool(true), + UserLevelNetworking: ptr.To(true), Devices: []performancev2.Device{ { DeviceID: &netDeviceModelID, @@ -522,7 +520,7 @@ var _ = Describe("Tuned", func() { devicesUdevRegex := `\^ID_MODEL_ID=` + netDeviceModelID + `\[\\\\s\\\\S]\*\^ID_VENDOR_ID=` + netDeviceVendorID + `\[\\\\s\\\\S]\*\^INTERFACE=` + netDeviceName profile.Spec.Net = &performancev2.Net{ - UserLevelNetworking: pointer.Bool(true), + UserLevelNetworking: ptr.To(true), Devices: []performancev2.Device{ { InterfaceName: &netDeviceName, @@ -546,7 +544,7 @@ var _ = Describe("Tuned", func() { devicesUdevRegex := `\^ID_MODEL_ID=` + netDeviceModelID + `\[\\\\s\\\\S]\*\^ID_VENDOR_ID=` + netDeviceVendorID + `\[\\\\s\\\\S]\*\^INTERFACE=\(\?!` + netDeviceName + `\)` profile.Spec.Net = &performancev2.Net{ - UserLevelNetworking: pointer.Bool(true), + UserLevelNetworking: ptr.To(true), Devices: []performancev2.Device{ { InterfaceName: &netDeviceNameInverted, @@ -578,7 +576,7 @@ var _ = Describe("Tuned", func() { Context("with amd x86 performance profile", func() { When("perPodPowerManagement Hint is false and realTime hint is false", func() { It("should contain amd_pstate set to automatic", func() { - profile.Spec.WorkloadHints.PerPodPowerManagement = pointer.Bool(false) + profile.Spec.WorkloadHints.PerPodPowerManagement = ptr.To(false) tunedData := getTunedStructuredData(profile, components.ProfileNameAmdX86) bootloaderSection, err := tunedData.GetSection("bootloader") Expect(err).ToNot(HaveOccurred()) @@ -587,8 +585,8 @@ var _ = Describe("Tuned", func() { }) When("perPodPowerManagement Hint is false and realTime hint is true", func() { It("should contain amd_pstate set to automatic", func() { - profile.Spec.WorkloadHints.PerPodPowerManagement = pointer.Bool(false) - profile.Spec.WorkloadHints.RealTime = pointer.Bool(true) + profile.Spec.WorkloadHints.PerPodPowerManagement = ptr.To(false) + profile.Spec.WorkloadHints.RealTime = ptr.To(true) tunedData := getTunedStructuredData(profile, components.ProfileNameAmdX86) bootloaderSection, err := tunedData.GetSection("bootloader") Expect(err).ToNot(HaveOccurred()) @@ -597,7 +595,7 @@ var _ = Describe("Tuned", func() { }) When("perPodPowerManagement Hint is true", func() { It("should contain amd_pstate set to passive", func() { - profile.Spec.WorkloadHints.PerPodPowerManagement = pointer.Bool(true) + profile.Spec.WorkloadHints.PerPodPowerManagement = ptr.To(true) tunedData := getTunedStructuredData(profile, components.ProfileNameAmdX86) bootloaderSection, err := tunedData.GetSection("bootloader") Expect(err).ToNot(HaveOccurred()) @@ -607,8 +605,8 @@ var _ = Describe("Tuned", func() { When("realtime workload enabled and high power consumption is enabled", func() { It("should contain idle=poll cmdline", func() { 
profile.Spec.WorkloadHints = &performancev2.WorkloadHints{ - HighPowerConsumption: pointer.Bool(true), - RealTime: pointer.Bool(true), + HighPowerConsumption: ptr.To(true), + RealTime: ptr.To(true), } tunedData := getTunedStructuredData(profile, components.ProfileNameAmdX86) bootloaderSection, err := tunedData.GetSection("bootloader") @@ -619,8 +617,8 @@ var _ = Describe("Tuned", func() { When("realtime workload disabled and high power consumption is disabled", func() { It("should not contain idle=poll cmdline", func() { profile.Spec.WorkloadHints = &performancev2.WorkloadHints{ - HighPowerConsumption: pointer.Bool(true), - RealTime: pointer.Bool(false), + HighPowerConsumption: ptr.To(true), + RealTime: ptr.To(false), } tunedData := getTunedStructuredData(profile, components.ProfileNameAmdX86) bootloaderSection, err := tunedData.GetSection("bootloader") @@ -630,7 +628,7 @@ var _ = Describe("Tuned", func() { }) When("high power consumption is enabled", func() { It("should contain high power consumption related parameters", func() { - profile.Spec.WorkloadHints = &performancev2.WorkloadHints{HighPowerConsumption: pointer.Bool(true)} + profile.Spec.WorkloadHints = &performancev2.WorkloadHints{HighPowerConsumption: ptr.To(true)} tunedData := getTunedStructuredData(profile, components.ProfileNameAmdX86) bootLoader, err := tunedData.GetSection("bootloader") Expect(err).ToNot(HaveOccurred()) @@ -659,7 +657,7 @@ var _ = Describe("Tuned", func() { Context("with intel x86 performance profile", func() { When("perPodPowerManagement Hint is false and realTime hint is false", func() { It("should contain intel_pstate set to automatic", func() { - profile.Spec.WorkloadHints.PerPodPowerManagement = pointer.Bool(false) + profile.Spec.WorkloadHints.PerPodPowerManagement = ptr.To(false) tunedData := getTunedStructuredData(profile, components.ProfileNameIntelX86) bootloaderSection, err := tunedData.GetSection("bootloader") Expect(err).ToNot(HaveOccurred()) @@ -668,8 +666,8 @@ var _ = Describe("Tuned", func() { }) When("perPodPowerManagement Hint is false and realTime hint is true", func() { It("should contain intel_pstate set to automatic", func() { - profile.Spec.WorkloadHints.PerPodPowerManagement = pointer.Bool(false) - profile.Spec.WorkloadHints.RealTime = pointer.Bool(true) + profile.Spec.WorkloadHints.PerPodPowerManagement = ptr.To(false) + profile.Spec.WorkloadHints.RealTime = ptr.To(true) tunedData := getTunedStructuredData(profile, components.ProfileNameIntelX86) bootloaderSection, err := tunedData.GetSection("bootloader") Expect(err).ToNot(HaveOccurred()) @@ -678,7 +676,7 @@ var _ = Describe("Tuned", func() { }) When("perPodPowerManagement Hint is true", func() { It("should contain intel_pstate set to passive", func() { - profile.Spec.WorkloadHints.PerPodPowerManagement = pointer.Bool(true) + profile.Spec.WorkloadHints.PerPodPowerManagement = ptr.To(true) tunedData := getTunedStructuredData(profile, components.ProfileNameIntelX86) bootloaderSection, err := tunedData.GetSection("bootloader") Expect(err).ToNot(HaveOccurred()) @@ -688,8 +686,8 @@ var _ = Describe("Tuned", func() { When("realtime workload enabled and high power consumption is enabled", func() { It("should contain idle=poll cmdline", func() { profile.Spec.WorkloadHints = &performancev2.WorkloadHints{ - HighPowerConsumption: pointer.Bool(true), - RealTime: pointer.Bool(true), + HighPowerConsumption: ptr.To(true), + RealTime: ptr.To(true), } tunedData := getTunedStructuredData(profile, components.ProfileNameIntelX86) bootloaderSection, err 
:= tunedData.GetSection("bootloader") @@ -700,8 +698,8 @@ var _ = Describe("Tuned", func() { When("realtime workload disabled and high power consumption is disabled", func() { It("should not contain idle=poll cmdline", func() { profile.Spec.WorkloadHints = &performancev2.WorkloadHints{ - HighPowerConsumption: pointer.Bool(true), - RealTime: pointer.Bool(false), + HighPowerConsumption: ptr.To(true), + RealTime: ptr.To(false), } tunedData := getTunedStructuredData(profile, components.ProfileNameIntelX86) bootloaderSection, err := tunedData.GetSection("bootloader") @@ -711,7 +709,7 @@ var _ = Describe("Tuned", func() { }) When("high power consumption is enabled", func() { It("should contain high power consumption related parameters", func() { - profile.Spec.WorkloadHints = &performancev2.WorkloadHints{HighPowerConsumption: pointer.Bool(true)} + profile.Spec.WorkloadHints = &performancev2.WorkloadHints{HighPowerConsumption: ptr.To(true)} tunedData := getTunedStructuredData(profile, components.ProfileNameIntelX86) bootLoader, err := tunedData.GetSection("bootloader") Expect(err).ToNot(HaveOccurred()) diff --git a/pkg/performanceprofile/controller/performanceprofile_controller_suite_test.go b/pkg/performanceprofile/controller/performanceprofile_controller_suite_test.go index 7dc812b699..824f4f7333 100644 --- a/pkg/performanceprofile/controller/performanceprofile_controller_suite_test.go +++ b/pkg/performanceprofile/controller/performanceprofile_controller_suite_test.go @@ -10,6 +10,7 @@ import ( performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/performanceprofile/v2" tunedv1 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/tuned/v1" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/client-go/kubernetes/scheme" ) @@ -17,10 +18,10 @@ func TestPerformanceProfile(t *testing.T) { RegisterFailHandler(Fail) // add resources API to default scheme - performancev2.AddToScheme(scheme.Scheme) - configv1.AddToScheme(scheme.Scheme) - mcov1.AddToScheme(scheme.Scheme) - tunedv1.AddToScheme(scheme.Scheme) + utilruntime.Must(performancev2.AddToScheme(scheme.Scheme)) + utilruntime.Must(configv1.AddToScheme(scheme.Scheme)) + utilruntime.Must(mcov1.AddToScheme(scheme.Scheme)) + utilruntime.Must(tunedv1.AddToScheme(scheme.Scheme)) RunSpecs(t, "Performance Profile Suite") } diff --git a/pkg/performanceprofile/controller/performanceprofile_controller_test.go b/pkg/performanceprofile/controller/performanceprofile_controller_test.go index a59db8f3e1..0447d8e40a 100644 --- a/pkg/performanceprofile/controller/performanceprofile_controller_test.go +++ b/pkg/performanceprofile/controller/performanceprofile_controller_test.go @@ -45,7 +45,7 @@ import ( "k8s.io/klog" kubeletconfigv1beta1 "k8s.io/kubelet/config/v1beta1" "k8s.io/utils/cpuset" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -396,7 +396,7 @@ var _ = Describe("Controller", func() { }) It("should update MC when RT kernel gets disabled", func() { - profile.Spec.RealTimeKernel.Enabled = pointer.Bool(false) + profile.Spec.RealTimeKernel.Enabled = ptr.To(false) r := newFakeReconciler(profile, mc, kc, tunedPerformance, profileMCP, infra, clusterOperator) Expect(reconcileTimes(r, request, 1)).To(Equal(reconcile.Result{})) @@ -454,7 +454,7 @@ var _ = Describe("Controller", func() { profile.Spec.CPU = &performancev2.CPU{ Reserved: &reserved, Isolated: &isolated, - BalanceIsolated: pointer.Bool(true), + BalanceIsolated: ptr.To(true), } r 
:= newFakeReconciler(profile, mc, kc, tunedPerformance, profileMCP, infra, clusterOperator) @@ -478,7 +478,7 @@ var _ = Describe("Controller", func() { profile.Spec.CPU = &performancev2.CPU{ Reserved: &reserved, Isolated: &isolated, - BalanceIsolated: pointer.Bool(false), + BalanceIsolated: ptr.To(false), } r := newFakeReconciler(profile, mc, kc, tunedPerformance, profileMCP, infra, clusterOperator) @@ -532,7 +532,7 @@ var _ = Describe("Controller", func() { { Count: 8, Size: size, - Node: pointer.Int32(0), + Node: ptr.To(int32(0)), }, }, } @@ -1054,7 +1054,7 @@ var _ = Describe("Controller", func() { Verification: igntypes.Verification{}, Source: "data:text/plain;charset=utf-8;base64,CnsKICAic2hhcmVkX2NwdXMiOiB7CiAgICAgImNvbnRhaW5lcnNfbGltaXQiOiAyNTYKICB9Cn0=", }, - Mode: pointer.Int(0644), + Mode: ptr.To(int(0644)), }, }, } diff --git a/pkg/performanceprofile/profilecreator/helper.go b/pkg/performanceprofile/profilecreator/helper.go index 65c0c43e32..7f6b69cf77 100644 --- a/pkg/performanceprofile/profilecreator/helper.go +++ b/pkg/performanceprofile/profilecreator/helper.go @@ -30,9 +30,6 @@ const ( // TolerationSet records the data to be tolerated or warned about based on the tool handling type TolerationSet map[string]bool -// This is a linter false positive, this function is used in unit tests. -// -//nolint:unused func newTestNode(nodeName string) *v1.Node { n := v1.Node{} n.Name = nodeName diff --git a/pkg/performanceprofile/profilecreator/profilecreator_test.go b/pkg/performanceprofile/profilecreator/profilecreator_test.go index 54e2740819..b823deca49 100644 --- a/pkg/performanceprofile/profilecreator/profilecreator_test.go +++ b/pkg/performanceprofile/profilecreator/profilecreator_test.go @@ -164,7 +164,7 @@ var _ = Describe("PerformanceProfileCreator: MCP and Node Matching in SNO", func var _ = Describe("PerformanceProfileCreator: Getting MCP from Must Gather", func() { var mcpName, mcpNodeSelectorKey, mustGatherDirAbsolutePath string var err error - Context("Identifying Nodes targetted by MCP", func() { + Context("Identifying Nodes targeted by MCP", func() { It("gets the MCP successfully", func() { mcpName = "worker-cnf" mcpNodeSelectorKey = "node-role.kubernetes.io/worker-cnf" @@ -1927,7 +1927,6 @@ func getSiblingsListForCPUSet(sysinfo systemInfo, cpus cpuset.CPUSet) cpuset.CPU siblingsSet.Insert(core.LogicalProcessors...) 
} } - } } siblingsInt := siblingsSet.UnsortedList() diff --git a/pkg/performanceprofile/utils/testing/testing.go b/pkg/performanceprofile/utils/testing/testing.go index 419974ae6d..d4e9f02c98 100644 --- a/pkg/performanceprofile/utils/testing/testing.go +++ b/pkg/performanceprofile/utils/testing/testing.go @@ -8,7 +8,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) const ( @@ -76,7 +76,7 @@ func NewPerformanceProfile(name string) *performancev2.PerformanceProfile { }, }, RealTimeKernel: &performancev2.RealTimeKernel{ - Enabled: pointer.Bool(true), + Enabled: ptr.To(true), }, NUMA: &performancev2.NUMA{ TopologyPolicy: &numaPolicy, @@ -91,10 +91,10 @@ func NewPerformanceProfile(name string) *performancev2.PerformanceProfile { "nodekey": "nodeValue", }, WorkloadHints: &performancev2.WorkloadHints{ - HighPowerConsumption: pointer.Bool(false), - RealTime: pointer.Bool(true), - PerPodPowerManagement: pointer.Bool(false), - MixedCpus: pointer.Bool(true), + HighPowerConsumption: ptr.To(false), + RealTime: ptr.To(true), + PerPodPowerManagement: ptr.To(false), + MixedCpus: ptr.To(true), }, AdditionalKernelArgs: additionalKernelArgs, }, diff --git a/pkg/tuned/cmd/render/cmd.go b/pkg/tuned/cmd/render/cmd.go index 9335cbcce8..502d78f7bd 100644 --- a/pkg/tuned/cmd/render/cmd.go +++ b/pkg/tuned/cmd/render/cmd.go @@ -16,8 +16,8 @@ limitations under the License. package render import ( + "errors" "flag" - "fmt" "github.com/openshift/cluster-node-tuning-operator/pkg/performanceprofile/controller/performanceprofile/components" @@ -76,7 +76,7 @@ func (r *renderOpts) Validate() error { if len(err) == 0 { return nil } - return fmt.Errorf(err) + return errors.New(err) } func (r *renderOpts) Run() error { diff --git a/test/e2e/basic/custom_node_labels.go b/test/e2e/basic/custom_node_labels.go index 212fd8419d..0d420a5066 100644 --- a/test/e2e/basic/custom_node_labels.go +++ b/test/e2e/basic/custom_node_labels.go @@ -29,11 +29,12 @@ var _ = ginkgo.Describe("[basic][custom_node_labels] Node Tuning Operator custom // Cleanup code to roll back cluster changes done by this test even if it fails in the middle of ginkgo.It() ginkgo.AfterEach(func() { + // Ignore failures to cleanup resources which are already deleted or not yet created. ginkgo.By("cluster changes rollback") if node != nil { - util.ExecAndLogCommand("oc", "label", "node", "--overwrite", node.Name, nodeLabelHugepages+"-") + _, _, _ = util.ExecAndLogCommand("oc", "label", "node", "--overwrite", node.Name, nodeLabelHugepages+"-") } - util.ExecAndLogCommand("oc", "delete", "-n", ntoconfig.WatchNamespace(), "-f", profileHugepages) + _, _, _ = util.ExecAndLogCommand("oc", "delete", "-n", ntoconfig.WatchNamespace(), "-f", profileHugepages) }) ginkgo.It(fmt.Sprintf("%s set", sysctlVar), func() { diff --git a/test/e2e/basic/custom_pod_labels.go b/test/e2e/basic/custom_pod_labels.go index e2db34beb4..0f941e7a33 100644 --- a/test/e2e/basic/custom_pod_labels.go +++ b/test/e2e/basic/custom_pod_labels.go @@ -29,11 +29,12 @@ var _ = ginkgo.Describe("[basic][custom_pod_labels] Node Tuning Operator custom // Cleanup code to roll back cluster changes done by this test even if it fails in the middle of ginkgo.It() ginkgo.AfterEach(func() { + // Ignore failures to cleanup resources which are already deleted or not yet created. 
ginkgo.By("cluster changes rollback") if pod != nil { - util.ExecAndLogCommand("oc", "label", "pod", "--overwrite", "-n", ntoconfig.WatchNamespace(), pod.Name, podLabelIngress+"-") + _, _, _ = util.ExecAndLogCommand("oc", "label", "pod", "--overwrite", "-n", ntoconfig.WatchNamespace(), pod.Name, podLabelIngress+"-") } - util.ExecAndLogCommand("oc", "delete", "-n", ntoconfig.WatchNamespace(), "-f", profileIngress) + _, _, _ = util.ExecAndLogCommand("oc", "delete", "-n", ntoconfig.WatchNamespace(), "-f", profileIngress) }) ginkgo.It(fmt.Sprintf("%s set", sysctlVar), func() { diff --git a/test/e2e/basic/default_irq_smp_affinity.go b/test/e2e/basic/default_irq_smp_affinity.go index 49a9345b03..9b0ebffda6 100644 --- a/test/e2e/basic/default_irq_smp_affinity.go +++ b/test/e2e/basic/default_irq_smp_affinity.go @@ -35,11 +35,12 @@ var _ = ginkgo.Describe("[basic][default_irq_smp_affinity] Node Tuning Operator // Cleanup code to roll back cluster changes done by this test even if it fails in the middle of ginkgo.It() ginkgo.AfterEach(func() { + // Ignore failures to cleanup resources which are already deleted or not yet created. ginkgo.By("cluster changes rollback") if node != nil { - util.ExecAndLogCommand("oc", "label", "node", "--overwrite", node.Name, nodeLabelAffinity+"-") + _, _, _ = util.ExecAndLogCommand("oc", "label", "node", "--overwrite", node.Name, nodeLabelAffinity+"-") } - util.ExecAndLogCommand("oc", "delete", "-n", ntoconfig.WatchNamespace(), "-f", profileAffinity0) + _, _, _ = util.ExecAndLogCommand("oc", "delete", "-n", ntoconfig.WatchNamespace(), "-f", profileAffinity0) }) ginkgo.It(fmt.Sprintf("default_irq_smp_affinity: %s set", procIrqDefaultSmpAffinity), func() { diff --git a/test/e2e/basic/modules.go b/test/e2e/basic/modules.go index c6262fa0f1..4965172756 100644 --- a/test/e2e/basic/modules.go +++ b/test/e2e/basic/modules.go @@ -29,11 +29,12 @@ var _ = ginkgo.Describe("[basic][modules] Node Tuning Operator load kernel modul // Cleanup code to roll back cluster changes done by this test even if it fails in the middle of ginkgo.It() ginkgo.AfterEach(func() { + // Ignore failures to cleanup resources which are already deleted or not yet created. ginkgo.By("cluster changes rollback") if node != nil { - util.ExecAndLogCommand("oc", "label", "node", "--overwrite", node.Name, nodeLabelModules+"-") + _, _, _ = util.ExecAndLogCommand("oc", "label", "node", "--overwrite", node.Name, nodeLabelModules+"-") } - util.ExecAndLogCommand("oc", "delete", "-n", ntoconfig.WatchNamespace(), "-f", profileModules) + _, _, _ = util.ExecAndLogCommand("oc", "delete", "-n", ntoconfig.WatchNamespace(), "-f", profileModules) }) ginkgo.It(fmt.Sprintf("modules: %s loaded", moduleName), func() { @@ -59,6 +60,7 @@ var _ = ginkgo.Describe("[basic][modules] Node Tuning Operator load kernel modul ginkgo.By(fmt.Sprintf("trying to remove the %s module if loaded", moduleName)) _, err = util.ExecCmdInPod(pod, "rmmod", moduleName) + gomega.Expect(err).To(gomega.HaveOccurred()) ginkgo.By(fmt.Sprintf("ensuring the %s module is not loaded", moduleName)) _, err = util.ExecCmdInPod(pod, cmdGrepModule...) 
diff --git a/test/e2e/basic/netdev_set_channels.go b/test/e2e/basic/netdev_set_channels.go index ef78e59ce3..e383579ec9 100644 --- a/test/e2e/basic/netdev_set_channels.go +++ b/test/e2e/basic/netdev_set_channels.go @@ -31,11 +31,12 @@ var _ = ginkgo.Describe("[basic][netdev_set_channels] Node Tuning Operator adjus // Cleanup code to roll back cluster changes done by this test even if it fails in the middle of ginkgo.It() ginkgo.AfterEach(func() { + // Ignore failures to cleanup resources which are already deleted or not yet created. ginkgo.By("cluster changes rollback") if node != nil { - util.ExecAndLogCommand("oc", "label", "node", "--overwrite", node.Name, nodeLabelNetdev+"-") + _, _, _ = util.ExecAndLogCommand("oc", "label", "node", "--overwrite", node.Name, nodeLabelNetdev+"-") } - util.ExecAndLogCommand("oc", "delete", "-n", ntoconfig.WatchNamespace(), "-f", profileNetdev) + _, _, _ = util.ExecAndLogCommand("oc", "delete", "-n", ntoconfig.WatchNamespace(), "-f", profileNetdev) }) ginkgo.It("adjust netdev queue count for physical network devices via ethtool", func() { @@ -187,7 +188,7 @@ var _ = ginkgo.Describe("[basic][netdev_set_channels] Node Tuning Operator adjus gomega.Expect(err).NotTo(gomega.HaveOccurred(), explain) // Set the original value of multi-purpose channels. Ignore failures. - util.ExecCmdInPod(pod, "bash", "-c", fmt.Sprintf("ethtool -L %s combined %d", phyDev, channelOrigCombined)) + _, _ = util.ExecCmdInPod(pod, "bash", "-c", fmt.Sprintf("ethtool -L %s combined %d", phyDev, channelOrigCombined)) }) }) }) diff --git a/test/e2e/basic/rollback.go b/test/e2e/basic/rollback.go index 5f47a9e368..d3b6fe666f 100644 --- a/test/e2e/basic/rollback.go +++ b/test/e2e/basic/rollback.go @@ -44,11 +44,12 @@ var _ = ginkgo.Describe("[basic][rollback] Node Tuning Operator settings rollbac // Cleanup code to roll back cluster changes done by this test even if it fails in the middle of ginkgo.It() ginkgo.AfterEach(func() { + // Ignore failures to cleanup resources which are already deleted or not yet created. ginkgo.By("cluster changes rollback") if pod != nil { - util.ExecAndLogCommand("oc", "label", "pod", "--overwrite", "-n", ntoconfig.WatchNamespace(), pod.Name, podLabelIngress+"-") + _, _, _ = util.ExecAndLogCommand("oc", "label", "pod", "--overwrite", "-n", ntoconfig.WatchNamespace(), pod.Name, podLabelIngress+"-") } - util.ExecAndLogCommand("oc", "delete", "-n", ntoconfig.WatchNamespace(), "-f", profilePath) + _, _, _ = util.ExecAndLogCommand("oc", "delete", "-n", ntoconfig.WatchNamespace(), "-f", profilePath) }) ginkgo.It(fmt.Sprintf("%s set", sysctlTCPTWReuseVar), func() { @@ -142,8 +143,9 @@ var _ = ginkgo.Describe("[basic][rollback] Node Tuning Operator settings rollbac // Cleanup code to roll back cluster changes done by this test even if it fails in the middle of ginkgo.It() ginkgo.AfterEach(func() { + // Ignore failures to cleanup resources which are already deleted or not yet created. 
ginkgo.By("cluster changes rollback") - util.ExecAndLogCommand("oc", "delete", "-n", ntoconfig.WatchNamespace(), "-f", profilePath) + _, _, _ = util.ExecAndLogCommand("oc", "delete", "-n", ntoconfig.WatchNamespace(), "-f", profilePath) }) ginkgo.It("kernel.shmmni set", func() { diff --git a/test/e2e/basic/sysctl_d_override.go b/test/e2e/basic/sysctl_d_override.go index 67bcc78aa3..f936a7b4fe 100644 --- a/test/e2e/basic/sysctl_d_override.go +++ b/test/e2e/basic/sysctl_d_override.go @@ -34,15 +34,16 @@ var _ = ginkgo.Describe("[basic][sysctl_d_override] Node Tuning Operator /etc/sy // Cleanup code to roll back cluster changes done by this test even if it fails in the middle of ginkgo.It() ginkgo.AfterEach(func() { + // Ignore failures to cleanup resources which are already deleted or not yet created. ginkgo.By("cluster changes rollback") if node != nil { - util.ExecAndLogCommand("oc", "label", "node", "--overwrite", node.Name, nodeLabelSysctlOverride+"-") + _, _, _ = util.ExecAndLogCommand("oc", "label", "node", "--overwrite", node.Name, nodeLabelSysctlOverride+"-") } if pod != nil { - util.ExecAndLogCommand("oc", "exec", "-n", ntoconfig.WatchNamespace(), pod.Name, "--", "rm", sysctlFile) + _, _, _ = util.ExecAndLogCommand("oc", "exec", "-n", ntoconfig.WatchNamespace(), pod.Name, "--", "rm", sysctlFile) } - util.ExecAndLogCommand("oc", "delete", "-n", ntoconfig.WatchNamespace(), "-f", profileSysctlOverride) + _, _, _ = util.ExecAndLogCommand("oc", "delete", "-n", ntoconfig.WatchNamespace(), "-f", profileSysctlOverride) }) ginkgo.It(fmt.Sprintf("%s set", sysctlVar), func() { @@ -76,7 +77,8 @@ var _ = ginkgo.Describe("[basic][sysctl_d_override] Node Tuning Operator /etc/sy fmt.Sprintf("echo %s=%s > %s; sync %s", sysctlVar, sysctlValSet, sysctlFile, sysctlFile)) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - util.ExecAndLogCommand("oc", "rsh", "-n", ntoconfig.WatchNamespace(), pod.Name, "cat", sysctlFile) + _, _, err = util.ExecAndLogCommand("oc", "rsh", "-n", ntoconfig.WatchNamespace(), pod.Name, "cat", sysctlFile) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By(fmt.Sprintf("deleting Pod %s", pod.Name)) _, _, err = util.ExecAndLogCommand("oc", "delete", "-n", ntoconfig.WatchNamespace(), "pod", pod.Name, "--wait") diff --git a/test/e2e/basic/tuned_builtin_expand.go b/test/e2e/basic/tuned_builtin_expand.go index 043eea7b9c..300e630ba5 100644 --- a/test/e2e/basic/tuned_builtin_expand.go +++ b/test/e2e/basic/tuned_builtin_expand.go @@ -30,12 +30,13 @@ var _ = ginkgo.Describe("[basic][tuned_builtin_expand] Node Tuning Operator cust // Cleanup code to roll back cluster changes done by this test even if it fails in the middle of ginkgo.It() ginkgo.AfterEach(func() { + // Ignore failures to cleanup resources which are already deleted or not yet created. 
ginkgo.By("cluster changes rollback") if node != nil { - util.ExecAndLogCommand("oc", "label", "node", "--overwrite", node.Name, nodeLabelBuiltinExpand+"-") + _, _, _ = util.ExecAndLogCommand("oc", "label", "node", "--overwrite", node.Name, nodeLabelBuiltinExpand+"-") } - util.ExecAndLogCommand("oc", "delete", "-n", ntoconfig.WatchNamespace(), "-f", profileBuiltinExpand) - util.ExecAndLogCommand("oc", "delete", "-n", ntoconfig.WatchNamespace(), "-f", profileHugepages) + _, _, _ = util.ExecAndLogCommand("oc", "delete", "-n", ntoconfig.WatchNamespace(), "-f", profileBuiltinExpand) + _, _, _ = util.ExecAndLogCommand("oc", "delete", "-n", ntoconfig.WatchNamespace(), "-f", profileHugepages) }) ginkgo.It(fmt.Sprintf("%s set", sysctlVar), func() { diff --git a/test/e2e/basic/tuned_errors_and_recovery.go b/test/e2e/basic/tuned_errors_and_recovery.go index 45f19052fc..6c3a00241b 100644 --- a/test/e2e/basic/tuned_errors_and_recovery.go +++ b/test/e2e/basic/tuned_errors_and_recovery.go @@ -32,15 +32,16 @@ var _ = ginkgo.Describe("[basic][tuned_errors_and_recovery] Cause TuneD daemon e // Cleanup code to roll back cluster changes done by this test even if it fails in the middle of ginkgo.It() ginkgo.AfterEach(func() { + // Ignore failures to cleanup resources which are already deleted or not yet created. ginkgo.By("cluster changes rollback") if node != nil { - util.ExecAndLogCommand("oc", "label", "node", "--overwrite", node.Name, nodeLabelCauseTunedFailure+"-") + _, _, _ = util.ExecAndLogCommand("oc", "label", "node", "--overwrite", node.Name, nodeLabelCauseTunedFailure+"-") } - util.ExecAndLogCommand("oc", "delete", "-n", ntoconfig.WatchNamespace(), "-f", profileCauseTunedFailure) - util.ExecAndLogCommand("oc", "delete", "-n", ntoconfig.WatchNamespace(), "-f", profileDummy) + _, _, _ = util.ExecAndLogCommand("oc", "delete", "-n", ntoconfig.WatchNamespace(), "-f", profileCauseTunedFailure) + _, _, _ = util.ExecAndLogCommand("oc", "delete", "-n", ntoconfig.WatchNamespace(), "-f", profileDummy) if pod != nil { // Without removing the profile directory this e2e test fails when invoking for the second time on the same system. - util.ExecAndLogCommand("oc", "exec", "-n", ntoconfig.WatchNamespace(), pod.Name, "--", "rm", "-rf", "/etc/tuned/openshift-dummy") + _, _, _ = util.ExecAndLogCommand("oc", "exec", "-n", ntoconfig.WatchNamespace(), pod.Name, "--", "rm", "-rf", "/etc/tuned/openshift-dummy") } }) diff --git a/test/e2e/deferred/basic.go b/test/e2e/deferred/basic.go index 4a0949f2d8..2d29c7c623 100644 --- a/test/e2e/deferred/basic.go +++ b/test/e2e/deferred/basic.go @@ -69,11 +69,12 @@ var _ = ginkgo.Describe("Profile deferred", ginkgo.Label("deferred", "profile-st }) ginkgo.AfterEach(func() { + // Ignore failures to cleanup resources which are already deleted or not yet created. 
for _, createdTuned := range createdTuneds { ginkgo.By(fmt.Sprintf("cluster changes rollback: %q", createdTuned)) - util.ExecAndLogCommand("oc", "delete", "-n", ntoconfig.WatchNamespace(), "-f", createdTuned) + _, _, _ = util.ExecAndLogCommand("oc", "delete", "-n", ntoconfig.WatchNamespace(), "-f", createdTuned) } - util.ExecAndLogCommand("oc", "label", "node", targetNode.Name, tunedMatchLabelLater+"-") + _, _, _ = util.ExecAndLogCommand("oc", "label", "node", targetNode.Name, tunedMatchLabelLater+"-") checkWorkerNodeIsDefaultState(context.Background(), targetNode) }) diff --git a/test/e2e/deferred/non_regression.go b/test/e2e/deferred/non_regression.go index b9b0723521..053bfeb451 100644 --- a/test/e2e/deferred/non_regression.go +++ b/test/e2e/deferred/non_regression.go @@ -47,9 +47,10 @@ var _ = ginkgo.Describe("Profile non-deferred", ginkgo.Label("deferred", "non-re }) ginkgo.AfterEach(func() { + // Ignore failures to cleanup resources which are already deleted or not yet created. for _, createdTuned := range createdTuneds { ginkgo.By(fmt.Sprintf("cluster changes rollback: %q", createdTuned)) - util.ExecAndLogCommand("oc", "delete", "-n", ntoconfig.WatchNamespace(), "-f", createdTuned) + _, _, _ = util.ExecAndLogCommand("oc", "delete", "-n", ntoconfig.WatchNamespace(), "-f", createdTuned) } checkWorkerNodeIsDefaultState(context.Background(), targetNode) diff --git a/test/e2e/deferred/operator_test.go b/test/e2e/deferred/operator_test.go index dc6d3ef22f..809767657a 100644 --- a/test/e2e/deferred/operator_test.go +++ b/test/e2e/deferred/operator_test.go @@ -30,8 +30,6 @@ const ( verifyCommandAnnotation = "verificationCommand" verifyOutputAnnotation = "verificationOutput" - pollInterval = 5 * time.Second - waitDuration = 5 * time.Minute // The number of Profile status conditions. Adjust when adding new conditions in the API. ProfileStatusConditions = 2 diff --git a/test/e2e/deferred/restart.go b/test/e2e/deferred/restart.go index 98f70cbe7c..10ec643167 100644 --- a/test/e2e/deferred/restart.go +++ b/test/e2e/deferred/restart.go @@ -59,9 +59,10 @@ var _ = ginkgo.Describe("Profile deferred", ginkgo.Label("deferred", "slow", "di }) ginkgo.AfterEach(func() { + // Ignore failures to cleanup resources which are already deleted or not yet created. for _, createdTuned := range createdTuneds { ginkgo.By(fmt.Sprintf("cluster changes rollback: %q", createdTuned)) - util.ExecAndLogCommand("oc", "delete", "-n", ntoconfig.WatchNamespace(), "-f", createdTuned) + _, _, _ = util.ExecAndLogCommand("oc", "delete", "-n", ntoconfig.WatchNamespace(), "-f", createdTuned) } if len(createdTuneds) == 0 { diff --git a/test/e2e/deferred/updates.go b/test/e2e/deferred/updates.go index 802d3f8f41..0a00b71ba5 100644 --- a/test/e2e/deferred/updates.go +++ b/test/e2e/deferred/updates.go @@ -70,9 +70,10 @@ var _ = ginkgo.Describe("Profile deferred", ginkgo.Label("deferred", "inplace-up }) ginkgo.AfterEach(func() { + // Ignore failures to cleanup resources which are already deleted or not yet created. 
for _, createdTuned := range createdTuneds { ginkgo.By(fmt.Sprintf("cluster changes rollback: %q", createdTuned)) - util.ExecAndLogCommand("oc", "delete", "-n", ntoconfig.WatchNamespace(), "-f", createdTuned) + _, _, _ = util.ExecAndLogCommand("oc", "delete", "-n", ntoconfig.WatchNamespace(), "-f", createdTuned) } checkWorkerNodeIsDefaultState(context.Background(), targetNode) diff --git a/test/e2e/performanceprofile/functests-render-command/1_render_command/render_test.go b/test/e2e/performanceprofile/functests-render-command/1_render_command/render_test.go index b538fb2c17..1211de6448 100644 --- a/test/e2e/performanceprofile/functests-render-command/1_render_command/render_test.go +++ b/test/e2e/performanceprofile/functests-render-command/1_render_command/render_test.go @@ -94,7 +94,7 @@ var _ = Describe("render command e2e test", func() { Expect(err).To(HaveOccurred(), logStderr(err)) }) - It("Must not set any owner reference if disabled explicitely", func() { + It("Must not set any owner reference if disabled explicitly", func() { cmdline := []string{ filepath.Join(binPath, "cluster-node-tuning-operator"), "render", diff --git a/test/e2e/performanceprofile/functests/0_config/config.go b/test/e2e/performanceprofile/functests/0_config/config.go index 9a7a28d8b2..a53d9771f9 100644 --- a/test/e2e/performanceprofile/functests/0_config/config.go +++ b/test/e2e/performanceprofile/functests/0_config/config.go @@ -16,7 +16,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/apimachinery/pkg/types" "k8s.io/utils/cpuset" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/performanceprofile/v2" @@ -170,7 +170,7 @@ func testProfile() (*performancev2.PerformanceProfile, error) { { Size: "1G", Count: 1, - Node: pointer.Int32(0), + Node: ptr.To(int32(0)), }, { Size: "2M", @@ -180,18 +180,18 @@ func testProfile() (*performancev2.PerformanceProfile, error) { }, NodeSelector: testutils.NodeSelectorLabels, RealTimeKernel: &performancev2.RealTimeKernel{ - Enabled: pointer.Bool(true), + Enabled: ptr.To(true), }, NUMA: &performancev2.NUMA{ - TopologyPolicy: pointer.String("single-numa-node"), + TopologyPolicy: ptr.To("single-numa-node"), }, Net: &performancev2.Net{ - UserLevelNetworking: pointer.Bool(true), + UserLevelNetworking: ptr.To(true), }, WorkloadHints: &performancev2.WorkloadHints{ - RealTime: pointer.Bool(true), - HighPowerConsumption: pointer.Bool(false), - PerPodPowerManagement: pointer.Bool(false), + RealTime: ptr.To(true), + HighPowerConsumption: ptr.To(false), + PerPodPowerManagement: ptr.To(false), }, }, } diff --git a/test/e2e/performanceprofile/functests/10_performance_ppc/10_ppc_suite_test.go b/test/e2e/performanceprofile/functests/10_performance_ppc/10_ppc_suite_test.go index ea29a6a758..da21df339b 100644 --- a/test/e2e/performanceprofile/functests/10_performance_ppc/10_ppc_suite_test.go +++ b/test/e2e/performanceprofile/functests/10_performance_ppc/10_ppc_suite_test.go @@ -36,7 +36,7 @@ var _ = BeforeSuite(func() { }) var _ = AfterSuite(func() { - nodeinspector.Delete(context.TODO()) + Expect(nodeinspector.Delete(context.TODO())).To(Succeed()) }) func TestPPC(t *testing.T) { diff --git a/test/e2e/performanceprofile/functests/10_performance_ppc/ppc.go b/test/e2e/performanceprofile/functests/10_performance_ppc/ppc.go index c50842daf9..3e4eaf391e 100644 --- a/test/e2e/performanceprofile/functests/10_performance_ppc/ppc.go +++ 
b/test/e2e/performanceprofile/functests/10_performance_ppc/ppc.go @@ -86,8 +86,7 @@ var _ = Describe("[rfe_id: 38968] PerformanceProfile setup helper and platform a fmt.Sprintf("--split-reserved-cpus-across-numa=%t", false), fmt.Sprintf("--must-gather-dir-path=%s", mustgatherDir), } - podmanArgs := []string{} - podmanArgs = append(defaultArgs, cmdArgs...) + podmanArgs := append(defaultArgs, cmdArgs...) session, err := ppcIntgTest.PodmanAsUserBase(podmanArgs, false, false) Expect(err).ToNot(HaveOccurred(), "Podman command failed") output := session.Wait(20).Out.Contents() @@ -120,13 +119,13 @@ var _ = Describe("[rfe_id: 38968] PerformanceProfile setup helper and platform a fmt.Sprintf("--topology-manager-policy=%s", "single-numa-node"), fmt.Sprintf("--must-gather-dir-path=%s", mustgatherDir), } - podmanArgs := []string{} - podmanArgs = append(defaultArgs, cmdArgs...) + podmanArgs := append(defaultArgs, cmdArgs...) session, err := ppcIntgTest.PodmanAsUserBase(podmanArgs, false, false) Expect(err).ToNot(HaveOccurred(), "Podman command failed") output := session.Wait(20).Err.Contents() errString := "Error: failed to obtain data from flags not appropriate to split reserved CPUs in case of topology-manager-policy: single-numa-node" ok, err := regexp.MatchString(errString, string(output)) + Expect(err).ToNot(HaveOccurred()) if ok { testlog.Info(errString) } @@ -150,8 +149,7 @@ var _ = Describe("[rfe_id: 38968] PerformanceProfile setup helper and platform a fmt.Sprintf("--split-reserved-cpus-across-numa=%t", true), fmt.Sprintf("--must-gather-dir-path=%s", mustgatherDir), } - podmanArgs := []string{} - podmanArgs = append(defaultArgs, cmdArgs...) + podmanArgs := append(defaultArgs, cmdArgs...) session, err := ppcIntgTest.PodmanAsUserBase(podmanArgs, false, false) Expect(err).ToNot(HaveOccurred(), "Podman command failed") output := session.Wait(20).Err.Contents() @@ -181,8 +179,7 @@ var _ = Describe("[rfe_id: 38968] PerformanceProfile setup helper and platform a fmt.Sprintf("--split-reserved-cpus-across-numa=%t", true), fmt.Sprintf("--must-gather-dir-path=%s", mustgatherDir), } - podmanArgs := []string{} - podmanArgs = append(defaultArgs, cmdArgs...) + podmanArgs := append(defaultArgs, cmdArgs...) 
session, err := ppcIntgTest.PodmanAsUserBase(podmanArgs, false, false) Expect(err).ToNot(HaveOccurred(), "Podman command failed") output := session.Wait(20).Err.Contents() diff --git a/test/e2e/performanceprofile/functests/11_mixedcpus/11_mixedcpus_suite_test.go b/test/e2e/performanceprofile/functests/11_mixedcpus/11_mixedcpus_suite_test.go index 136df1ef58..fb5cdc056c 100644 --- a/test/e2e/performanceprofile/functests/11_mixedcpus/11_mixedcpus_suite_test.go +++ b/test/e2e/performanceprofile/functests/11_mixedcpus/11_mixedcpus_suite_test.go @@ -15,5 +15,5 @@ func TestMixedCPUs(t *testing.T) { } var _ = AfterSuite(func() { - nodeinspector.Delete(context.TODO()) + Expect(nodeinspector.Delete(context.TODO())).To(Succeed()) }) diff --git a/test/e2e/performanceprofile/functests/11_mixedcpus/mixedcpus.go b/test/e2e/performanceprofile/functests/11_mixedcpus/mixedcpus.go index 1529c7f2d6..279a08d51e 100644 --- a/test/e2e/performanceprofile/functests/11_mixedcpus/mixedcpus.go +++ b/test/e2e/performanceprofile/functests/11_mixedcpus/mixedcpus.go @@ -17,7 +17,7 @@ import ( "k8s.io/client-go/kubernetes" kubeletconfig "k8s.io/kubelet/config/v1beta1" "k8s.io/utils/cpuset" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" @@ -395,7 +395,7 @@ var _ = Describe("Mixedcpus", Ordered, Label(string(label.MixedCPUs)), func() { withRequests(rl), withLimits(rl), withRuntime(components.GetComponentName(profile.Name, components.ComponentNamePrefix))) - expectedError := "more than a single \"workload.openshift.io/enable-shared-cpus\" resource is forbiden, please set the request to 1 or remove it" + expectedError := "more than a single \"workload.openshift.io/enable-shared-cpus\" resource is forbidden, please set the request to 1 or remove it" Expect(err).To(HaveOccurred()) Expect(err.Error()).To(ContainSubstring(expectedError)) }) @@ -466,7 +466,7 @@ var _ = Describe("Mixedcpus", Ordered, Label(string(label.MixedCPUs)), func() { By("Editing performanceProfile to have empty shared cpus and setting mixedCpus to false") profile.Spec.CPU.Shared = cpuSetToPerformanceCPUSet(&cpuset.CPUSet{}) - profile.Spec.WorkloadHints.MixedCpus = pointer.Bool(false) + profile.Spec.WorkloadHints.MixedCpus = ptr.To(false) By("Applying new performanceProfile") testprofiles.UpdateWithRetry(profile) @@ -492,7 +492,7 @@ var _ = Describe("Mixedcpus", Ordered, Label(string(label.MixedCPUs)), func() { By("Reverting the cluster to previous state") Expect(testclient.Client.Get(ctx, client.ObjectKeyFromObject(profile), profile)) profile.Spec.CPU.Shared = cpuSetToPerformanceCPUSet(ppShared) - profile.Spec.WorkloadHints.MixedCpus = pointer.Bool(true) + profile.Spec.WorkloadHints.MixedCpus = ptr.To(true) testprofiles.UpdateWithRetry(profile) mcp, err = mcps.GetByProfile(profile) Expect(err).ToNot(HaveOccurred()) @@ -520,7 +520,7 @@ func setup(ctx context.Context) func(ctx2 context.Context) { updatedIsolated := isolated.Difference(sharedcpu) profile.Spec.CPU.Isolated = cpuSetToPerformanceCPUSet(&updatedIsolated) profile.Spec.CPU.Shared = cpuSetToPerformanceCPUSet(&sharedcpu) - profile.Spec.WorkloadHints.MixedCpus = pointer.Bool(true) + profile.Spec.WorkloadHints.MixedCpus = ptr.To(true) testlog.Infof("enable mixed cpus for profile %q", profile.Name) updateNeeded = true } else { @@ -554,7 +554,7 @@ func setup(ctx context.Context) func(ctx2 context.Context) { mcps.WaitForCondition(mcp, machineconfigv1.MachineConfigPoolUpdated, corev1.ConditionTrue) teardown := func(ctx2 context.Context) { - 
By(fmt.Sprintf("executing teardown - revert profile %q back to its intial state", profile.Name)) + By(fmt.Sprintf("executing teardown - revert profile %q back to its initial state", profile.Name)) Expect(testclient.Client.Get(ctx2, client.ObjectKeyFromObject(initialProfile), profile)) testprofiles.UpdateWithRetry(initialProfile) @@ -726,5 +726,4 @@ func checkSchedulingDomains(workerRTNode *corev1.Node, podCpus cpuset.CPUSet, te cpuIDs := cpuset.New(cpuIDList...) return testFunc(cpuIDs) }).WithTimeout(2*time.Minute).WithPolling(5*time.Second).ShouldNot(HaveOccurred(), errMsg) - } diff --git a/test/e2e/performanceprofile/functests/12_hypershift/hypershift.go b/test/e2e/performanceprofile/functests/12_hypershift/hypershift.go index 49305be39d..69cbbabe79 100644 --- a/test/e2e/performanceprofile/functests/12_hypershift/hypershift.go +++ b/test/e2e/performanceprofile/functests/12_hypershift/hypershift.go @@ -7,7 +7,7 @@ import ( . "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" kubeletconfigv1beta1 "k8s.io/kubelet/config/v1beta1" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/performanceprofile/v2" @@ -112,7 +112,7 @@ var _ = Describe("Multiple performance profile in hypershift", Label("Hypershift By("Modifying the second profile CPU and NUMA configurations") secondProfile.Spec.CPU = &performancev2.CPU{ - BalanceIsolated: pointer.Bool(false), + BalanceIsolated: ptr.To(false), Reserved: &reserved, Isolated: &isolated, } diff --git a/test/e2e/performanceprofile/functests/1_performance/cpu_management.go b/test/e2e/performanceprofile/functests/1_performance/cpu_management.go index e439411d98..1ed65efb1b 100644 --- a/test/e2e/performanceprofile/functests/1_performance/cpu_management.go +++ b/test/e2e/performanceprofile/functests/1_performance/cpu_management.go @@ -18,7 +18,7 @@ import ( "k8s.io/apimachinery/pkg/types" kubeletconfigv1beta1 "k8s.io/kubelet/config/v1beta1" "k8s.io/utils/cpuset" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" . 
"github.com/onsi/ginkgo/v2" @@ -299,6 +299,7 @@ var _ = Describe("[rfe_id:27363][performance] CPU Management", Ordered, func() { } err := testclient.DataPlaneClient.Create(context.TODO(), testpod) + Expect(err).ToNot(HaveOccurred()) testpod, err = pods.WaitForCondition(context.TODO(), client.ObjectKeyFromObject(testpod), corev1.PodReady, corev1.ConditionTrue, 10*time.Minute) logEventsForPod(testpod) Expect(err).ToNot(HaveOccurred()) @@ -331,6 +332,7 @@ var _ = Describe("[rfe_id:27363][performance] CPU Management", Ordered, func() { By("fetch Default cpuset from cpu manager state after restart") cpuManagerCpusetAfterRestart, err := nodes.CpuManagerCpuSet(ctx, workerRTNode) + Expect(err).ToNot(HaveOccurred()) Expect(cpuManagerCpusetBeforeRestart).To(Equal(cpuManagerCpusetAfterRestart)) }) }) @@ -427,7 +429,7 @@ var _ = Describe("[rfe_id:27363][performance] CPU Management", Ordered, func() { testpod = pods.GetTestPod() testpod.Namespace = testutils.NamespaceTesting testpod.Spec.NodeSelector = map[string]string{testutils.LabelHostname: workerRTNode.Name} - testpod.Spec.ShareProcessNamespace = pointer.Bool(true) + testpod.Spec.ShareProcessNamespace = ptr.To(true) err := testclient.DataPlaneClient.Create(context.TODO(), testpod) Expect(err).ToNot(HaveOccurred()) @@ -820,6 +822,7 @@ var _ = Describe("[rfe_id:27363][performance] CPU Management", Ordered, func() { // Get cpus used by the container tasksetcmd := []string{"/bin/taskset", "-pc", "1"} testpodAffinity, err := pods.ExecCommandOnPod(testclient.K8sClient, testpod, testpod.Spec.Containers[0].Name, tasksetcmd) + Expect(err).ToNot(HaveOccurred()) podCpusStr := string(testpodAffinity) parts := strings.Split(strings.TrimSpace(podCpusStr), ":") testpodCpus := strings.TrimSpace(parts[1]) @@ -955,7 +958,7 @@ func checkPodHTSiblings(ctx context.Context, testpod *corev1.Pod) bool { ) output = testutils.ToString(out) - // output is newline seperated. Convert to cpulist format by replacing internal "\n" chars with "," + // output is newline separated. Convert to cpulist format by replacing internal "\n" chars with "," hostHTSiblings := strings.ReplaceAll( strings.Trim(fmt.Sprint(output), "\n"), "\n", ",", ) @@ -966,8 +969,8 @@ func checkPodHTSiblings(ctx context.Context, testpod *corev1.Pod) bool { // pod cpu list should have the same siblings as the host for the same cpus return hostcpus.Equals(podcpus) - } + func startHTtestPod(ctx context.Context, cpuCount int) *corev1.Pod { var testpod *corev1.Pod @@ -1183,7 +1186,6 @@ func checkSchedulingDomains(workerRTNode *corev1.Node, podCpus cpuset.CPUSet, te cpuIDs := cpuset.New(cpuIDList...) 
return testFunc(cpuIDs) }).WithTimeout(2*time.Minute).WithPolling(5*time.Second).ShouldNot(HaveOccurred(), errMsg) - } // busyCpuImageEnv return busycpus image used for crio quota annotations test diff --git a/test/e2e/performanceprofile/functests/1_performance/hugepages.go b/test/e2e/performanceprofile/functests/1_performance/hugepages.go index 3c8ed7353c..e05246160a 100644 --- a/test/e2e/performanceprofile/functests/1_performance/hugepages.go +++ b/test/e2e/performanceprofile/functests/1_performance/hugepages.go @@ -72,7 +72,7 @@ var _ = Describe("[performance]Hugepages", Ordered, func() { // We have multiple hugepages e2e tests under the upstream, so the only thing that we should check, if the PAO configure // correctly number of hugepages that will be available on the node Context("[rfe_id:27369]when NUMA node specified", Label(string(label.Tier0)), func() { - It("[test_id:27752][crit:high][vendor:cnf-qe@redhat.com][level:acceptance] should be allocated on the specifed NUMA node", func() { + It("[test_id:27752][crit:high][vendor:cnf-qe@redhat.com][level:acceptance] should be allocated on the specified NUMA node", func() { for _, page := range profile.Spec.HugePages.Pages { if page.Node == nil { continue diff --git a/test/e2e/performanceprofile/functests/1_performance/netqueues.go b/test/e2e/performanceprofile/functests/1_performance/netqueues.go index 8633ecca8c..355e3e7d5e 100644 --- a/test/e2e/performanceprofile/functests/1_performance/netqueues.go +++ b/test/e2e/performanceprofile/functests/1_performance/netqueues.go @@ -11,7 +11,7 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/utils/cpuset" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -79,7 +79,7 @@ var _ = Describe("[ref_id: 40307][pao]Resizing Network Queues", Ordered, Label(s if profile.Spec.Net == nil { By("Enable UserLevelNetworking in Profile") profile.Spec.Net = &performancev2.Net{ - UserLevelNetworking: pointer.Bool(true), + UserLevelNetworking: ptr.To(true), } By("Updating the performance profile") profiles.UpdateWithRetry(profile) @@ -127,7 +127,7 @@ var _ = Describe("[ref_id: 40307][pao]Resizing Network Queues", Ordered, Label(s if profile.Spec.Net.UserLevelNetworking != nil && *profile.Spec.Net.UserLevelNetworking && len(profile.Spec.Net.Devices) == 0 { By("Enable UserLevelNetworking and add Devices in Profile") profile.Spec.Net = &performancev2.Net{ - UserLevelNetworking: pointer.Bool(true), + UserLevelNetworking: ptr.To(true), Devices: []performancev2.Device{ { InterfaceName: &device, @@ -175,7 +175,7 @@ var _ = Describe("[ref_id: 40307][pao]Resizing Network Queues", Ordered, Label(s if profile.Spec.Net.UserLevelNetworking != nil && *profile.Spec.Net.UserLevelNetworking && len(profile.Spec.Net.Devices) == 0 { By("Enable UserLevelNetworking and add Devices in Profile") profile.Spec.Net = &performancev2.Net{ - UserLevelNetworking: pointer.Bool(true), + UserLevelNetworking: ptr.To(true), Devices: []performancev2.Device{ { InterfaceName: &devicePattern, @@ -231,7 +231,7 @@ var _ = Describe("[ref_id: 40307][pao]Resizing Network Queues", Ordered, Label(s if profile.Spec.Net.UserLevelNetworking != nil && *profile.Spec.Net.UserLevelNetworking && len(profile.Spec.Net.Devices) == 0 { By("Enable UserLevelNetworking and add Devices in Profile") profile.Spec.Net = &performancev2.Net{ - UserLevelNetworking: pointer.Bool(true), + UserLevelNetworking: ptr.To(true), Devices: []performancev2.Device{ { InterfaceName: &devicePattern, @@ 
-286,7 +286,7 @@ var _ = Describe("[ref_id: 40307][pao]Resizing Network Queues", Ordered, Label(s if profile.Spec.Net.UserLevelNetworking != nil && *profile.Spec.Net.UserLevelNetworking && len(profile.Spec.Net.Devices) == 0 { By("Enable UserLevelNetworking and add DeviceID, VendorID and Interface in Profile") profile.Spec.Net = &performancev2.Net{ - UserLevelNetworking: pointer.Bool(true), + UserLevelNetworking: ptr.To(true), Devices: []performancev2.Device{ { InterfaceName: &device, @@ -364,6 +364,7 @@ func checkDeviceSupport(ctx context.Context, workernodes []corev1.Node, nodesDev cmdCombinedChannelsCurrent := []string{"bash", "-c", fmt.Sprintf("ethtool -l %s | sed -n '/Current hardware settings:/,/Combined:/{s/^Combined:\\s*//p}'", d)} out, err := pods.WaitForPodOutput(ctx, testclient.K8sClient, tunedPod, cmdCombinedChannelsCurrent) + Expect(err).ToNot(HaveOccurred()) if strings.Contains(string(out), "n/a") { fmt.Printf("Device %s doesn't support multiple queues\n", d) } else { diff --git a/test/e2e/performanceprofile/functests/1_performance/performance.go b/test/e2e/performanceprofile/functests/1_performance/performance.go index d7bd73fe3f..fe208b5c1f 100644 --- a/test/e2e/performanceprofile/functests/1_performance/performance.go +++ b/test/e2e/performanceprofile/functests/1_performance/performance.go @@ -18,7 +18,7 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/klog" "k8s.io/utils/cpuset" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" @@ -158,7 +158,7 @@ var _ = Describe("[rfe_id:27368][performance]", Ordered, func() { for _, node := range workerRTNodes { cmdline, err := nodes.ExecCommand(context.TODO(), &node, []string{"cat", "/proc/cmdline"}) Expect(err).ToNot(HaveOccurred()) - if profile.Spec.CPU.BalanceIsolated != nil && *profile.Spec.CPU.BalanceIsolated == false { + if profile.Spec.CPU.BalanceIsolated != nil && !*profile.Spec.CPU.BalanceIsolated { Expect(string(cmdline)).To(ContainSubstring("isolcpus=domain,managed_irq,")) } else { Expect(string(cmdline)).To(ContainSubstring("isolcpus=managed_irq,")) @@ -250,7 +250,7 @@ var _ = Describe("[rfe_id:27368][performance]", Ordered, func() { if profile.Spec.RealTimeKernel == nil || profile.Spec.RealTimeKernel.Enabled == nil || - *profile.Spec.RealTimeKernel.Enabled != true { + !*profile.Spec.RealTimeKernel.Enabled { Expect(stalld_prio).To(BeNumerically("<", ksoftirq_prio)) testlog.Warning("Skip checking rcu since RT kernel is disabled") return @@ -275,7 +275,7 @@ var _ = Describe("[rfe_id:27368][performance]", Ordered, func() { }) - Context("Additional kernel arguments added from perfomance profile", Label(string(label.Tier0)), func() { + Context("Additional kernel arguments added from performance profile", Label(string(label.Tier0)), func() { It("[test_id:28611][crit:high][vendor:cnf-qe@redhat.com][level:acceptance] Should set additional kernel arguments on the machine", func() { if profile.Spec.AdditionalKernelArgs != nil { for _, node := range workerRTNodes { @@ -390,7 +390,7 @@ var _ = Describe("[rfe_id:27368][performance]", Ordered, func() { Expect(allInterfaces).ToNot(BeNil()) // collect all veth interfaces in a list for _, iface := range allInterfaces { - if iface.Bridge == true && iface.Physical == false { + if iface.Bridge && !iface.Physical { vethInterfaces = append(vethInterfaces, iface.Name) } } @@ -549,13 +549,13 @@ var _ = Describe("[rfe_id:27368][performance]", Ordered, func() { }, NodeSelector: map[string]string{newLabel: ""}, RealTimeKernel: &performancev2.RealTimeKernel{ 
- Enabled: pointer.Bool(true), + Enabled: ptr.To(true), }, AdditionalKernelArgs: []string{ "NEW_ARGUMENT", }, NUMA: &performancev2.NUMA{ - TopologyPolicy: pointer.String("restricted"), + TopologyPolicy: ptr.To("restricted"), }, }, } @@ -783,7 +783,7 @@ var _ = Describe("[rfe_id:27368][performance]", Ordered, func() { profile.Name = testProfileName profile.ResourceVersion = "" profile.Spec.NodeSelector = map[string]string{"test/test": "test"} - profile.Spec.GloballyDisableIrqLoadBalancing = pointer.Bool(globallyDisableIrqLoadBalancing) + profile.Spec.GloballyDisableIrqLoadBalancing = ptr.To(globallyDisableIrqLoadBalancing) profile.Spec.MachineConfigPoolSelector = nil profile.Spec.MachineConfigLabel = nil @@ -904,11 +904,11 @@ var _ = Describe("[rfe_id:27368][performance]", Ordered, func() { }, Spec: performancev1alpha1.PerformanceProfileSpec{ RealTimeKernel: &performancev1alpha1.RealTimeKernel{ - Enabled: pointer.Bool(true), + Enabled: ptr.To(true), }, NodeSelector: map[string]string{"v1alpha1/v1alpha1": "v1alpha1"}, NUMA: &performancev1alpha1.NUMA{ - TopologyPolicy: pointer.String("restricted"), + TopologyPolicy: ptr.To("restricted"), }, }, } @@ -963,11 +963,11 @@ var _ = Describe("[rfe_id:27368][performance]", Ordered, func() { }, Spec: performancev1.PerformanceProfileSpec{ RealTimeKernel: &performancev1.RealTimeKernel{ - Enabled: pointer.Bool(true), + Enabled: ptr.To(true), }, NodeSelector: map[string]string{"v1/v1": "v1"}, NUMA: &performancev1.NUMA{ - TopologyPolicy: pointer.String("restricted"), + TopologyPolicy: ptr.To("restricted"), }, }, } @@ -1022,11 +1022,11 @@ var _ = Describe("[rfe_id:27368][performance]", Ordered, func() { }, Spec: performancev2.PerformanceProfileSpec{ RealTimeKernel: &performancev2.RealTimeKernel{ - Enabled: pointer.Bool(true), + Enabled: ptr.To(true), }, NodeSelector: map[string]string{"v2/v2": "v2"}, NUMA: &performancev2.NUMA{ - TopologyPolicy: pointer.String("restricted"), + TopologyPolicy: ptr.To("restricted"), }, }, } @@ -1367,21 +1367,6 @@ func execSysctlOnWorkers(ctx context.Context, workerNodes []corev1.Node, sysctlM } } -// check scheduler settings. 
on RHCOS9.2 all scheduler settings are moved to /sys/kernel/debug/sched/ -func checkSchedKnobs(ctx context.Context, workerNodes []corev1.Node, schedKnobs map[string]string) { - var err error - var out []byte - for _, node := range workerNodes { - for param, expected := range schedKnobs { - By(fmt.Sprintf("Checking scheduler knob %s", param)) - knob := fmt.Sprintf("/rootfs/sys/kernel/debug/sched/%s", param) - out, err = nodes.ExecCommand(ctx, &node, []string{"cat", knob}) - Expect(err).ToNot(HaveOccurred()) - Expect(strings.TrimSpace(string(out))).Should(Equal(expected), "parameter %s value is not %s.", param, expected) - } - } -} - // execute sysctl command inside container in a tuned pod func validateTunedActiveProfile(ctx context.Context, wrknodes []corev1.Node) { var err error diff --git a/test/e2e/performanceprofile/functests/1_performance/rt-kernel.go b/test/e2e/performanceprofile/functests/1_performance/rt-kernel.go index 60bacc1bda..9df5f693d7 100644 --- a/test/e2e/performanceprofile/functests/1_performance/rt-kernel.go +++ b/test/e2e/performanceprofile/functests/1_performance/rt-kernel.go @@ -24,7 +24,7 @@ var _ = Describe("[performance]RT Kernel", Ordered, Label(string(label.Tier0)), func(profile performancev2.PerformanceProfile) bool { if profile.Spec.RealTimeKernel != nil && profile.Spec.RealTimeKernel.Enabled != nil && - *profile.Spec.RealTimeKernel.Enabled == true { + *profile.Spec.RealTimeKernel.Enabled { return true } return false diff --git a/test/e2e/performanceprofile/functests/1_performance/test_suite_performance_test.go b/test/e2e/performanceprofile/functests/1_performance/test_suite_performance_test.go index 9b8f365c62..d08936ffc6 100644 --- a/test/e2e/performanceprofile/functests/1_performance/test_suite_performance_test.go +++ b/test/e2e/performanceprofile/functests/1_performance/test_suite_performance_test.go @@ -55,7 +55,8 @@ var _ = AfterSuite(func() { err := testclient.DataPlaneClient.Delete(context.TODO(), namespaces.TestingNamespace) Expect(err).ToNot(HaveOccurred()) err = namespaces.WaitForDeletion(testutils.NamespaceTesting, 5*time.Minute) - nodeinspector.Delete(context.TODO()) + Expect(err).ToNot(HaveOccurred()) + Expect(nodeinspector.Delete(context.TODO())).To(Succeed()) }) func TestPerformance(t *testing.T) { diff --git a/test/e2e/performanceprofile/functests/2_performance_update/memorymanager.go b/test/e2e/performanceprofile/functests/2_performance_update/memorymanager.go index f33ff083a6..aafc9b16f8 100644 --- a/test/e2e/performanceprofile/functests/2_performance_update/memorymanager.go +++ b/test/e2e/performanceprofile/functests/2_performance_update/memorymanager.go @@ -29,7 +29,7 @@ import ( "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -312,12 +312,12 @@ var _ = Describe("[rfe_id: 43186][memorymanager] Memorymanager feature", Label(s { Count: int32(numaZone0HugepagesCount), Size: hpSize2M, - Node: pointer.Int32(0), + Node: ptr.To(int32(0)), }, { Count: int32(numaZone1HugepagesCount), Size: hpSize2M, - Node: pointer.Int32(1), + Node: ptr.To(int32(1)), }, }, } @@ -624,12 +624,12 @@ var _ = Describe("[rfe_id: 43186][memorymanager] Memorymanager feature", Label(s { Count: int32(numaZone0HugepagesCount), Size: hpSize2M, - Node: pointer.Int32(0), + Node: ptr.To(int32(0)), }, { Count: int32(numaZone1HugepagesCount), Size: hpSize1G, - Node: pointer.Int32(1), + Node: ptr.To(int32(1)), }, }, } @@ 
-777,8 +777,11 @@ func (mm MMPod) removePod(ctx context.Context, testPod *corev1.Pod) error { return err } err = testclient.DataPlaneClient.Delete(ctx, testPod) - err = pods.WaitForDeletion(ctx, testPod, pods.DefaultDeletionTimeout*time.Second) - return err + if err != nil { + return err + } + + return pods.WaitForDeletion(ctx, testPod, pods.DefaultDeletionTimeout*time.Second) } // InitializePod initialize pods which we want to be in running state @@ -806,9 +809,16 @@ func GetMemoryNodes(ctx context.Context, testPod *corev1.Pod, targetNode *corev1 return "", fmt.Errorf("Failed to fetch containerId for %v", testPod) } pid, err := nodes.ContainerPid(context.TODO(), targetNode, containerID) + if err != nil { + return "", fmt.Errorf("Unable to get container PID: %v", err) + } cmd := []string{"cat", fmt.Sprintf("/rootfs/proc/%s/cgroup", pid)} out, err := nodes.ExecCommand(context.TODO(), targetNode, cmd) + if err != nil { + return "", err + } containerCgroup, err = cgroup.PidParser(out) + Expect(err).ToNot(HaveOccurred()) fmt.Println("Container Cgroup = ", containerCgroup) cgroupv2, err := cgroup.IsVersion2(context.TODO(), testclient.DataPlaneClient) if err != nil { @@ -823,6 +833,9 @@ func GetMemoryNodes(ctx context.Context, testPod *corev1.Pod, targetNode *corev1 } cmd = []string{"cat", cpusetMemsPath} out, err = nodes.ExecCommand(ctx, targetNode, cmd) + if err != nil { + return "", err + } memoryNodes = testutils.ToString(out) testlog.Infof("test pod %s with container id %s has Memory nodes %s", testPod.Name, containerID, memoryNodes) return memoryNodes, err diff --git a/test/e2e/performanceprofile/functests/2_performance_update/test_suite_performance_update_test.go b/test/e2e/performanceprofile/functests/2_performance_update/test_suite_performance_update_test.go index 6cf0e3f5da..c5d0ec1862 100644 --- a/test/e2e/performanceprofile/functests/2_performance_update/test_suite_performance_update_test.go +++ b/test/e2e/performanceprofile/functests/2_performance_update/test_suite_performance_update_test.go @@ -42,7 +42,8 @@ var _ = AfterSuite(func() { err := testclient.Client.Delete(context.TODO(), namespaces.TestingNamespace) Expect(err).ToNot(HaveOccurred()) err = namespaces.WaitForDeletion(testutils.NamespaceTesting, 5*time.Minute) - nodeinspector.Delete(context.TODO()) + Expect(err).ToNot(HaveOccurred()) + Expect(nodeinspector.Delete(context.TODO())).To(Succeed()) }) func TestPerformanceUpdate(t *testing.T) { diff --git a/test/e2e/performanceprofile/functests/2_performance_update/updating_profile.go b/test/e2e/performanceprofile/functests/2_performance_update/updating_profile.go index 98c71fb6b2..3aef48a459 100644 --- a/test/e2e/performanceprofile/functests/2_performance_update/updating_profile.go +++ b/test/e2e/performanceprofile/functests/2_performance_update/updating_profile.go @@ -20,7 +20,7 @@ import ( "k8s.io/klog" kubeletconfigv1beta1 "k8s.io/kubelet/config/v1beta1" "k8s.io/utils/cpuset" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" machineconfigv1 "github.com/openshift/api/machineconfiguration/v1" @@ -130,7 +130,7 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance Expect(err).ToNot(HaveOccurred()) if len(numaInfo) < 2 { skipTests = true - klog.Infof(fmt.Sprintf("This test need 2 NUMA nodes.The number of NUMA nodes on node %s < 2", node.Name)) + klog.Infof("This test need 2 NUMA nodes. 
The number of NUMA nodes on node %s < 2", node.Name) return } } @@ -151,7 +151,7 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance By("Modifying profile") profile.Spec.CPU = &performancev2.CPU{ - BalanceIsolated: pointer.Bool(false), + BalanceIsolated: ptr.To(false), Reserved: &reserved, Isolated: &isolated, } @@ -161,17 +161,17 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance { Count: hpCntOnNuma0, Size: hpSize2M, - Node: pointer.Int32(0), + Node: ptr.To(int32(0)), }, { Count: hpCntOnNuma1, Size: hpSize2M, - Node: pointer.Int32(1), + Node: ptr.To(int32(1)), }, }, } profile.Spec.RealTimeKernel = &performancev2.RealTimeKernel{ - Enabled: pointer.Bool(true), + Enabled: ptr.To(true), } By("Updating the performance profile") @@ -247,7 +247,7 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance }, } profile.Spec.CPU = &performancev2.CPU{ - BalanceIsolated: pointer.Bool(false), + BalanceIsolated: ptr.To(false), Reserved: &reserved, Isolated: &isolated, } @@ -255,7 +255,7 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance TopologyPolicy: &policy, } profile.Spec.RealTimeKernel = &performancev2.RealTimeKernel{ - Enabled: pointer.Bool(false), + Enabled: ptr.To(false), } if profile.Spec.AdditionalKernelArgs == nil { @@ -305,6 +305,7 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance DescribeTable("Verify that kubelet parameters were updated", func(ctx context.Context, cmdFn checkFunction, getterFn func(kubeletCfg *kubeletconfigv1beta1.KubeletConfiguration) string, wantedValue string) { for _, node := range workerRTNodes { result, err := cmdFn(ctx, &node) + Expect(err).ToNot(HaveOccurred()) obj, err := manifestsutil.DeserializeObjectFromData([]byte(result), kubeletconfigv1beta1.AddToScheme) Expect(err).ToNot(HaveOccurred()) @@ -342,7 +343,7 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance It("[test_id:22764] verify that by default RT kernel is disabled", func() { conditionUpdating := machineconfigv1.MachineConfigPoolUpdating - if profile.Spec.RealTimeKernel == nil || *profile.Spec.RealTimeKernel.Enabled == true { + if profile.Spec.RealTimeKernel == nil || *profile.Spec.RealTimeKernel.Enabled { Skip("Skipping test - This test expects RT Kernel to be disabled. 
Found it to be enabled or nil.") } profile.Spec.RealTimeKernel = nil @@ -465,6 +466,7 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance It("[test_id:28440]Verifies that nodeSelector can be updated in performance profile", func() { kubeletConfig, err := nodes.GetKubeletConfig(context.TODO(), newCnfNode) + Expect(err).ToNot(HaveOccurred()) Expect(kubeletConfig.TopologyManagerPolicy).ToNot(BeEmpty()) out, err := nodes.ExecCommand(context.TODO(), newCnfNode, chkCmdLine) Expect(err).ToNot(HaveOccurred(), "failed to execute %s", chkCmdLine) @@ -494,11 +496,12 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance Expect(cmdline).NotTo(ContainSubstring("tuned.non_isolcpus")) kblcfg, err := nodes.GetKubeletConfig(context.TODO(), newCnfNode) + Expect(err).ToNot(HaveOccurred()) Expect(kblcfg.ReservedSystemCPUs).NotTo(ContainSubstring("reservedSystemCPUs")) }) AfterEach(func() { - if labelsDeletion == false { + if !labelsDeletion { removeLabels(profile.Spec.NodeSelector, newCnfNode) } @@ -509,6 +512,7 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance nodeSelector := strings.Join(selectorLabels, ",") profile.Spec.NodeSelector = oldNodeSelector spec, err := json.Marshal(profile.Spec) + Expect(err).ToNot(HaveOccurred()) Expect(testclient.Client.Patch(context.TODO(), profile, client.RawPatch( types.JSONPatchType, @@ -621,6 +625,7 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance Expect(err).ToNot(HaveOccurred()) offlinedOutput := testutils.ToString(out) offlinedCPUSet, err := cpuset.Parse(offlinedOutput) + Expect(err).ToNot(HaveOccurred()) offlinedCPUSetProfile, err := cpuset.Parse(string(offlined)) Expect(err).ToNot(HaveOccurred()) Expect(offlinedCPUSet.Equals(offlinedCPUSetProfile)) @@ -690,6 +695,7 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance Expect(err).ToNot(HaveOccurred()) offlinedOutput := testutils.ToString(out) offlinedCPUSet, err := cpuset.Parse(offlinedOutput) + Expect(err).ToNot(HaveOccurred()) offlinedCPUSetProfile, err := cpuset.Parse(string(offlinedSet)) Expect(err).ToNot(HaveOccurred()) Expect(offlinedCPUSet.Equals(offlinedCPUSetProfile)) @@ -755,6 +761,7 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance Expect(err).ToNot(HaveOccurred()) offlinedOutput := testutils.ToString(out) offlinedCPUSet, err := cpuset.Parse(offlinedOutput) + Expect(err).ToNot(HaveOccurred()) offlinedCPUSetProfile, err := cpuset.Parse(string(offlinedSet)) Expect(err).ToNot(HaveOccurred()) Expect(offlinedCPUSet.Equals(offlinedCPUSetProfile)) @@ -768,7 +775,7 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance Skip(fmt.Sprintf("This test need 2 NUMA nodes, available only %d", len(numaCoreSiblings))) } if len(numaCoreSiblings[0]) < 20 { - Skip(fmt.Sprintf("This test needs systems with at least 20 cores per socket")) + Skip("This test needs systems with at least 20 cores per socket") } // Get reserved core siblings from 0, 1 for reservedCores := 0; reservedCores < 2; reservedCores++ { @@ -830,6 +837,7 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance Expect(err).ToNot(HaveOccurred()) offlinedOutput := testutils.ToString(out) offlinedCPUSet, err := cpuset.Parse(offlinedOutput) + Expect(err).ToNot(HaveOccurred()) offlinedCPUSetProfile, err := cpuset.Parse(string(offlinedSet)) Expect(err).ToNot(HaveOccurred()) 
Expect(offlinedCPUSet.Equals(offlinedCPUSetProfile)) @@ -898,7 +906,7 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance } } } - return fmt.Sprint("Profile applied successfully") + return "Profile applied successfully" }, 10*time.Minute, 5*time.Second).Should(ContainSubstring("isolated and offlined cpus overlap")) }) @@ -961,6 +969,7 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance Expect(err).ToNot(HaveOccurred()) offlinedOutput := testutils.ToString(out) offlinedCPUSet, err := cpuset.Parse(offlinedOutput) + Expect(err).ToNot(HaveOccurred()) offlinedCPUSetProfile, err := cpuset.Parse(string(offlinedSet)) Expect(err).ToNot(HaveOccurred()) Expect(offlinedCPUSet.Equals(offlinedCPUSetProfile)) @@ -1126,13 +1135,13 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance It("[test_id:54191]Verify RPS Mask is not applied when RealtimeHint is disabled", func() { By("Modifying profile") profile.Spec.WorkloadHints = &performancev2.WorkloadHints{ - HighPowerConsumption: pointer.Bool(false), - RealTime: pointer.Bool(false), - PerPodPowerManagement: pointer.Bool(false), + HighPowerConsumption: ptr.To(false), + RealTime: ptr.To(false), + PerPodPowerManagement: ptr.To(false), } profile.Spec.RealTimeKernel = &performancev2.RealTimeKernel{ - Enabled: pointer.Bool(false), + Enabled: ptr.To(false), } By("Updating the performance profile") profiles.UpdateWithRetry(profile) @@ -1197,7 +1206,7 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance } for i := 0; i < len(workerRTNodes); i++ { - By("Determing the default container runtime used in the node") + By("Determining the default container runtime used in the node") tunedPod, err := tuned.GetPod(context.TODO(), &workerRTNodes[i]) Expect(err).ToNot(HaveOccurred()) expectedRuntime, err = runtime.GetContainerRuntimeTypeFor(context.TODO(), testclient.Client, tunedPod) diff --git a/test/e2e/performanceprofile/functests/4_latency/latency.go b/test/e2e/performanceprofile/functests/4_latency/latency.go index eba435d090..0e86950d52 100644 --- a/test/e2e/performanceprofile/functests/4_latency/latency.go +++ b/test/e2e/performanceprofile/functests/4_latency/latency.go @@ -31,7 +31,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/utils/cpuset" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -373,7 +373,7 @@ func getLatencyTestPod(profile *performancev2.PerformanceProfile, node *corev1.N }, }, SecurityContext: &corev1.SecurityContext{ - Privileged: pointer.Bool(true), + Privileged: ptr.To(true), }, VolumeMounts: []corev1.VolumeMount{ { diff --git a/test/e2e/performanceprofile/functests/4_latency/test_suite_latency_test.go b/test/e2e/performanceprofile/functests/4_latency/test_suite_latency_test.go index 67a2ff8e51..2547db7154 100644 --- a/test/e2e/performanceprofile/functests/4_latency/test_suite_latency_test.go +++ b/test/e2e/performanceprofile/functests/4_latency/test_suite_latency_test.go @@ -55,7 +55,8 @@ var _ = AfterSuite(func() { err := testclient.Client.Delete(context.TODO(), namespaces.TestingNamespace) Expect(err).ToNot(HaveOccurred()) err = namespaces.WaitForDeletion(testutils.NamespaceTesting, 5*time.Minute) - nodeinspector.Delete(context.TODO()) + Expect(err).ToNot(HaveOccurred()) + Expect(nodeinspector.Delete(context.TODO())).To(Succeed()) }) func TestLatency(t *testing.T) { diff --git 
a/test/e2e/performanceprofile/functests/5_latency_testing/5_latency_testing_suite_test.go b/test/e2e/performanceprofile/functests/5_latency_testing/5_latency_testing_suite_test.go index ae4751eaec..3e9ff418e8 100644 --- a/test/e2e/performanceprofile/functests/5_latency_testing/5_latency_testing_suite_test.go +++ b/test/e2e/performanceprofile/functests/5_latency_testing/5_latency_testing_suite_test.go @@ -105,7 +105,7 @@ var _ = AfterSuite(func() { testlog.Errorf("could not restore the initial profile: %v", err) } } - nodeinspector.Delete(context.TODO()) + Expect(nodeinspector.Delete(context.TODO())).To(Succeed()) }) func Test5LatencyTesting(t *testing.T) { diff --git a/test/e2e/performanceprofile/functests/5_latency_testing/latency_testing.go b/test/e2e/performanceprofile/functests/5_latency_testing/latency_testing.go index 5595ba0e73..24d2b7e5f3 100644 --- a/test/e2e/performanceprofile/functests/5_latency_testing/latency_testing.go +++ b/test/e2e/performanceprofile/functests/5_latency_testing/latency_testing.go @@ -96,6 +96,8 @@ var _ = DescribeTable("Test latency measurement tools tests", func(testGroup []l format.MaxLength = 0 var output []byte var err error + skipInsufficientCpuRegex := regexp.MustCompile(skipInsufficientCpu) + successRegex := regexp.MustCompile(success) for _, test := range testGroup { clearEnv() testDescription := setEnvAndGetDescription(test) @@ -114,10 +116,7 @@ var _ = DescribeTable("Test latency measurement tools tests", func(testGroup []l testlog.Info(err.Error()) } - ok, matchErr := regexp.MatchString(skipInsufficientCpu, string(output)) - if matchErr != nil { - testlog.Error(matchErr.Error()) - } + ok := skipInsufficientCpuRegex.MatchString(string(output)) if ok { testlog.Info(skipInsufficientCpu) continue @@ -129,10 +128,7 @@ var _ = DescribeTable("Test latency measurement tools tests", func(testGroup []l } Expect(string(output)).NotTo(MatchRegexp(unexpectedError), "Unexpected error was detected in a positive test") //Check runtime argument in the pod's log only if the tool is expected to be executed - ok, matchErr := regexp.MatchString(success, string(output)) - if matchErr != nil { - testlog.Error(matchErr.Error()) - } + ok := successRegex.MatchString(string(output)) if ok { //verify the command is executed with the expected args //this lists of args depend on the ones the latency tool runners adds to tool command in cnf-features-deploy. 
@@ -231,7 +227,7 @@ func getValidValuesTests(toolToTest string) []latencyTest { //testCpus: for tests that expect a success output message, note that an even CPU number is needed, otherwise the test would fail with SMTAlignmentError successRuntime := "30" - // Using a timeout value such that ginkgo timeout > runtime + latency to ensure successfull runs + // Using a timeout value such that ginkgo timeout > runtime + latency to ensure successful runs successGinkgoTimeout := "200s" testSet = append(testSet, latencyTest{testDelay: "140", testRuntime: successRuntime, testMaxLatency: untunedLatencyThreshold, testCpus: "4", outputMsgs: []string{success}, toolToTest: toolToTest, ginkgoTimeout: successGinkgoTimeout}) testSet = append(testSet, latencyTest{testDelay: "0", testRuntime: successRuntime, testMaxLatency: untunedLatencyThreshold, testCpus: "4", outputMsgs: []string{success}, toolToTest: toolToTest, ginkgoTimeout: successGinkgoTimeout}) diff --git a/test/e2e/performanceprofile/functests/6_mustgather_testing/test_suite_mustgather_test.go b/test/e2e/performanceprofile/functests/6_mustgather_testing/test_suite_mustgather_test.go index af9b1bd58e..2b6862643c 100644 --- a/test/e2e/performanceprofile/functests/6_mustgather_testing/test_suite_mustgather_test.go +++ b/test/e2e/performanceprofile/functests/6_mustgather_testing/test_suite_mustgather_test.go @@ -46,7 +46,7 @@ var _ = BeforeSuite(func() { var _ = AfterSuite(func() { os.RemoveAll(destDir) - nodeinspector.Delete(context.TODO()) + Expect(nodeinspector.Delete(context.TODO())).To(Succeed()) }) func TestPaoMustgatherTests(t *testing.T) { diff --git a/test/e2e/performanceprofile/functests/7_performance_kubelet_node/cgroups.go b/test/e2e/performanceprofile/functests/7_performance_kubelet_node/cgroups.go index 417e61b555..17cdf44af5 100644 --- a/test/e2e/performanceprofile/functests/7_performance_kubelet_node/cgroups.go +++ b/test/e2e/performanceprofile/functests/7_performance_kubelet_node/cgroups.go @@ -292,6 +292,7 @@ var _ = Describe("[performance] Cgroups and affinity", Ordered, Label(string(lab // we don't need to check cpus used by all the containers // we take first container containerPid, err := nodes.ContainerPid(ctx, workerRTNode, ovnContainerids[0]) + Expect(err).ToNot(HaveOccurred()) // we need to wait as process affinity can change time.Sleep(30 * time.Second) ctnCpuset := taskSet(ctx, containerPid, workerRTNode) @@ -383,6 +384,7 @@ var _ = Describe("[performance] Cgroups and affinity", Ordered, Label(string(lab tasksetcmd := []string{"taskset", "-pc", "1"} testpod1Cpus, err := pods.ExecCommandOnPod(testclient.K8sClient, testpod1, "", tasksetcmd) + Expect(err).ToNot(HaveOccurred()) testlog.Infof("%v pod is using %v cpus", testpod1.Name, string(testpod1Cpus)) // Create testpod2 @@ -403,6 +405,7 @@ var _ = Describe("[performance] Cgroups and affinity", Ordered, Label(string(lab By("fetch cpus used by container process using taskset") testpod2Cpus, err := pods.ExecCommandOnPod(testclient.K8sClient, testpod2, "", tasksetcmd) + Expect(err).ToNot(HaveOccurred()) testlog.Infof("%v pod is using %v cpus", testpod2.Name, string(testpod2Cpus)) // Get cpus used by the ovnkubenode-pods containers @@ -410,6 +413,7 @@ var _ = Describe("[performance] Cgroups and affinity", Ordered, Label(string(lab ovnContainers, err := ovnPodContainers(&ovnPod) Expect(err).ToNot(HaveOccurred()) containerPid, err := nodes.ContainerPid(context.TODO(), workerRTNode, ovnContainers[0]) + Expect(err).ToNot(HaveOccurred()) // we need to wait as process affinity can 
change time.Sleep(30 * time.Second) ovnContainerCpuset1 := taskSet(ctx, containerPid, workerRTNode) @@ -487,6 +491,7 @@ var _ = Describe("[performance] Cgroups and affinity", Ordered, Label(string(lab ovnContainerids, err := ovnPodContainers(&ovnPod) Expect(err).ToNot(HaveOccurred()) containerPid, err := nodes.ContainerPid(ctx, workerRTNode, ovnContainerids[0]) + Expect(err).ToNot(HaveOccurred()) // we need to wait as process affinity can change time.Sleep(30 * time.Second) ovnContainerCpuset := taskSet(ctx, containerPid, workerRTNode) @@ -535,11 +540,13 @@ var _ = Describe("[performance] Cgroups and affinity", Ordered, Label(string(lab } err = waitForCondition(dp, desiredStatus) + Expect(err).ToNot(HaveOccurred()) ovnPodAfterReboot, err := ovnCnfNodePod(ctx, workerRTNode) Expect(err).ToNot(HaveOccurred(), "Unable to get ovnPod") ovnContainerIdsAfterReboot, err := ovnPodContainers(&ovnPodAfterReboot) Expect(err).ToNot(HaveOccurred()) containerPid, err = nodes.ContainerPid(ctx, workerRTNode, ovnContainerIdsAfterReboot[0]) + Expect(err).ToNot(HaveOccurred()) // we need to wait as process affinity can change time.Sleep(30 * time.Second) ovnContainerCpusetAfterReboot := taskSet(ctx, containerPid, workerRTNode) @@ -630,7 +637,7 @@ var _ = Describe("[performance] Cgroups and affinity", Ordered, Label(string(lab return false } for _, s := range podList.Items[0].Status.ContainerStatuses { - if s.Ready == false { + if !s.Ready { return false } } @@ -646,6 +653,7 @@ var _ = Describe("[performance] Cgroups and affinity", Ordered, Label(string(lab testpodCpus := bytes.Split(outputb, []byte(":")) testlog.Infof("%v pod is using cpus %v", pod.Name, string(testpodCpus[1])) podcpus, err := cpuset.Parse(strings.TrimSpace(string(testpodCpus[1]))) + Expect(err).ToNot(HaveOccurred()) for _, line := range postDeploymentThreadAffinity { if line != "" { cpumask := strings.Split(line, ":") @@ -689,7 +697,7 @@ var _ = Describe("[performance] Cgroups and affinity", Ordered, Label(string(lab return false } for _, s := range podList.Items[0].Status.ContainerStatuses { - if s.Ready == false { + if !s.Ready { return false } } @@ -705,6 +713,7 @@ var _ = Describe("[performance] Cgroups and affinity", Ordered, Label(string(lab testpodCpus := bytes.Split(outputb, []byte(":")) testlog.Infof("%v pod is using cpus %v", pod.Name, string(testpodCpus[1])) podcpus, err := cpuset.Parse(strings.TrimSpace(string(testpodCpus[1]))) + Expect(err).ToNot(HaveOccurred()) for _, line := range refresshedThreadAffinity { if line != "" { cpumask := strings.Split(line, ":") @@ -812,15 +821,16 @@ func ovnCnfNodePod(ctx context.Context, workerNode *corev1.Node) (corev1.Pod, er // ovnPodContainers returns containerids of all containers running inside ovn kube node pod func ovnPodContainers(ovnKubeNodePod *corev1.Pod) ([]string, error) { var ovnKubeNodePodContainerids []string - var err error + var errRet error for _, ovnctn := range ovnKubeNodePod.Spec.Containers { ctnName, err := pods.GetContainerIDByName(ovnKubeNodePod, ovnctn.Name) if err != nil { err = fmt.Errorf("unable to fetch container id of %v", ovnctn) + errRet = err } ovnKubeNodePodContainerids = append(ovnKubeNodePodContainerids, ctnName) } - return ovnKubeNodePodContainerids, err + return ovnKubeNodePodContainerids, errRet } // getCPUMaskForPids returns cpu mask of ovs process pids @@ -922,7 +932,7 @@ func ovsSystemdServicesOnOvsSlice(ctx context.Context, workerRTNode *corev1.Node // ovsPids Returns Pid of ovs services running inside the ovs.slice cgroup func ovsPids(ctx 
context.Context, ovsSystemdServices []string, workerRTNode *corev1.Node) ([]string, error) { var pidList []string - var err error + var errRet error for _, service := range ovsSystemdServices { //we need to ignore oneshot services which are part of ovs.slices serviceType, err := systemd.ShowProperty(ctx, service, "Type", workerRTNode) @@ -933,10 +943,13 @@ func ovsPids(ctx context.Context, ovsSystemdServices []string, workerRTNode *cor continue } pid, err := systemd.ShowProperty(context.TODO(), service, "ExecMainPID", workerRTNode) + if err != nil { + errRet = err + } ovsPid := strings.Split(strings.TrimSpace(pid), "=") pidList = append(pidList, ovsPid[1]) } - return pidList, err + return pidList, errRet } // taskSet returns cpus used by the pid diff --git a/test/e2e/performanceprofile/functests/7_performance_kubelet_node/kubelet.go b/test/e2e/performanceprofile/functests/7_performance_kubelet_node/kubelet.go index 5fa84a8285..2c5d04f1c9 100644 --- a/test/e2e/performanceprofile/functests/7_performance_kubelet_node/kubelet.go +++ b/test/e2e/performanceprofile/functests/7_performance_kubelet_node/kubelet.go @@ -43,7 +43,9 @@ var _ = Describe("[ref_id: 45487][performance]additional kubelet arguments", Ord ) testutils.CustomBeforeAll(func() { - workerRTNodes, err := nodes.GetByLabels(testutils.NodeSelectorLabels) + var err error + + workerRTNodes, err = nodes.GetByLabels(testutils.NodeSelectorLabels) Expect(err).ToNot(HaveOccurred()) workerRTNodes, err = nodes.MatchingOptionalSelector(workerRTNodes) @@ -60,6 +62,7 @@ var _ = Describe("[ref_id: 45487][performance]additional kubelet arguments", Ord } } else { hostedClusterName, err := hypershift.GetHostedClusterName() + Expect(err).ToNot(HaveOccurred()) np, err := nodepools.GetByClusterName(ctx, testclient.ControlPlaneClient, hostedClusterName) Expect(err).ToNot(HaveOccurred()) poolName = client.ObjectKeyFromObject(np).String() @@ -162,7 +165,9 @@ var _ = Describe("[ref_id: 45487][performance]additional kubelet arguments", Ord evictionMemory := kubeletConfig.EvictionHard["memory.available"] kubeReserved := kubeletConfig.KubeReserved["memory"] evictionMemoryInt, err := strconv.ParseInt(strings.TrimSuffix(evictionMemory, "Mi"), 10, 64) + Expect(err).ToNot(HaveOccurred()) kubeReservedMemoryInt, err := strconv.ParseInt(strings.TrimSuffix(kubeReserved, "Mi"), 10, 64) + Expect(err).ToNot(HaveOccurred()) systemReservedResource := resource.NewQuantity(300*1024*1024, resource.BinarySI) kubeReservedMemoryResource := resource.NewQuantity(kubeReservedMemoryInt*1024*1024, resource.BinarySI) evictionMemoryResource := resource.NewQuantity(evictionMemoryInt*1024*1024, resource.BinarySI) @@ -173,7 +178,7 @@ var _ = Describe("[ref_id: 45487][performance]additional kubelet arguments", Ord } }) It("[test_id:45495] Test setting PAO managed parameters", func() { - var paoParameters string = "" + var paoParameters string if *profile.Spec.NUMA.TopologyPolicy == "single-numa-node" { paoParameters = "{\"topologyManagerPolicy\":\"restricted\"}" } else { diff --git a/test/e2e/performanceprofile/functests/7_performance_kubelet_node/test_suite_performance_kubelet_node_test.go b/test/e2e/performanceprofile/functests/7_performance_kubelet_node/test_suite_performance_kubelet_node_test.go index 977205a0e8..ba8858f189 100644 --- a/test/e2e/performanceprofile/functests/7_performance_kubelet_node/test_suite_performance_kubelet_node_test.go +++ b/test/e2e/performanceprofile/functests/7_performance_kubelet_node/test_suite_performance_kubelet_node_test.go @@ -43,7 +43,8 @@ var _ =
AfterSuite(func() { err := testclient.DataPlaneClient.Delete(context.TODO(), namespaces.TestingNamespace) Expect(err).ToNot(HaveOccurred()) err = namespaces.WaitForDeletion(testutils.NamespaceTesting, 5*time.Minute) - nodeinspector.Delete(context.TODO()) + Expect(err).ToNot(HaveOccurred()) + Expect(nodeinspector.Delete(context.TODO())).To(Succeed()) }) func TestPerformanceKubelet(t *testing.T) { diff --git a/test/e2e/performanceprofile/functests/8_performance_workloadhints/test_suite_performance_workloadhints_test.go b/test/e2e/performanceprofile/functests/8_performance_workloadhints/test_suite_performance_workloadhints_test.go index c6f5901f96..62bb3db76d 100644 --- a/test/e2e/performanceprofile/functests/8_performance_workloadhints/test_suite_performance_workloadhints_test.go +++ b/test/e2e/performanceprofile/functests/8_performance_workloadhints/test_suite_performance_workloadhints_test.go @@ -42,7 +42,8 @@ var _ = AfterSuite(func() { err := testclient.DataPlaneClient.Delete(context.TODO(), namespaces.TestingNamespace) Expect(err).ToNot(HaveOccurred()) err = namespaces.WaitForDeletion(testutils.NamespaceTesting, 5*time.Minute) - nodeinspector.Delete(context.TODO()) + Expect(err).ToNot(HaveOccurred()) + Expect(nodeinspector.Delete(context.TODO())).To(Succeed()) }) func TestPerformanceUpdate(t *testing.T) { diff --git a/test/e2e/performanceprofile/functests/8_performance_workloadhints/workloadhints.go b/test/e2e/performanceprofile/functests/8_performance_workloadhints/workloadhints.go index 87d147e720..c18a72274c 100644 --- a/test/e2e/performanceprofile/functests/8_performance_workloadhints/workloadhints.go +++ b/test/e2e/performanceprofile/functests/8_performance_workloadhints/workloadhints.go @@ -19,7 +19,7 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/klog" "k8s.io/utils/cpuset" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" machineconfigv1 "github.com/openshift/api/machineconfiguration/v1" @@ -96,7 +96,7 @@ var _ = Describe("[rfe_id:49062][workloadHints] Telco friendly workload specific profile.Spec.WorkloadHints = nil profile.Spec.RealTimeKernel = &performancev2.RealTimeKernel{ - Enabled: pointer.Bool(false), + Enabled: ptr.To(false), } // If current workload hints already contains the changes skip updating if !(cmp.Equal(currentWorkloadHints, profile.Spec.WorkloadHints)) { @@ -152,11 +152,11 @@ var _ = Describe("[rfe_id:49062][workloadHints] Telco friendly workload specific currentWorkloadHints := profile.Spec.WorkloadHints By("Modifying profile") profile.Spec.WorkloadHints = &performancev2.WorkloadHints{ - HighPowerConsumption: pointer.Bool(false), - RealTime: pointer.Bool(true), + HighPowerConsumption: ptr.To(false), + RealTime: ptr.To(true), } profile.Spec.RealTimeKernel = &performancev2.RealTimeKernel{ - Enabled: pointer.Bool(false), + Enabled: ptr.To(false), } // If current workload hints already contains the changes skip updating @@ -211,12 +211,12 @@ var _ = Describe("[rfe_id:49062][workloadHints] Telco friendly workload specific currentWorkloadHints := profile.Spec.WorkloadHints By("Modifying profile") profile.Spec.WorkloadHints = &performancev2.WorkloadHints{ - HighPowerConsumption: pointer.Bool(true), - RealTime: pointer.Bool(false), + HighPowerConsumption: ptr.To(true), + RealTime: ptr.To(false), } profile.Spec.RealTimeKernel = &performancev2.RealTimeKernel{ - Enabled: pointer.Bool(false), + Enabled: ptr.To(false), } // If current workload hints already contains the changes skip updating @@ -270,9 +270,9 @@ var _ = 
Describe("[rfe_id:49062][workloadHints] Telco friendly workload specific It("[test_id:50993][crit:high][vendor:cnf-qe@redhat.com][level:acceptance]should update kernel arguments and tuned accordingly", Label(string(label.Slow)), func() { currentWorkloadHints := profile.Spec.WorkloadHints profile.Spec.WorkloadHints = &performancev2.WorkloadHints{ - HighPowerConsumption: pointer.Bool(true), - RealTime: pointer.Bool(true), - PerPodPowerManagement: pointer.Bool(false), + HighPowerConsumption: ptr.To(true), + RealTime: ptr.To(true), + PerPodPowerManagement: ptr.To(false), } // If current workload hints already contains the changes // skip mcp wait @@ -330,9 +330,9 @@ var _ = Describe("[rfe_id:49062][workloadHints] Telco friendly workload specific It("[test_id:54177]should update kernel arguments and tuned accordingly", Label(string(label.Slow)), func() { currentWorkloadHints := profile.Spec.WorkloadHints profile.Spec.WorkloadHints = &performancev2.WorkloadHints{ - PerPodPowerManagement: pointer.Bool(true), - HighPowerConsumption: pointer.Bool(false), - RealTime: pointer.Bool(true), + PerPodPowerManagement: ptr.To(true), + HighPowerConsumption: ptr.To(false), + RealTime: ptr.To(true), } if !(cmp.Equal(currentWorkloadHints, profile.Spec.WorkloadHints)) { By("Patching the performance profile with workload hints") @@ -375,13 +375,13 @@ var _ = Describe("[rfe_id:49062][workloadHints] Telco friendly workload specific currentWorkloadHints := profile.Spec.WorkloadHints By("Modifying profile") profile.Spec.WorkloadHints = &performancev2.WorkloadHints{ - HighPowerConsumption: pointer.Bool(true), - RealTime: pointer.Bool(true), - PerPodPowerManagement: pointer.Bool(false), + HighPowerConsumption: ptr.To(true), + RealTime: ptr.To(true), + PerPodPowerManagement: ptr.To(false), } if !*profile.Spec.RealTimeKernel.Enabled { profile.Spec.RealTimeKernel = &performancev2.RealTimeKernel{ - Enabled: pointer.Bool(true), + Enabled: ptr.To(true), } } if !(cmp.Equal(currentWorkloadHints, profile.Spec.WorkloadHints)) { @@ -434,13 +434,13 @@ var _ = Describe("[rfe_id:49062][workloadHints] Telco friendly workload specific //Update the profile to disable HighPowerConsumption and enable PerPodPowerManagment profile.Spec.WorkloadHints = &performancev2.WorkloadHints{ - HighPowerConsumption: pointer.Bool(false), - RealTime: pointer.Bool(true), - PerPodPowerManagement: pointer.Bool(true), + HighPowerConsumption: ptr.To(false), + RealTime: ptr.To(true), + PerPodPowerManagement: ptr.To(true), } if !*profile.Spec.RealTimeKernel.Enabled { profile.Spec.RealTimeKernel = &performancev2.RealTimeKernel{ - Enabled: pointer.Bool(true), + Enabled: ptr.To(true), } } @@ -498,13 +498,13 @@ var _ = Describe("[rfe_id:49062][workloadHints] Telco friendly workload specific // First enable HighPowerConsumption By("Modifying profile") profile.Spec.WorkloadHints = &performancev2.WorkloadHints{ - HighPowerConsumption: pointer.Bool(false), - RealTime: pointer.Bool(true), - PerPodPowerManagement: pointer.Bool(true), + HighPowerConsumption: ptr.To(false), + RealTime: ptr.To(true), + PerPodPowerManagement: ptr.To(true), } if !*profile.Spec.RealTimeKernel.Enabled { profile.Spec.RealTimeKernel = &performancev2.RealTimeKernel{ - Enabled: pointer.Bool(true), + Enabled: ptr.To(true), } } if !(cmp.Equal(currentWorkloadHints, profile.Spec.WorkloadHints)) { @@ -554,13 +554,13 @@ var _ = Describe("[rfe_id:49062][workloadHints] Telco friendly workload specific //Update the profile to enable HighPowerConsumption and disable PerPodPowerManagment 
profile.Spec.WorkloadHints = &performancev2.WorkloadHints{ - HighPowerConsumption: pointer.Bool(true), - RealTime: pointer.Bool(true), - PerPodPowerManagement: pointer.Bool(false), + HighPowerConsumption: ptr.To(true), + RealTime: ptr.To(true), + PerPodPowerManagement: ptr.To(false), } if !*profile.Spec.RealTimeKernel.Enabled { profile.Spec.RealTimeKernel = &performancev2.RealTimeKernel{ - Enabled: pointer.Bool(true), + Enabled: ptr.To(true), } } By("Updating the performance profile") @@ -612,9 +612,9 @@ var _ = Describe("[rfe_id:49062][workloadHints] Telco friendly workload specific It("[test_id:54184]Verify enabling both HighPowerConsumption and PerPodPowerManagment fails", Label(string(label.Tier0)), func() { profile.Spec.WorkloadHints = &performancev2.WorkloadHints{ - PerPodPowerManagement: pointer.Bool(true), - HighPowerConsumption: pointer.Bool(true), - RealTime: pointer.Bool(true), + PerPodPowerManagement: ptr.To(true), + HighPowerConsumption: ptr.To(true), + RealTime: ptr.To(true), } if hypershift.IsHypershiftCluster() { hostedClusterName, err := hypershift.GetHostedClusterName() @@ -638,7 +638,7 @@ var _ = Describe("[rfe_id:49062][workloadHints] Telco friendly workload specific statusErr, _ := err.(*errors.StatusError) return statusErr.Status().Message } - return fmt.Sprint("Profile applied successfully") + return "Profile applied successfully" }, time.Minute, 5*time.Second).Should(ContainSubstring("HighPowerConsumption and PerPodPowerManagement can not be both enabled")) } }) @@ -651,9 +651,9 @@ var _ = Describe("[rfe_id:49062][workloadHints] Telco friendly workload specific checkHardwareCapability(context.TODO(), workerRTNodes) currentWorkloadHints := profile.Spec.WorkloadHints profile.Spec.WorkloadHints = &performancev2.WorkloadHints{ - PerPodPowerManagement: pointer.Bool(true), - HighPowerConsumption: pointer.Bool(false), - RealTime: pointer.Bool(true), + PerPodPowerManagement: ptr.To(true), + HighPowerConsumption: ptr.To(false), + RealTime: ptr.To(true), } if !(cmp.Equal(currentWorkloadHints, profile.Spec.WorkloadHints)) { By("Updating the performance profile") @@ -755,9 +755,9 @@ var _ = Describe("[rfe_id:49062][workloadHints] Telco friendly workload specific checkHardwareCapability(context.TODO(), workerRTNodes) currentWorkloadHints := profile.Spec.WorkloadHints profile.Spec.WorkloadHints = &performancev2.WorkloadHints{ - PerPodPowerManagement: pointer.Bool(false), - HighPowerConsumption: pointer.Bool(true), - RealTime: pointer.Bool(true), + PerPodPowerManagement: ptr.To(false), + HighPowerConsumption: ptr.To(true), + RealTime: ptr.To(true), } if !(cmp.Equal(currentWorkloadHints, profile.Spec.WorkloadHints)) { By("Updating the performance profile") @@ -805,9 +805,12 @@ var _ = Describe("[rfe_id:49062][workloadHints] Telco friendly workload specific Expect(err).ToNot(HaveOccurred()) pid, err := nodes.ContainerPid(context.TODO(), &workerRTNodes[0], containerID) + Expect(err).ToNot(HaveOccurred()) cmd := []string{"cat", fmt.Sprintf("/rootfs/proc/%s/cgroup", pid)} out, err := nodes.ExecCommand(context.TODO(), &workerRTNodes[0], cmd) + Expect(err).ToNot(HaveOccurred()) containerCgroup, err = cgroup.PidParser(out) + Expect(err).ToNot(HaveOccurred()) cgroupv2, err := cgroup.IsVersion2(context.TODO(), testclient.DataPlaneClient) Expect(err).ToNot(HaveOccurred()) if cgroupv2 { @@ -823,12 +826,14 @@ var _ = Describe("[rfe_id:49062][workloadHints] Telco friendly workload specific Expect(err).ToNot(HaveOccurred()) output := testutils.ToString(out) cpus, err := cpuset.Parse(output) + 
Expect(err).ToNot(HaveOccurred()) targetCpus := cpus.List() err = checkCpuGovernorsAndResumeLatency(context.TODO(), targetCpus, &workerRTNodes[0], "n/a", "performance") Expect(err).ToNot(HaveOccurred()) By("Verify the rest of cpus have default power setting") var otherCpus []int numaInfo, err := nodes.GetNumaNodes(context.TODO(), &workerRTNodes[0]) + Expect(err).ToNot(HaveOccurred()) for _, cpusiblings := range numaInfo { for _, cpu := range cpusiblings { if cpu != targetCpus[0] && cpu != targetCpus[1] { @@ -838,10 +843,12 @@ var _ = Describe("[rfe_id:49062][workloadHints] Telco friendly workload specific } } //Verify cpus not assigned to the pod have default power settings err = checkCpuGovernorsAndResumeLatency(context.TODO(), otherCpus, &workerRTNodes[0], "0", "performance") + Expect(err).ToNot(HaveOccurred()) deleteTestPod(context.TODO(), testpod) //Test after pod is deleted the governors are set back to default for the cpus that were alloted to containers. By("Verify after pod is delete cpus assigned to container have default powersave settings") err = checkCpuGovernorsAndResumeLatency(context.TODO(), targetCpus, &workerRTNodes[0], "0", "performance") + Expect(err).ToNot(HaveOccurred()) }) }) @@ -940,7 +947,7 @@ func checkHardwareCapability(ctx context.Context, workerRTNodes []corev1.Node) { numaInfo, err := nodes.GetNumaNodes(ctx, &node) Expect(err).ToNot(HaveOccurred()) if len(numaInfo) < 2 { - Skip(fmt.Sprintf("This test need 2 NUMA nodes.The number of NUMA nodes on node %s < 2", node.Name)) + Skip(fmt.Sprintf("This test needs 2 NUMA nodes. The number of NUMA nodes on node %s < 2", node.Name)) } // Additional check so that test gets skipped on vm with fake numa out, err := nodes.ExecCommand(ctx, &node, []string{"nproc", "--all"}) diff --git a/test/e2e/performanceprofile/functests/9_reboot/devices.go b/test/e2e/performanceprofile/functests/9_reboot/devices.go index be502878f3..5534e91e91 100644 --- a/test/e2e/performanceprofile/functests/9_reboot/devices.go +++ b/test/e2e/performanceprofile/functests/9_reboot/devices.go @@ -123,7 +123,7 @@ var _ = Describe("[disruptive][node][kubelet][devicemanager] Device management t // Power loss scenarios, aka hard reboot, deferred to another test. // intentionally ignoring error. We need to tolerate connection error or disconnect // because the node is rebooting. - runCommandOnNodeThroughMCD(context.TODO(), node, "reboot", rebootNodeCommandMCD) + _ = runCommandOnNodeThroughMCD(context.TODO(), node, "reboot", rebootNodeCommandMCD) // this is (likely) a SNO. We need to tolerate connection errors, // because the apiserver is going down as well. // we intentionally use a generous timeout.
@@ -201,7 +201,7 @@ var _ = Describe("[disruptive][node][kubelet][devicemanager] Device management t testlog.Infof("pod %q %s/%s ready", podUID, updatedPod.Namespace, updatedPod.Name) // phase3: the kubelet restart - runCommandOnNodeThroughMCD(context.TODO(), node, "kubelet restart", kubeletRestartCommandMCD) + Expect(runCommandOnNodeThroughMCD(context.TODO(), node, "kubelet restart", kubeletRestartCommandMCD)).To(Succeed()) waitForNodeReadyOrFail("post restart", targetNode, 20*time.Minute, 3*time.Second) diff --git a/test/e2e/performanceprofile/functests/9_reboot/test_suite_reboot_test.go b/test/e2e/performanceprofile/functests/9_reboot/test_suite_reboot_test.go index ec686e6ce8..9fd47a44e8 100644 --- a/test/e2e/performanceprofile/functests/9_reboot/test_suite_reboot_test.go +++ b/test/e2e/performanceprofile/functests/9_reboot/test_suite_reboot_test.go @@ -43,7 +43,8 @@ var _ = AfterSuite(func() { err := testclient.Client.Delete(context.TODO(), namespaces.TestingNamespace) Expect(err).ToNot(HaveOccurred()) err = namespaces.WaitForDeletion(testutils.NamespaceTesting, 5*time.Minute) - nodeinspector.Delete(context.TODO()) + Expect(err).ToNot(HaveOccurred()) + Expect(nodeinspector.Delete(context.TODO())).To(Succeed()) }) func TestReboot(t *testing.T) { diff --git a/test/e2e/performanceprofile/functests/utils/cgroup/v2/v2.go b/test/e2e/performanceprofile/functests/utils/cgroup/v2/v2.go index f7c05a3dcb..77dc28064f 100644 --- a/test/e2e/performanceprofile/functests/utils/cgroup/v2/v2.go +++ b/test/e2e/performanceprofile/functests/utils/cgroup/v2/v2.go @@ -58,7 +58,7 @@ func (cm *ControllersManager) Cpu(ctx context.Context, pod *corev1.Pod, containe cfg.Quota = quotaAndPeriod[0] cfg.Period = quotaAndPeriod[1] cfg.Stat, err = stat(cm.k8sClient, pod, containerName, childName) - return cfg, nil + return cfg, err } // stat fetch cpu.stat values diff --git a/test/e2e/performanceprofile/functests/utils/consts.go b/test/e2e/performanceprofile/functests/utils/consts.go index a2e7dddce6..f91cafd937 100644 --- a/test/e2e/performanceprofile/functests/utils/consts.go +++ b/test/e2e/performanceprofile/functests/utils/consts.go @@ -13,7 +13,7 @@ import ( // RoleWorkerCNF contains role name of cnf worker nodes var RoleWorkerCNF string -// NodeSelectorLabels contains the node labels the perfomance profile should match +// NodeSelectorLabels contains the node labels the performance profile should match var NodeSelectorLabels map[string]string // PerformanceProfileName contains the name of the PerformanceProfile created for tests diff --git a/test/e2e/performanceprofile/functests/utils/infrastructure/vm.go b/test/e2e/performanceprofile/functests/utils/infrastructure/vm.go index 6d6cc4169f..47064eadc6 100644 --- a/test/e2e/performanceprofile/functests/utils/infrastructure/vm.go +++ b/test/e2e/performanceprofile/functests/utils/infrastructure/vm.go @@ -23,7 +23,7 @@ func IsVM(ctx context.Context, node *corev1.Node) (bool, error) { } statusCode := strings.TrimSpace(string(output)) - isVM := string(statusCode) == "0" + isVM := statusCode == "0" return isVM, nil } diff --git a/test/e2e/performanceprofile/functests/utils/mcps/mcps.go b/test/e2e/performanceprofile/functests/utils/mcps/mcps.go index 3255bae52e..be83024ca2 100644 --- a/test/e2e/performanceprofile/functests/utils/mcps/mcps.go +++ b/test/e2e/performanceprofile/functests/utils/mcps/mcps.go @@ -158,7 +158,6 @@ func GetConditionReason(mcpName string, conditionType machineconfigv1.MachineCon // WaitForCondition waits for the MCP with given name having a 
condition of given type with given status func WaitForCondition(mcpName string, conditionType machineconfigv1.MachineConfigPoolConditionType, conditionStatus corev1.ConditionStatus) { - var cnfNodes []corev1.Node runningOnSingleNode, err := cluster.IsSingleNode() ExpectWithOffset(1, err).ToNot(HaveOccurred()) @@ -201,7 +200,6 @@ func WaitForCondition(mcpName string, conditionType machineconfigv1.MachineConfi // WaitForCondition waits for the MCP with given name having a condition of given type with given status using the given helper function func WaitForConditionFunc(mcpName string, conditionType machineconfigv1.MachineConfigPoolConditionType, conditionStatus corev1.ConditionStatus, mcpCondGetter func(mcpName string, conditionType machineconfigv1.MachineConfigPoolConditionType) corev1.ConditionStatus) { - var cnfNodes []corev1.Node runningOnSingleNode, err := cluster.IsSingleNode() ExpectWithOffset(1, err).ToNot(HaveOccurred()) diff --git a/test/e2e/performanceprofile/functests/utils/node_inspector/inspector.go b/test/e2e/performanceprofile/functests/utils/node_inspector/inspector.go index 975041006f..8b4351a772 100644 --- a/test/e2e/performanceprofile/functests/utils/node_inspector/inspector.go +++ b/test/e2e/performanceprofile/functests/utils/node_inspector/inspector.go @@ -13,7 +13,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" testutils "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils" @@ -197,7 +197,7 @@ func createDaemonSet(name, namespace, serviceAccountName, image string) *appsv1. HostPID: true, HostNetwork: true, ServiceAccountName: serviceAccountName, - TerminationGracePeriodSeconds: pointer.Int64(0), + TerminationGracePeriodSeconds: ptr.To(int64(0)), NodeSelector: map[string]string{"kubernetes.io/os": "linux"}, Containers: []corev1.Container{ { @@ -212,8 +212,8 @@ func createDaemonSet(name, namespace, serviceAccountName, image string) *appsv1. 
}, }, SecurityContext: &corev1.SecurityContext{ - Privileged: pointer.Bool(true), - ReadOnlyRootFilesystem: pointer.Bool(true), + Privileged: ptr.To(true), + ReadOnlyRootFilesystem: ptr.To(true), }, VolumeMounts: []corev1.VolumeMount{ { diff --git a/test/e2e/performanceprofile/functests/utils/nodes/nodes.go b/test/e2e/performanceprofile/functests/utils/nodes/nodes.go index 53f63d1508..629a3a6c48 100644 --- a/test/e2e/performanceprofile/functests/utils/nodes/nodes.go +++ b/test/e2e/performanceprofile/functests/utils/nodes/nodes.go @@ -125,7 +125,7 @@ func GetByName(nodeName string) (*corev1.Node, error) { return node, nil } -// GetNonPerformancesWorkers returns list of nodes with non matching perfomance profile labels +// GetNonPerformancesWorkers returns list of nodes with non matching performance profile labels func GetNonPerformancesWorkers(nodeSelectorLabels map[string]string) ([]corev1.Node, error) { nonPerformanceWorkerNodes := []corev1.Node{} workerNodes, err := GetByRole(testutils.RoleWorker) @@ -268,7 +268,7 @@ func GetSMTLevel(ctx context.Context, cpuID int, node *corev1.Node) int { threadSiblingsList := testutils.ToString(out) // how many thread sibling you have = SMT level // example: 2-way SMT means 2 threads sibling for each thread - cpus, err := cpuset.Parse(strings.TrimSpace(string(threadSiblingsList))) + cpus, err := cpuset.Parse(strings.TrimSpace(threadSiblingsList)) ExpectWithOffset(1, err).ToNot(HaveOccurred()) return cpus.Size() } @@ -302,12 +302,15 @@ func GetNumaNodes(ctx context.Context, node *corev1.Node) (map[int][]int, error) // GetCoreSiblings returns the siblings of core per numa node func GetCoreSiblings(ctx context.Context, node *corev1.Node) (map[int]map[int][]int, error) { + coreSiblings := make(map[int]map[int][]int) lscpuCmd := []string{"lscpu", "-e=node,core,cpu", "-J"} output, err := ExecCommand(ctx, node, lscpuCmd) + if err != nil { + return coreSiblings, err + } out := testutils.ToString(output) var result NumaNodes var numaNode, core, cpu int - coreSiblings := make(map[int]map[int][]int) err = json.Unmarshal([]byte(out), &result) if err != nil { return nil, err @@ -332,7 +335,6 @@ func GetCoreSiblings(ctx context.Context, node *corev1.Node) (map[int]map[int][] // TunedForNode find tuned pod for appropriate node func TunedForNode(node *corev1.Node, sno bool) *corev1.Pod { - listOptions := &client.ListOptions{ Namespace: components.NamespaceNodeTuningOperator, FieldSelector: fields.SelectorFromSet(fields.Set{"spec.nodeName": node.Name}), @@ -349,12 +351,11 @@ func TunedForNode(node *corev1.Node, sno bool) *corev1.Pod { return false } for _, s := range tunedList.Items[0].Status.ContainerStatuses { - if s.Ready == false { + if !s.Ready { return false } } return true - }, cluster.ComputeTestTimeout(testTimeout*time.Second, sno), testPollInterval*time.Second).Should(BeTrue(), "there should be one tuned daemon per node") @@ -438,7 +439,7 @@ func GetNumaRanges(cpuString string) string { // Get Node Ethernet/Virtual Interfaces func GetNodeInterfaces(ctx context.Context, node corev1.Node) ([]NodeInterface, error) { var nodeInterfaces []NodeInterface - listNetworkInterfacesCmd := []string{"/bin/sh", "-c", fmt.Sprintf("ls -l /sys/class/net")} + listNetworkInterfacesCmd := []string{"/bin/sh", "-c", "ls -l /sys/class/net"} networkInterfaces, err := ExecCommand(ctx, &node, listNetworkInterfacesCmd) if err != nil { return nil, err @@ -532,6 +533,9 @@ func CpuManagerCpuSet(ctx context.Context, node *corev1.Node) (cpuset.CPUSet, er var stateData CpuManagerStateInfo cmd 
:= []string{"/usr/sbin/chroot", "/rootfs", "cat", stateFilePath} data, err := ExecCommand(ctx, node, cmd) + if err != nil { + return cpuset.New(), err + } err = json.Unmarshal(data, &stateData) if err != nil { return cpuset.New(), err diff --git a/test/e2e/performanceprofile/functests/utils/profilesupdate/profile_update.go b/test/e2e/performanceprofile/functests/utils/profilesupdate/profile_update.go index 36c8b185a7..3f5efd3a1e 100644 --- a/test/e2e/performanceprofile/functests/utils/profilesupdate/profile_update.go +++ b/test/e2e/performanceprofile/functests/utils/profilesupdate/profile_update.go @@ -25,7 +25,7 @@ import ( "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/profiles" ) -// UpdateIsolatedReservedCpus Updates the current performance profile with new sets of isolated and reserved cpus, and returns true if the update was successfull and false otherwise +// UpdateIsolatedReservedCpus Updates the current performance profile with new sets of isolated and reserved cpus, and returns true if the update was successful and false otherwise func UpdateIsolatedReservedCpus(isolatedSet performancev2.CPUSet, reservedSet performancev2.CPUSet) error { profile, err := profiles.GetByNodeLabels(testutils.NodeSelectorLabels) if err != nil { diff --git a/test/e2e/reboots/kernel_parameter_add_rm.go b/test/e2e/reboots/kernel_parameter_add_rm.go index 405b06c4e8..abd0f00ffb 100644 --- a/test/e2e/reboots/kernel_parameter_add_rm.go +++ b/test/e2e/reboots/kernel_parameter_add_rm.go @@ -33,13 +33,15 @@ var _ = ginkgo.Describe("[reboots][kernel_parameter_add_rm] Node Tuning Operator ginkgo.AfterEach(func() { // This cleanup code ignores issues outlined in rhbz#1816239; // this can cause a degraded MachineConfigPool + + // Ignore failures to cleanup resources which are already deleted or not yet created. ginkgo.By("cluster changes rollback") if node != nil { - util.ExecAndLogCommand("oc", "label", "node", "--overwrite", node.Name, nodeLabelRealtime+"-") + _, _, _ = util.ExecAndLogCommand("oc", "label", "node", "--overwrite", node.Name, nodeLabelRealtime+"-") } - util.ExecAndLogCommand("oc", "delete", "-n", ntoconfig.WatchNamespace(), "-f", profileParent) - util.ExecAndLogCommand("oc", "delete", "-n", ntoconfig.WatchNamespace(), "-f", profileChild) - util.ExecAndLogCommand("oc", "delete", "-f", mcpRealtime) + _, _, _ = util.ExecAndLogCommand("oc", "delete", "-n", ntoconfig.WatchNamespace(), "-f", profileParent) + _, _, _ = util.ExecAndLogCommand("oc", "delete", "-n", ntoconfig.WatchNamespace(), "-f", profileChild) + _, _, _ = util.ExecAndLogCommand("oc", "delete", "-f", mcpRealtime) }) ginkgo.It("kernel parameters set", func() { @@ -104,6 +106,7 @@ var _ = ginkgo.Describe("[reboots][kernel_parameter_add_rm] Node Tuning Operator ginkgo.By(fmt.Sprintf("getting the current %s value in Pod %s", procCmdline, pod.Name)) cmdlineNew, err = util.WaitForCmdInPod(pollInterval, waitDuration, pod, cmdCatCmdline...) 
+ gomega.Expect(err).NotTo(gomega.HaveOccurred()) util.Logf("%s has %s: %s", pod.Name, procCmdline, cmdlineNew) ginkgo.By("ensuring the custom worker child profile was set") diff --git a/test/e2e/reboots/operator_test.go b/test/e2e/reboots/operator_test.go index 7c715f23d9..de4e98a5dc 100644 --- a/test/e2e/reboots/operator_test.go +++ b/test/e2e/reboots/operator_test.go @@ -24,8 +24,9 @@ func TestNodeTuningOperator(t *testing.T) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) nodeCount, err := util.GetClusterNodes(cs) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) if nodeCount == 1 && controlPlaneTopology == configv1.SingleReplicaTopologyMode { - // This looks like an SNO cluster. For the "reboots" tests to work, "master" MCP needs to be targetted. + // This looks like an SNO cluster. For the "reboots" tests to work, "master" MCP needs to be targeted. util.Logf("seeing only %d node and control plane topology is %v, skipping test suite", nodeCount, controlPlaneTopology) return } diff --git a/test/e2e/reboots/sno/kernel_parameter_add_rm.go b/test/e2e/reboots/sno/kernel_parameter_add_rm.go index d9c37ec052..294664d98f 100644 --- a/test/e2e/reboots/sno/kernel_parameter_add_rm.go +++ b/test/e2e/reboots/sno/kernel_parameter_add_rm.go @@ -30,9 +30,11 @@ var _ = ginkgo.Describe("[reboots][kernel_parameter_add_rm] Node Tuning Operator // Cleanup code to roll back cluster changes done by this test even if it fails in the middle of ginkgo.It() ginkgo.AfterEach(func() { // The cleanup will not work during the time API server is unavailable, e.g. during SNO reboot. + + // Ignore failures to cleanup resources which are already deleted or not yet created. ginkgo.By("cluster changes rollback") - util.ExecAndLogCommand("oc", "delete", "-n", ntoconfig.WatchNamespace(), "-f", profileParent) - util.ExecAndLogCommand("oc", "delete", "-n", ntoconfig.WatchNamespace(), "-f", profileChild) + _, _, _ = util.ExecAndLogCommand("oc", "delete", "-n", ntoconfig.WatchNamespace(), "-f", profileParent) + _, _, _ = util.ExecAndLogCommand("oc", "delete", "-n", ntoconfig.WatchNamespace(), "-f", profileChild) }) ginkgo.It("kernel parameters set", func() { @@ -89,6 +91,7 @@ var _ = ginkgo.Describe("[reboots][kernel_parameter_add_rm] Node Tuning Operator ginkgo.By(fmt.Sprintf("getting the current %s value in Pod %s", procCmdline, pod.Name)) cmdlineNew, err = util.WaitForCmdInPod(pollInterval, waitDuration, pod, cmdCatCmdline...) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) util.Logf("%s has %s: %s", pod.Name, procCmdline, cmdlineNew) ginkgo.By("ensuring the custom master child profile was set") diff --git a/test/e2e/reboots/sno/operator_test.go b/test/e2e/reboots/sno/operator_test.go index c7977c8938..0705cf0849 100644 --- a/test/e2e/reboots/sno/operator_test.go +++ b/test/e2e/reboots/sno/operator_test.go @@ -24,6 +24,7 @@ func TestNodeTuningOperator(t *testing.T) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) nodeCount, err := util.GetClusterNodes(cs) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) if nodeCount != 1 || controlPlaneTopology != configv1.SingleReplicaTopologyMode { // This does not seem to be an SNO cluster. 
util.Logf("the cluster does not seem to be an SNO cluster, skipping test suite") diff --git a/test/e2e/reboots/sno/stalld.go b/test/e2e/reboots/sno/stalld.go index f5ad054842..28edec5a5a 100644 --- a/test/e2e/reboots/sno/stalld.go +++ b/test/e2e/reboots/sno/stalld.go @@ -28,9 +28,11 @@ var _ = ginkgo.Describe("[reboots][stalld] Node Tuning Operator installing syste // Cleanup code to roll back cluster changes done by this test even if it fails in the middle of ginkgo.It() ginkgo.AfterEach(func() { // The cleanup will not work during the time API server is unavailable, e.g. during SNO reboot. + + // Ignore failures to cleanup resources which are already deleted or not yet created. ginkgo.By("cluster changes rollback") - util.ExecAndLogCommand("oc", "delete", "-n", ntoconfig.WatchNamespace(), "-f", profileStalldOff) - util.ExecAndLogCommand("oc", "delete", "-n", ntoconfig.WatchNamespace(), "-f", profileStalldOn) + _, _, _ = util.ExecAndLogCommand("oc", "delete", "-n", ntoconfig.WatchNamespace(), "-f", profileStalldOff) + _, _, _ = util.ExecAndLogCommand("oc", "delete", "-n", ntoconfig.WatchNamespace(), "-f", profileStalldOn) }) ginkgo.It("stalld process started/stopped", func() { diff --git a/test/e2e/reboots/stalld.go b/test/e2e/reboots/stalld.go index 267659dc1b..65b7fad2b7 100644 --- a/test/e2e/reboots/stalld.go +++ b/test/e2e/reboots/stalld.go @@ -31,13 +31,15 @@ var _ = ginkgo.Describe("[reboots][stalld] Node Tuning Operator installing syste ginkgo.AfterEach(func() { // This cleanup code ignores issues outlined in rhbz#1816239; // this can cause a degraded MachineConfigPool + + // Ignore failures to cleanup resources which are already deleted or not yet created. ginkgo.By("cluster changes rollback") if node != nil { - util.ExecAndLogCommand("oc", "label", "node", "--overwrite", node.Name, nodeLabelRealtime+"-") + _, _, _ = util.ExecAndLogCommand("oc", "label", "node", "--overwrite", node.Name, nodeLabelRealtime+"-") } - util.ExecAndLogCommand("oc", "delete", "-n", ntoconfig.WatchNamespace(), "-f", profileStalldOff) - util.ExecAndLogCommand("oc", "delete", "-n", ntoconfig.WatchNamespace(), "-f", profileStalldOn) - util.ExecAndLogCommand("oc", "delete", "-f", mcpRealtime) + _, _, _ = util.ExecAndLogCommand("oc", "delete", "-n", ntoconfig.WatchNamespace(), "-f", profileStalldOff) + _, _, _ = util.ExecAndLogCommand("oc", "delete", "-n", ntoconfig.WatchNamespace(), "-f", profileStalldOn) + _, _, _ = util.ExecAndLogCommand("oc", "delete", "-f", mcpRealtime) }) ginkgo.It("stalld process started/stopped", func() { diff --git a/test/e2e/util/util.go b/test/e2e/util/util.go index 36e85d18d8..e640572138 100644 --- a/test/e2e/util/util.go +++ b/test/e2e/util/util.go @@ -53,10 +53,9 @@ func GetCurrentDirPath() (string, error) { } // Logf formats using the default formats for its operands and writes to -// ginkgo.GinkgoWriter and a newline is appended. It returns the number of -// bytes written and any write error encountered. -func Logf(format string, args ...interface{}) (n int, err error) { - return fmt.Fprintf(ginkgo.GinkgoWriter, format+"\n", args...) +// ginkgo.GinkgoWriter and a newline is appended. +func Logf(format string, args ...interface{}) { + fmt.Fprintf(ginkgo.GinkgoWriter, format+"\n", args...) } // getNodes returns a list of nodes that match the labelSelector.