diff --git a/PROJECT b/PROJECT index 54f37f051..b0f6ff4f5 100644 --- a/PROJECT +++ b/PROJECT @@ -20,6 +20,7 @@ resources: version: v1beta2 webhooks: conversion: true + validation: true webhookVersion: v1 - controller: true group: core diff --git a/apis/flowcollector/v1beta2/flowcollector_validation_webhook.go b/apis/flowcollector/v1beta2/flowcollector_validation_webhook.go new file mode 100644 index 000000000..25499a8bb --- /dev/null +++ b/apis/flowcollector/v1beta2/flowcollector_validation_webhook.go @@ -0,0 +1,188 @@ +package v1beta2 + +import ( + "context" + "errors" + "fmt" + "slices" + "strconv" + "strings" + + kerr "k8s.io/apimachinery/pkg/api/errors" + runtime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + "github.com/netobserv/network-observability-operator/pkg/cluster" +) + +var ( + log = logf.Log.WithName("flowcollector-resource") + CurrentClusterInfo *cluster.Info +) + +// ValidateCreate implements webhook.Validator so a webhook will be registered for the type +func (r *FlowCollector) ValidateCreate(ctx context.Context, newObj runtime.Object) (admission.Warnings, error) { + fc, ok := newObj.(*FlowCollector) + if !ok { + return nil, kerr.NewBadRequest(fmt.Sprintf("expected a FlowCollector but got a %T", newObj)) + } + log.Info("validate create", "name", fc.Name) + return r.validate(ctx, fc) +} + +// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type +func (r *FlowCollector) ValidateUpdate(ctx context.Context, _, newObj runtime.Object) (admission.Warnings, error) { + fc, ok := newObj.(*FlowCollector) + if !ok { + return nil, kerr.NewBadRequest(fmt.Sprintf("expected a FlowCollector but got a %T", newObj)) + } + log.Info("validate update", "name", fc.Name) + return r.validate(ctx, fc) +} + +// ValidateDelete implements webhook.Validator so a webhook will be registered for the type +func (r 
*FlowCollector) ValidateDelete(_ context.Context, _ runtime.Object) (admission.Warnings, error) { + log.Info("validate delete", "name", r.Name) + return nil, nil +} + +func (r *FlowCollector) validate(ctx context.Context, fc *FlowCollector) (admission.Warnings, error) { + var allW admission.Warnings + var allE []error + w, errs := r.validateAgent(ctx, fc) + allW, allE = collect(allW, allE, w, errs) + w = r.warnLogLevels(fc) + allW, allE = collect(allW, allE, w, nil) + return allW, errors.Join(allE...) +} + +func collect(wPool admission.Warnings, errsPool []error, w admission.Warnings, errs []error) (admission.Warnings, []error) { + if len(w) > 0 { + wPool = append(wPool, w...) + } + if len(errs) > 0 { + errsPool = append(errsPool, errs...) + } + return wPool, errsPool +} + +func (r *FlowCollector) warnLogLevels(fc *FlowCollector) admission.Warnings { + var w admission.Warnings + if fc.Spec.Agent.EBPF.LogLevel == "debug" || fc.Spec.Agent.EBPF.LogLevel == "trace" { + w = append(w, fmt.Sprintf("The log level for the eBPF agent is %s, which impacts performance and resource footprint.", fc.Spec.Agent.EBPF.LogLevel)) + } + if fc.Spec.Processor.LogLevel == "debug" || fc.Spec.Processor.LogLevel == "trace" { + w = append(w, fmt.Sprintf("The log level for the processor (flowlogs-pipeline) is %s, which impacts performance and resource footprint.", fc.Spec.Processor.LogLevel)) + } + return w +} + +// nolint:cyclop +func (r *FlowCollector) validateAgent(_ context.Context, fc *FlowCollector) (admission.Warnings, []error) { + var warnings admission.Warnings + if slices.Contains(fc.Spec.Agent.EBPF.Features, NetworkEvents) { + // Make sure required version of ocp is installed + if CurrentClusterInfo != nil && CurrentClusterInfo.IsOpenShift() { + b, err := CurrentClusterInfo.OpenShiftVersionIsAtLeast("4.18.0") + if err != nil { + warnings = append(warnings, fmt.Sprintf("Could not detect OpenShift cluster version: %s", err.Error())) + } else if !b { + warnings = append(warnings, 
fmt.Sprintf("The NetworkEvents feature requires OpenShift 4.18 or above (version detected: %s)", CurrentClusterInfo.GetOpenShiftVersion())) + } + } else { + warnings = append(warnings, "The NetworkEvents feature is only supported with OpenShift") + } + if !fc.Spec.Agent.EBPF.Privileged { + warnings = append(warnings, "The NetworkEvents feature requires eBPF Agent to run in privileged mode") + } + } + if slices.Contains(fc.Spec.Agent.EBPF.Features, PacketDrop) && !fc.Spec.Agent.EBPF.Privileged { + warnings = append(warnings, "The PacketDrop feature requires eBPF Agent to run in privileged mode") + } + var errs []error + if fc.Spec.Agent.EBPF.FlowFilter != nil && fc.Spec.Agent.EBPF.FlowFilter.Enable != nil && *fc.Spec.Agent.EBPF.FlowFilter.Enable { + hasPorts := fc.Spec.Agent.EBPF.FlowFilter.Ports.IntVal > 0 || fc.Spec.Agent.EBPF.FlowFilter.Ports.StrVal != "" + if hasPorts { + if err := validateFilterPortConfig(fc.Spec.Agent.EBPF.FlowFilter.Ports); err != nil { + errs = append(errs, err) + } + } + hasSrcPorts := fc.Spec.Agent.EBPF.FlowFilter.SourcePorts.IntVal > 0 || fc.Spec.Agent.EBPF.FlowFilter.SourcePorts.StrVal != "" + if hasSrcPorts { + if err := validateFilterPortConfig(fc.Spec.Agent.EBPF.FlowFilter.SourcePorts); err != nil { + errs = append(errs, err) + } + } + hasDstPorts := fc.Spec.Agent.EBPF.FlowFilter.DestPorts.IntVal > 0 || fc.Spec.Agent.EBPF.FlowFilter.DestPorts.StrVal != "" + if hasDstPorts { + if err := validateFilterPortConfig(fc.Spec.Agent.EBPF.FlowFilter.DestPorts); err != nil { + errs = append(errs, err) + } + } + if hasPorts && hasSrcPorts { + errs = append(errs, errors.New("cannot configure agent filter with ports and sourcePorts, they are mutually exclusive")) + } + if hasPorts && hasDstPorts { + errs = append(errs, errors.New("cannot configure agent filter with ports and destPorts, they are mutually exclusive")) + } + } + return warnings, errs +} + +func validateFilterPortConfig(value intstr.IntOrString) error { + if value.Type == intstr.Int { 
+ return nil + } + sVal := value.String() + if strings.Contains(sVal, "-") { + ps := strings.SplitN(sVal, "-", 2) + if len(ps) != 2 { + return fmt.Errorf("invalid ports range: expected two integers separated by '-' but found %s", sVal) + } + start, err := validatePortString(ps[0]) + if err != nil { + return fmt.Errorf("start port in range: %w", err) + } + end, err := validatePortString(ps[1]) + if err != nil { + return fmt.Errorf("end port in range: %w", err) + } + if start >= end { + return fmt.Errorf("invalid port range: start is greater or equal to end") + } + return nil + } else if strings.Contains(sVal, ",") { + ps := strings.Split(sVal, ",") + if len(ps) != 2 { + return fmt.Errorf("invalid ports couple: expected two integers separated by ',' but found %s", sVal) + } + _, err := validatePortString(ps[0]) + if err != nil { + return fmt.Errorf("first port: %w", err) + } + _, err = validatePortString(ps[1]) + if err != nil { + return fmt.Errorf("second port: %w", err) + } + return nil + } + // Should be a single port then + _, err := validatePortString(sVal) + if err != nil { + return err + } + return nil +} + +func validatePortString(s string) (uint16, error) { + p, err := strconv.ParseUint(s, 10, 16) + if err != nil { + return 0, fmt.Errorf("invalid port number %w", err) + } + if p == 0 { + return 0, fmt.Errorf("invalid port 0") + } + return uint16(p), nil +} diff --git a/apis/flowcollector/v1beta2/flowcollector_validation_webhook_test.go b/apis/flowcollector/v1beta2/flowcollector_validation_webhook_test.go new file mode 100644 index 000000000..055df760b --- /dev/null +++ b/apis/flowcollector/v1beta2/flowcollector_validation_webhook_test.go @@ -0,0 +1,265 @@ +package v1beta2 + +import ( + "context" + "testing" + + "github.com/netobserv/network-observability-operator/pkg/cluster" + "github.com/stretchr/testify/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/utils/ptr" + 
"sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +func TestValidateAgent(t *testing.T) { + tests := []struct { + name string + fc *FlowCollector + ocpVersion string + expectedError string + expectedWarnings admission.Warnings + }{ + { + name: "Empty config is valid", + fc: &FlowCollector{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster", + }, + Spec: FlowCollectorSpec{}, + }, + }, + { + name: "Valid configuration", + fc: &FlowCollector{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster", + }, + Spec: FlowCollectorSpec{ + Agent: FlowCollectorAgent{ + Type: AgentEBPF, + EBPF: FlowCollectorEBPF{ + Features: []AgentFeature{DNSTracking, FlowRTT, PacketDrop}, + Privileged: true, + Sampling: ptr.To(int32(100)), + FlowFilter: &EBPFFlowFilter{ + Enable: ptr.To(true), + Action: "Accept", + CIDR: "0.0.0.0/0", + Direction: "Egress", + }, + }, + }, + }, + }, + }, + { + name: "PacketDrop without privilege triggers warning", + fc: &FlowCollector{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster", + }, + Spec: FlowCollectorSpec{ + Agent: FlowCollectorAgent{ + Type: AgentEBPF, + EBPF: FlowCollectorEBPF{ + Features: []AgentFeature{PacketDrop}, + }, + }, + }, + }, + expectedWarnings: admission.Warnings{"The PacketDrop feature requires eBPF Agent to run in privileged mode"}, + }, + { + name: "NetworkEvents on ocp 4.16 triggers warning", + ocpVersion: "4.16.5", + fc: &FlowCollector{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster", + }, + Spec: FlowCollectorSpec{ + Agent: FlowCollectorAgent{ + Type: AgentEBPF, + EBPF: FlowCollectorEBPF{ + Features: []AgentFeature{NetworkEvents}, + Privileged: true, + }, + }, + }, + }, + expectedWarnings: admission.Warnings{"The NetworkEvents feature requires OpenShift 4.18 or above (version detected: 4.16.5)"}, + }, + { + name: "NetworkEvents without privilege triggers warning", + ocpVersion: "4.18.0", + fc: &FlowCollector{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster", + }, + Spec: FlowCollectorSpec{ + Agent: FlowCollectorAgent{ + 
Type: AgentEBPF, + EBPF: FlowCollectorEBPF{ + Features: []AgentFeature{NetworkEvents}, + }, + }, + }, + }, + expectedWarnings: admission.Warnings{"The NetworkEvents feature requires eBPF Agent to run in privileged mode"}, + }, + { + name: "FlowFilter different ports configs are mutually exclusive", + fc: &FlowCollector{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster", + }, + Spec: FlowCollectorSpec{ + Agent: FlowCollectorAgent{ + Type: AgentEBPF, + EBPF: FlowCollectorEBPF{ + FlowFilter: &EBPFFlowFilter{ + Enable: ptr.To(true), + Action: "Accept", + CIDR: "0.0.0.0/0", + Ports: intstr.FromInt(80), + SourcePorts: intstr.FromInt(443), + }, + }, + }, + }, + }, + expectedError: "cannot configure agent filter with ports and sourcePorts, they are mutually exclusive", + }, + { + name: "FlowFilter expect invalid ports", + fc: &FlowCollector{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster", + }, + Spec: FlowCollectorSpec{ + Agent: FlowCollectorAgent{ + Type: AgentEBPF, + EBPF: FlowCollectorEBPF{ + FlowFilter: &EBPFFlowFilter{ + Enable: ptr.To(true), + Ports: intstr.FromString("abcd"), + }, + }, + }, + }, + }, + expectedError: "invalid port number", + }, + { + name: "FlowFilter expect valid ports range", + fc: &FlowCollector{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster", + }, + Spec: FlowCollectorSpec{ + Agent: FlowCollectorAgent{ + Type: AgentEBPF, + EBPF: FlowCollectorEBPF{ + FlowFilter: &EBPFFlowFilter{ + Enable: ptr.To(true), + Ports: intstr.FromString("80-255"), + }, + }, + }, + }, + }, + }, + { + name: "FlowFilter expect invalid ports range (order)", + fc: &FlowCollector{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster", + }, + Spec: FlowCollectorSpec{ + Agent: FlowCollectorAgent{ + Type: AgentEBPF, + EBPF: FlowCollectorEBPF{ + FlowFilter: &EBPFFlowFilter{ + Enable: ptr.To(true), + Ports: intstr.FromString("255-80"), + }, + }, + }, + }, + }, + expectedError: "start is greater or equal", + }, + { + name: "FlowFilter expect invalid ports range", + fc: 
&FlowCollector{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster", + }, + Spec: FlowCollectorSpec{ + Agent: FlowCollectorAgent{ + Type: AgentEBPF, + EBPF: FlowCollectorEBPF{ + FlowFilter: &EBPFFlowFilter{ + Enable: ptr.To(true), + Ports: intstr.FromString("80-?"), + }, + }, + }, + }, + }, + expectedError: "invalid port number", + }, + { + name: "FlowFilter expect valid ports couple", + fc: &FlowCollector{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster", + }, + Spec: FlowCollectorSpec{ + Agent: FlowCollectorAgent{ + Type: AgentEBPF, + EBPF: FlowCollectorEBPF{ + FlowFilter: &EBPFFlowFilter{ + Enable: ptr.To(true), + Ports: intstr.FromString("255,80"), + }, + }, + }, + }, + }, + }, + { + name: "FlowFilter expect invalid ports couple", + fc: &FlowCollector{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster", + }, + Spec: FlowCollectorSpec{ + Agent: FlowCollectorAgent{ + Type: AgentEBPF, + EBPF: FlowCollectorEBPF{ + FlowFilter: &EBPFFlowFilter{ + Enable: ptr.To(true), + Ports: intstr.FromString("80,100,250"), + }, + }, + }, + }, + }, + expectedError: "expected two integers", + }, + } + + CurrentClusterInfo = &cluster.Info{} + for _, test := range tests { + CurrentClusterInfo.MockOpenShiftVersion(test.ocpVersion) + warnings, errs := test.fc.validateAgent(context.TODO(), test.fc) + if test.expectedError == "" { + assert.Empty(t, errs, test.name) + } else { + assert.Len(t, errs, 1, test.name) + assert.ErrorContains(t, errs[0], test.expectedError, test.name) + } + assert.Equal(t, test.expectedWarnings, warnings, test.name) + } +} diff --git a/apis/flowcollector/v1beta2/flowcollector_webhook.go b/apis/flowcollector/v1beta2/flowcollector_webhook.go index c3e3a71e8..acbdc14a4 100644 --- a/apis/flowcollector/v1beta2/flowcollector_webhook.go +++ b/apis/flowcollector/v1beta2/flowcollector_webhook.go @@ -16,12 +16,15 @@ limitations under the License. 
package v1beta2 -import ctrl "sigs.k8s.io/controller-runtime" +import ( + ctrl "sigs.k8s.io/controller-runtime" +) -// +kubebuilder:webhook:verbs=create;update,path=/validate-netobserv-io-v1beta2-flowcollector,mutating=false,failurePolicy=fail,groups=netobserv.io,resources=flowcollectors,versions=v1beta2,name=flowcollectorconversionwebhook.netobserv.io,sideEffects=None,admissionReviewVersions=v1 +// +kubebuilder:webhook:verbs=create;update,path=/validate-flows-netobserv-io-v1beta2-flowcollector,mutating=false,failurePolicy=fail,sideEffects=None,groups=flows.netobserv.io,resources=flowcollectors,versions=v1beta2,name=flowcollectorconversionwebhook.netobserv.io,admissionReviewVersions=v1 func (r *FlowCollector) SetupWebhookWithManager(mgr ctrl.Manager) error { return ctrl.NewWebhookManagedBy(mgr). For(r). + WithValidator(r). Complete() } diff --git a/apis/flowcollector/v1beta2/zz_generated.deepcopy.go b/apis/flowcollector/v1beta2/zz_generated.deepcopy.go index f4dc8658f..8108a9745 100644 --- a/apis/flowcollector/v1beta2/zz_generated.deepcopy.go +++ b/apis/flowcollector/v1beta2/zz_generated.deepcopy.go @@ -24,7 +24,7 @@ import ( "k8s.io/api/autoscaling/v2" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
diff --git a/apis/flowmetrics/v1alpha1/flowmetric_webhook.go b/apis/flowmetrics/v1alpha1/flowmetric_webhook.go index ef0a4f72d..e48aa13d0 100644 --- a/apis/flowmetrics/v1alpha1/flowmetric_webhook.go +++ b/apis/flowmetrics/v1alpha1/flowmetric_webhook.go @@ -43,7 +43,7 @@ func (r *FlowMetricWebhook) ValidateCreate(ctx context.Context, newObj runtime.O if !ok { return nil, apierrors.NewBadRequest(fmt.Sprintf("expected an FlowMetric but got a %T", newObj)) } - return nil, validateFlowMetric(ctx, newFlowMetric) + return validateFlowMetric(ctx, newFlowMetric) } // ValidateUpdate implements webhook.Validator so a webhook will be registered for the type @@ -53,7 +53,7 @@ func (r *FlowMetricWebhook) ValidateUpdate(ctx context.Context, _, newObj runtim if !ok { return nil, apierrors.NewBadRequest(fmt.Sprintf("expected an FlowMetric but got a %T", newObj)) } - return nil, validateFlowMetric(ctx, newFlowMetric) + return validateFlowMetric(ctx, newFlowMetric) } // ValidateDelete implements webhook.Validator so a webhook will be registered for the type @@ -62,18 +62,24 @@ func (r *FlowMetricWebhook) ValidateDelete(_ context.Context, _ runtime.Object) return nil, nil } -func checkFlowMetricCartinality(fMetric *FlowMetric) { +func checkFlowMetricCardinality(fMetric *FlowMetric) admission.Warnings { + w := admission.Warnings{} r, err := cardinality.CheckCardinality(fMetric.Spec.Labels...)
if err != nil { flowmetriclog.WithValues("FlowMetric name", fMetric.Name).Error(err, "Could not check metrics cardinality") + w = append(w, "Could not check metrics cardinality") } overallCardinality := r.GetOverall() if overallCardinality == cardinality.WarnAvoid || overallCardinality == cardinality.WarnUnknown { flowmetriclog.WithValues("FlowMetric name", fMetric.Name).Info("Warning: unsafe metric detected with potentially very high cardinality, please check its definition.", "Details", r.GetDetails()) + w = append(w, "This metric looks unsafe, with a potentially very high cardinality: "+r.GetDetails()) + } else if overallCardinality == cardinality.WarnCareful { + w = append(w, "This metric has a potentially high cardinality: "+r.GetDetails()) + } + return w } -func validateFlowMetric(_ context.Context, fMetric *FlowMetric) error { +func validateFlowMetric(_ context.Context, fMetric *FlowMetric) (admission.Warnings, error) { var str []string var allErrs field.ErrorList @@ -121,10 +127,10 @@ func validateFlowMetric(_ context.Context, fMetric *FlowMetric) error { } if len(allErrs) != 0 { - return apierrors.NewInvalid( + return nil, apierrors.NewInvalid( schema.GroupKind{Group: GroupVersion.Group, Kind: FlowMetric{}.Kind}, fMetric.Name, allErrs) } - checkFlowMetricCartinality(fMetric) - return nil + w := checkFlowMetricCardinality(fMetric) + return w, nil } diff --git a/apis/flowmetrics/v1alpha1/flowmetric_webhook_test.go b/apis/flowmetrics/v1alpha1/flowmetric_webhook_test.go index f82201556..5c5ca5bba 100644 --- a/apis/flowmetrics/v1alpha1/flowmetric_webhook_test.go +++ b/apis/flowmetrics/v1alpha1/flowmetric_webhook_test.go @@ -108,7 +108,7 @@ func TestFlowMetric(t *testing.T) { } for _, test := range tests { - err := validateFlowMetric(context.TODO(), test.m) + _, err := validateFlowMetric(context.TODO(), test.m) if err == nil { if test.expectedError != "" { t.Errorf("%s: ValidateFlowMetric failed, no error found while expected: \"%s\"", test.desc, 
test.expectedError) diff --git a/bundle/manifests/netobserv-operator.clusterserviceversion.yaml b/bundle/manifests/netobserv-operator.clusterserviceversion.yaml index 26946a19a..c3da3b300 100644 --- a/bundle/manifests/netobserv-operator.clusterserviceversion.yaml +++ b/bundle/manifests/netobserv-operator.clusterserviceversion.yaml @@ -1373,7 +1373,7 @@ spec: generateName: flowcollectorconversionwebhook.netobserv.io rules: - apiGroups: - - netobserv.io + - flows.netobserv.io apiVersions: - v1beta2 operations: @@ -1384,7 +1384,7 @@ spec: sideEffects: None targetPort: 9443 type: ValidatingAdmissionWebhook - webhookPath: /validate-netobserv-io-v1beta2-flowcollector + webhookPath: /validate-flows-netobserv-io-v1beta2-flowcollector - admissionReviewVersions: - v1 containerPort: 443 diff --git a/config/webhook/manifests.yaml b/config/webhook/manifests.yaml index cb7d2fa2f..a7eb919d9 100644 --- a/config/webhook/manifests.yaml +++ b/config/webhook/manifests.yaml @@ -12,12 +12,12 @@ webhooks: service: name: webhook-service namespace: system - path: /validate-netobserv-io-v1beta2-flowcollector + path: /validate-flows-netobserv-io-v1beta2-flowcollector failurePolicy: Fail name: flowcollectorconversionwebhook.netobserv.io rules: - apiGroups: - - netobserv.io + - flows.netobserv.io apiVersions: - v1beta2 operations: diff --git a/controllers/consoleplugin/consoleplugin_objects.go b/controllers/consoleplugin/consoleplugin_objects.go index 578a31582..e405c62f1 100644 --- a/controllers/consoleplugin/consoleplugin_objects.go +++ b/controllers/consoleplugin/consoleplugin_objects.go @@ -384,7 +384,7 @@ func (b *builder) getPromConfig(ctx context.Context) cfg.PrometheusConfig { config.Timeout = api.Duration{Duration: b.desired.Prometheus.Querier.Timeout.Duration} } if b.desired.Prometheus.Querier.Mode == "" || b.desired.Prometheus.Querier.Mode == flowslatest.PromModeAuto { - if b.info.UseOpenShiftSCC /* aka IsOpenShift */ { + if b.info.ClusterInfo.IsOpenShift() { config.URL = 
"https://thanos-querier.openshift-monitoring.svc:9091/" // requires cluster-monitoringv-view cluster role config.DevURL = "https://thanos-querier.openshift-monitoring.svc:9092/" // restricted to a particular namespace config.ForwardUserToken = true diff --git a/controllers/consoleplugin/consoleplugin_reconciler.go b/controllers/consoleplugin/consoleplugin_reconciler.go index 0dcd2e0bb..6f8122521 100644 --- a/controllers/consoleplugin/consoleplugin_reconciler.go +++ b/controllers/consoleplugin/consoleplugin_reconciler.go @@ -45,7 +45,7 @@ func NewReconciler(cmn *reconcilers.Instance) CPReconciler { serviceAccount: cmn.Managed.NewServiceAccount(constants.PluginName), configMap: cmn.Managed.NewConfigMap(configMapName), } - if cmn.AvailableAPIs.HasSvcMonitor() { + if cmn.ClusterInfo.HasSvcMonitor() { rec.serviceMonitor = cmn.Managed.NewServiceMonitor(constants.PluginName) } return rec @@ -67,13 +67,13 @@ func (r *CPReconciler) Reconcile(ctx context.Context, desired *flowslatest.FlowC return err } - if r.AvailableAPIs.HasConsolePlugin() { + if r.ClusterInfo.HasConsolePlugin() { if err = r.checkAutoPatch(ctx, desired); err != nil { return err } } - if helper.UseConsolePlugin(&desired.Spec) && (r.AvailableAPIs.HasConsolePlugin() || helper.UseTestConsolePlugin(&desired.Spec)) { + if helper.UseConsolePlugin(&desired.Spec) && (r.ClusterInfo.HasConsolePlugin() || helper.UseTestConsolePlugin(&desired.Spec)) { // Create object builder builder := newBuilder(r.Instance, &desired.Spec) @@ -81,7 +81,7 @@ func (r *CPReconciler) Reconcile(ctx context.Context, desired *flowslatest.FlowC return err } - if r.AvailableAPIs.HasConsolePlugin() { + if r.ClusterInfo.HasConsolePlugin() { if err = r.reconcilePlugin(ctx, &builder, &desired.Spec); err != nil { return err } @@ -225,7 +225,7 @@ func (r *CPReconciler) reconcileServices(ctx context.Context, builder *builder) if err := r.ReconcileService(ctx, r.metricsService, builder.metricsService(), &report); err != nil { return err } - if 
r.AvailableAPIs.HasSvcMonitor() { + if r.ClusterInfo.HasSvcMonitor() { serviceMonitor := builder.serviceMonitor() if err := reconcilers.GenericReconcile(ctx, r.Managed, &r.Client, r.serviceMonitor, serviceMonitor, &report, helper.ServiceMonitorChanged); err != nil { return err diff --git a/controllers/consoleplugin/consoleplugin_test.go b/controllers/consoleplugin/consoleplugin_test.go index 02aebe36a..2c463ac71 100644 --- a/controllers/consoleplugin/consoleplugin_test.go +++ b/controllers/consoleplugin/consoleplugin_test.go @@ -19,6 +19,7 @@ import ( config "github.com/netobserv/network-observability-operator/controllers/consoleplugin/config" "github.com/netobserv/network-observability-operator/controllers/constants" "github.com/netobserv/network-observability-operator/controllers/reconcilers" + "github.com/netobserv/network-observability-operator/pkg/cluster" "github.com/netobserv/network-observability-operator/pkg/helper" "github.com/netobserv/network-observability-operator/pkg/manager/status" ) @@ -109,7 +110,7 @@ func getAutoScalerSpecs() (ascv2.HorizontalPodAutoscaler, flowslatest.FlowCollec } func getBuilder(spec *flowslatest.FlowCollectorSpec, lk *helper.LokiConfig) builder { - info := reconcilers.Common{Namespace: testNamespace, Loki: lk} + info := reconcilers.Common{Namespace: testNamespace, Loki: lk, ClusterInfo: &cluster.Info{}} b := newBuilder(info.NewInstance(testImage, status.Instance{}), spec) _, _, _ = b.configMap(context.Background()) // build configmap to update builder's volumes return b diff --git a/controllers/ebpf/agent_controller.go b/controllers/ebpf/agent_controller.go index f83d84600..d5556b31e 100644 --- a/controllers/ebpf/agent_controller.go +++ b/controllers/ebpf/agent_controller.go @@ -132,10 +132,10 @@ func NewAgentController(common *reconcilers.Instance) *AgentController { permissions: permissions.NewReconciler(common), promSvc: common.Managed.NewService(constants.EBPFAgentMetricsSvcName), } - if common.AvailableAPIs.HasSvcMonitor() 
{ + if common.ClusterInfo.HasSvcMonitor() { agent.serviceMonitor = common.Managed.NewServiceMonitor(constants.EBPFAgentMetricsSvcMonitoringName) } - if common.AvailableAPIs.HasPromRule() { + if common.ClusterInfo.HasPromRule() { agent.prometheusRule = common.Managed.NewPrometheusRule(constants.EBPFAgentPromoAlertRule) } return &agent diff --git a/controllers/ebpf/agent_metrics.go b/controllers/ebpf/agent_metrics.go index 60575cff6..3a39ed68e 100644 --- a/controllers/ebpf/agent_metrics.go +++ b/controllers/ebpf/agent_metrics.go @@ -21,10 +21,10 @@ func (c *AgentController) reconcileMetricsService(ctx context.Context, target *f if !helper.IsEBPFMetricsEnabled(target) { c.Managed.TryDelete(ctx, c.promSvc) - if c.AvailableAPIs.HasSvcMonitor() { + if c.ClusterInfo.HasSvcMonitor() { c.Managed.TryDelete(ctx, c.serviceMonitor) } - if c.AvailableAPIs.HasPromRule() { + if c.ClusterInfo.HasPromRule() { c.Managed.TryDelete(ctx, c.prometheusRule) } return nil @@ -33,7 +33,7 @@ func (c *AgentController) reconcileMetricsService(ctx context.Context, target *f if err := c.ReconcileService(ctx, c.promSvc, c.promService(target), &report); err != nil { return err } - if c.AvailableAPIs.HasSvcMonitor() { + if c.ClusterInfo.HasSvcMonitor() { serviceMonitor := c.promServiceMonitoring(target) if err := reconcilers.GenericReconcile(ctx, c.Managed, &c.Client, c.serviceMonitor, serviceMonitor, &report, helper.ServiceMonitorChanged); err != nil { @@ -41,7 +41,7 @@ func (c *AgentController) reconcileMetricsService(ctx context.Context, target *f } } - if c.AvailableAPIs.HasPromRule() { + if c.ClusterInfo.HasPromRule() { promRules := c.agentPrometheusRule(target) if err := reconcilers.GenericReconcile(ctx, c.Managed, &c.Client, c.prometheusRule, promRules, &report, helper.PrometheusRuleChanged); err != nil { return err diff --git a/controllers/ebpf/internal/permissions/permissions.go b/controllers/ebpf/internal/permissions/permissions.go index 11ef569af..542669190 100644 --- 
a/controllers/ebpf/internal/permissions/permissions.go +++ b/controllers/ebpf/internal/permissions/permissions.go @@ -128,7 +128,7 @@ func (c *Reconciler) reconcileServiceAccount(ctx context.Context) error { func (c *Reconciler) reconcileVendorPermissions( ctx context.Context, desired *flowslatest.FlowCollectorEBPF, ) error { - if c.UseOpenShiftSCC { + if c.ClusterInfo.IsOpenShift() { return c.reconcileOpenshiftPermissions(ctx, desired) } return nil diff --git a/controllers/flowcollector_controller.go b/controllers/flowcollector_controller.go index 3bb4df6b1..d6a8e190b 100644 --- a/controllers/flowcollector_controller.go +++ b/controllers/flowcollector_controller.go @@ -56,15 +56,15 @@ func Start(ctx context.Context, mgr *manager.Manager) error { Owns(&corev1.Service{}). Owns(&corev1.ServiceAccount{}) - if mgr.IsOpenShift() { + if mgr.ClusterInfo.IsOpenShift() { builder.Owns(&securityv1.SecurityContextConstraints{}) } - if mgr.HasConsolePlugin() { + if mgr.ClusterInfo.HasConsolePlugin() { builder.Owns(&osv1.ConsolePlugin{}) } else { log.Info("Console not detected: the console plugin is not available") } - if !mgr.HasCNO() { + if !mgr.ClusterInfo.HasCNO() { log.Info("CNO not detected: using ovnKubernetes config and reconciler") } @@ -181,8 +181,7 @@ func (r *FlowCollectorReconciler) newCommonInfo(clh *helper.Client, ns, prevNs s Client: *clh, Namespace: ns, PreviousNamespace: prevNs, - UseOpenShiftSCC: r.mgr.IsOpenShift(), - AvailableAPIs: &r.mgr.AvailableAPIs, + ClusterInfo: r.mgr.ClusterInfo, Watcher: r.watcher, Loki: loki, IsDownstream: r.mgr.Config.DownstreamDeployment, diff --git a/controllers/flp/flp_common_objects.go b/controllers/flp/flp_common_objects.go index 371cd8d6b..32dbd063f 100644 --- a/controllers/flp/flp_common_objects.go +++ b/controllers/flp/flp_common_objects.go @@ -143,7 +143,7 @@ func (b *builder) NewKafkaPipeline() PipelineBuilder { } func (b *builder) initPipeline(ingest config.PipelineBuilderStage) PipelineBuilder { - pipeline := 
newPipelineBuilder(b.desired, b.flowMetrics, b.detectedSubnets, b.info.Loki, b.info.ClusterID, &b.volumes, &ingest) + pipeline := newPipelineBuilder(b.desired, b.flowMetrics, b.detectedSubnets, b.info.Loki, b.info.ClusterInfo.ID, &b.volumes, &ingest) b.pipeline = &pipeline return pipeline } diff --git a/controllers/flp/flp_controller.go b/controllers/flp/flp_controller.go index 37a00d876..1fa1f1921 100644 --- a/controllers/flp/flp_controller.go +++ b/controllers/flp/flp_controller.go @@ -34,7 +34,6 @@ type Reconciler struct { mgr *manager.Manager watcher *watchers.Watcher status status.Instance - clusterID string currentNamespace string } @@ -126,19 +125,13 @@ func (r *Reconciler) reconcile(ctx context.Context, clh *helper.Client, fc *flow r.watcher.Reset(ns) // obtain default cluster ID - api is specific to openshift - if r.mgr.IsOpenShift() && r.clusterID == "" { - cversion := &configv1.ClusterVersion{} - key := client.ObjectKey{Name: "version"} - if err := r.Client.Get(ctx, key, cversion); err != nil { - log.Error(err, "unable to obtain cluster ID") - } else { - r.clusterID = string(cversion.Spec.ClusterID) - } + if err := r.mgr.ClusterInfo.CheckClusterInfo(ctx, r.Client); err != nil { + log.Error(err, "unable to obtain cluster ID") } // Auto-detect subnets var subnetLabels []flowslatest.SubnetLabel - if r.mgr.IsOpenShift() && helper.AutoDetectOpenShiftNetworks(&fc.Spec.Processor) { + if r.mgr.ClusterInfo.IsOpenShift() && helper.AutoDetectOpenShiftNetworks(&fc.Spec.Processor) { var err error subnetLabels, err = r.getOpenShiftSubnets(ctx) if err != nil { @@ -190,11 +183,9 @@ func (r *Reconciler) newCommonInfo(clh *helper.Client, ns, prevNs string, loki * Client: *clh, Namespace: ns, PreviousNamespace: prevNs, - UseOpenShiftSCC: r.mgr.IsOpenShift(), - AvailableAPIs: &r.mgr.AvailableAPIs, + ClusterInfo: r.mgr.ClusterInfo, Watcher: r.watcher, Loki: loki, - ClusterID: r.clusterID, IsDownstream: r.mgr.Config.DownstreamDeployment, } } @@ -278,7 +269,7 @@ func (r 
*Reconciler) getOpenShiftSubnets(ctx context.Context) ([]flowslatest.Sub var subnets []flowslatest.SubnetLabel // Pods and Services subnets are found in CNO config - if r.mgr.HasCNO() { + if r.mgr.ClusterInfo.HasCNO() { network := &configv1.Network{} err := r.Get(ctx, types.NamespacedName{Name: "cluster"}, network) if err != nil { diff --git a/controllers/flp/flp_monolith_objects.go b/controllers/flp/flp_monolith_objects.go index 9a4a415b3..46d34f136 100644 --- a/controllers/flp/flp_monolith_objects.go +++ b/controllers/flp/flp_monolith_objects.go @@ -23,7 +23,7 @@ func newMonolithBuilder(info *reconcilers.Instance, desired *flowslatest.FlowCol } func (b *monolithBuilder) daemonSet(annotations map[string]string) *appsv1.DaemonSet { - pod := b.generic.podTemplate(true /*listens*/, !b.generic.info.UseOpenShiftSCC, annotations) + pod := b.generic.podTemplate(true /*listens*/, !b.generic.info.ClusterInfo.IsOpenShift(), annotations) return &appsv1.DaemonSet{ ObjectMeta: metav1.ObjectMeta{ Name: b.generic.name(), diff --git a/controllers/flp/flp_monolith_reconciler.go b/controllers/flp/flp_monolith_reconciler.go index 44d89b6e3..d7f7e1090 100644 --- a/controllers/flp/flp_monolith_reconciler.go +++ b/controllers/flp/flp_monolith_reconciler.go @@ -43,10 +43,10 @@ func newMonolithReconciler(cmn *reconcilers.Instance) *monolithReconciler { roleBindingIn: cmn.Managed.NewCRB(RoleBindingMonoName(ConfKafkaIngester)), roleBindingTr: cmn.Managed.NewCRB(RoleBindingMonoName(ConfKafkaTransformer)), } - if cmn.AvailableAPIs.HasSvcMonitor() { + if cmn.ClusterInfo.HasSvcMonitor() { rec.serviceMonitor = cmn.Managed.NewServiceMonitor(serviceMonitorName(ConfMonolith)) } - if cmn.AvailableAPIs.HasPromRule() { + if cmn.ClusterInfo.HasPromRule() { rec.prometheusRule = cmn.Managed.NewPrometheusRule(prometheusRuleName(ConfMonolith)) } return &rec @@ -160,13 +160,13 @@ func (r *monolithReconciler) reconcilePrometheusService(ctx context.Context, bui if err := r.ReconcileService(ctx, 
r.promService, builder.promService(), &report); err != nil { return err } - if r.AvailableAPIs.HasSvcMonitor() { + if r.ClusterInfo.HasSvcMonitor() { serviceMonitor := builder.generic.serviceMonitor() if err := reconcilers.GenericReconcile(ctx, r.Managed, &r.Client, r.serviceMonitor, serviceMonitor, &report, helper.ServiceMonitorChanged); err != nil { return err } } - if r.AvailableAPIs.HasPromRule() { + if r.ClusterInfo.HasPromRule() { promRules := builder.generic.prometheusRule() if err := reconcilers.GenericReconcile(ctx, r.Managed, &r.Client, r.prometheusRule, promRules, &report, helper.PrometheusRuleChanged); err != nil { return err @@ -194,7 +194,7 @@ func (r *monolithReconciler) reconcilePermissions(ctx context.Context, builder * return r.CreateOwned(ctx, builder.serviceAccount()) } // We only configure name, update is not needed for now - cr := buildClusterRoleIngester(r.UseOpenShiftSCC) + cr := buildClusterRoleIngester(r.ClusterInfo.IsOpenShift()) if err := r.ReconcileClusterRole(ctx, cr); err != nil { return err } diff --git a/controllers/flp/flp_test.go b/controllers/flp/flp_test.go index 7cbb3cb33..05b32fbb9 100644 --- a/controllers/flp/flp_test.go +++ b/controllers/flp/flp_test.go @@ -28,6 +28,7 @@ import ( metricslatest "github.com/netobserv/network-observability-operator/apis/flowmetrics/v1alpha1" "github.com/netobserv/network-observability-operator/controllers/constants" "github.com/netobserv/network-observability-operator/controllers/reconcilers" + "github.com/netobserv/network-observability-operator/pkg/cluster" "github.com/netobserv/network-observability-operator/pkg/helper" "github.com/netobserv/network-observability-operator/pkg/manager/status" @@ -170,14 +171,14 @@ func getAutoScalerSpecs() (ascv2.HorizontalPodAutoscaler, flowslatest.FlowCollec func monoBuilder(ns string, cfg *flowslatest.FlowCollectorSpec) monolithBuilder { loki := helper.NewLokiConfig(&cfg.Loki, "any") - info := reconcilers.Common{Namespace: ns, Loki: &loki} + info := 
reconcilers.Common{Namespace: ns, Loki: &loki, ClusterInfo: &cluster.Info{}} b, _ := newMonolithBuilder(info.NewInstance(image, status.Instance{}), cfg, &metricslatest.FlowMetricList{}, nil) return b } func transfBuilder(ns string, cfg *flowslatest.FlowCollectorSpec) transfoBuilder { loki := helper.NewLokiConfig(&cfg.Loki, "any") - info := reconcilers.Common{Namespace: ns, Loki: &loki} + info := reconcilers.Common{Namespace: ns, Loki: &loki, ClusterInfo: &cluster.Info{}} b, _ := newTransfoBuilder(info.NewInstance(image, status.Instance{}), cfg, &metricslatest.FlowMetricList{}, nil) return b } @@ -552,7 +553,7 @@ func TestServiceMonitorChanged(t *testing.T) { assert.Contains(report.String(), "ServiceMonitor spec changed") // Check labels change - info := reconcilers.Common{Namespace: "namespace2"} + info := reconcilers.Common{Namespace: "namespace2", ClusterInfo: &cluster.Info{}} b, _ = newMonolithBuilder(info.NewInstance(image2, status.Instance{}), &cfg, b.generic.flowMetrics, nil) third := b.generic.serviceMonitor() @@ -605,7 +606,7 @@ func TestPrometheusRuleChanged(t *testing.T) { assert.Contains(report.String(), "PrometheusRule spec changed") // Check labels change - info := reconcilers.Common{Namespace: "namespace2"} + info := reconcilers.Common{Namespace: "namespace2", ClusterInfo: &cluster.Info{}} b, _ = newMonolithBuilder(info.NewInstance(image2, status.Instance{}), &cfg, b.generic.flowMetrics, nil) third := b.generic.prometheusRule() @@ -754,7 +755,7 @@ func TestLabels(t *testing.T) { assert := assert.New(t) cfg := getConfig() - info := reconcilers.Common{Namespace: "ns"} + info := reconcilers.Common{Namespace: "ns", ClusterInfo: &cluster.Info{}} builder, _ := newMonolithBuilder(info.NewInstance(image, status.Instance{}), &cfg, &metricslatest.FlowMetricList{}, nil) tBuilder, _ := newTransfoBuilder(info.NewInstance(image, status.Instance{}), &cfg, &metricslatest.FlowMetricList{}, nil) diff --git a/controllers/flp/flp_transfo_reconciler.go 
b/controllers/flp/flp_transfo_reconciler.go index cd74eda70..b3a395dc5 100644 --- a/controllers/flp/flp_transfo_reconciler.go +++ b/controllers/flp/flp_transfo_reconciler.go @@ -44,10 +44,10 @@ func newTransformerReconciler(cmn *reconcilers.Instance) *transformerReconciler dynamicConfigMap: cmn.Managed.NewConfigMap(dynamicConfigMapName(ConfKafkaTransformer)), roleBinding: cmn.Managed.NewCRB(RoleBindingName(ConfKafkaTransformer)), } - if cmn.AvailableAPIs.HasSvcMonitor() { + if cmn.ClusterInfo.HasSvcMonitor() { rec.serviceMonitor = cmn.Managed.NewServiceMonitor(serviceMonitorName(ConfKafkaTransformer)) } - if cmn.AvailableAPIs.HasPromRule() { + if cmn.ClusterInfo.HasPromRule() { rec.prometheusRule = cmn.Managed.NewPrometheusRule(prometheusRuleName(ConfKafkaTransformer)) } return &rec @@ -198,13 +198,13 @@ func (r *transformerReconciler) reconcilePrometheusService(ctx context.Context, if err := r.ReconcileService(ctx, r.promService, builder.promService(), &report); err != nil { return err } - if r.AvailableAPIs.HasSvcMonitor() { + if r.ClusterInfo.HasSvcMonitor() { serviceMonitor := builder.generic.serviceMonitor() if err := reconcilers.GenericReconcile(ctx, r.Managed, &r.Client, r.serviceMonitor, serviceMonitor, &report, helper.ServiceMonitorChanged); err != nil { return err } } - if r.AvailableAPIs.HasPromRule() { + if r.ClusterInfo.HasPromRule() { promRules := builder.generic.prometheusRule() if err := reconcilers.GenericReconcile(ctx, r.Managed, &r.Client, r.prometheusRule, promRules, &report, helper.PrometheusRuleChanged); err != nil { return err diff --git a/controllers/flp/metrics_api_test.go b/controllers/flp/metrics_api_test.go index 6ca0fabe9..72e20c63d 100644 --- a/controllers/flp/metrics_api_test.go +++ b/controllers/flp/metrics_api_test.go @@ -12,6 +12,7 @@ import ( metricslatest "github.com/netobserv/network-observability-operator/apis/flowmetrics/v1alpha1" "github.com/netobserv/network-observability-operator/controllers/reconcilers" + 
"github.com/netobserv/network-observability-operator/pkg/cluster" "github.com/netobserv/network-observability-operator/pkg/helper" "github.com/netobserv/network-observability-operator/pkg/manager/status" ) @@ -33,7 +34,7 @@ func getConfiguredMetrics(cm *corev1.ConfigMap) (api.MetricsItems, error) { func defaultBuilderWithMetrics(metrics *metricslatest.FlowMetricList) (monolithBuilder, error) { cfg := getConfig() loki := helper.NewLokiConfig(&cfg.Loki, "any") - info := reconcilers.Common{Namespace: "namespace", Loki: &loki} + info := reconcilers.Common{Namespace: "namespace", Loki: &loki, ClusterInfo: &cluster.Info{}} return newMonolithBuilder(info.NewInstance(image, status.Instance{}), &cfg, metrics, nil) } diff --git a/controllers/monitoring/monitoring_controller.go b/controllers/monitoring/monitoring_controller.go index 35e777a96..06951066f 100644 --- a/controllers/monitoring/monitoring_controller.go +++ b/controllers/monitoring/monitoring_controller.go @@ -122,7 +122,7 @@ func (r *Reconciler) reconcile(ctx context.Context, clh *helper.Client, desired } // Dashboards - if r.mgr.IsOpenShift() && r.mgr.HasSvcMonitor() { + if r.mgr.ClusterInfo.IsOpenShift() && r.mgr.ClusterInfo.HasSvcMonitor() { // List custom metrics fm := metricslatest.FlowMetricList{} if err := r.Client.List(ctx, &fm, &client.ListOptions{Namespace: ns}); err != nil { diff --git a/controllers/networkpolicy/np_objects.go b/controllers/networkpolicy/np_objects.go index 39790e007..11b73dae3 100644 --- a/controllers/networkpolicy/np_objects.go +++ b/controllers/networkpolicy/np_objects.go @@ -55,8 +55,8 @@ func buildMainNetworkPolicy(desired *flowslatest.FlowCollector, mgr *manager.Man }, } - if mgr.IsOpenShift() { - if helper.UseConsolePlugin(&desired.Spec) && mgr.AvailableAPIs.HasConsolePlugin() { + if mgr.ClusterInfo.IsOpenShift() { + if helper.UseConsolePlugin(&desired.Spec) && mgr.ClusterInfo.HasConsolePlugin() { advanced := helper.GetAdvancedPluginConfig(desired.Spec.ConsolePlugin.Advanced) 
np.Spec.Ingress = append(np.Spec.Ingress, networkingv1.NetworkPolicyIngressRule{ From: []networkingv1.NetworkPolicyPeer{ @@ -117,7 +117,7 @@ func buildPrivilegedNetworkPolicy(desired *flowslatest.FlowCollector, mgr *manag }, } - if mgr.IsOpenShift() { + if mgr.ClusterInfo.IsOpenShift() { if mgr.Config.DownstreamDeployment { np.Spec.Ingress = append(np.Spec.Ingress, networkingv1.NetworkPolicyIngressRule{ From: []networkingv1.NetworkPolicyPeer{ diff --git a/controllers/networkpolicy/np_test.go b/controllers/networkpolicy/np_test.go index bc6a02c33..ad1c6b59c 100644 --- a/controllers/networkpolicy/np_test.go +++ b/controllers/networkpolicy/np_test.go @@ -4,6 +4,7 @@ import ( "testing" flowslatest "github.com/netobserv/network-observability-operator/apis/flowcollector/v1beta2" + "github.com/netobserv/network-observability-operator/pkg/cluster" "github.com/netobserv/network-observability-operator/pkg/manager" "github.com/stretchr/testify/assert" @@ -70,7 +71,7 @@ func TestNpBuilder(t *testing.T) { assert := assert.New(t) desired := getConfig() - mgr := &manager.Manager{} + mgr := &manager.Manager{ClusterInfo: &cluster.Info{}} desired.Spec.NetworkPolicy.Enable = nil name, np := buildMainNetworkPolicy(&desired, mgr) diff --git a/controllers/reconcilers/common.go b/controllers/reconcilers/common.go index dabfe9bdc..73ef3bf1f 100644 --- a/controllers/reconcilers/common.go +++ b/controllers/reconcilers/common.go @@ -4,7 +4,7 @@ import ( "context" "github.com/netobserv/network-observability-operator/controllers/constants" - "github.com/netobserv/network-observability-operator/pkg/discover" + "github.com/netobserv/network-observability-operator/pkg/cluster" "github.com/netobserv/network-observability-operator/pkg/helper" "github.com/netobserv/network-observability-operator/pkg/manager/status" "github.com/netobserv/network-observability-operator/pkg/watchers" @@ -17,10 +17,8 @@ type Common struct { Watcher *watchers.Watcher Namespace string PreviousNamespace string - 
UseOpenShiftSCC bool - AvailableAPIs *discover.AvailableAPIs + ClusterInfo *cluster.Info Loki *helper.LokiConfig - ClusterID string IsDownstream bool } diff --git a/go.mod b/go.mod index 00f2bfba8..f65a0e568 100644 --- a/go.mod +++ b/go.mod @@ -5,6 +5,7 @@ go 1.22.3 toolchain go1.22.4 require ( + github.com/coreos/go-semver v0.3.1 github.com/go-logr/logr v1.4.2 github.com/netobserv/flowlogs-pipeline v1.6.1-crc0.0.20240920063618-2ea1a7ce77b8 github.com/onsi/ginkgo/v2 v2.20.2 diff --git a/go.sum b/go.sum index 721be6898..165b4104d 100644 --- a/go.sum +++ b/go.sum @@ -2,6 +2,8 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= +github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= diff --git a/main.go b/main.go index bb223a3b3..82183014b 100644 --- a/main.go +++ b/main.go @@ -25,22 +25,19 @@ import ( _ "net/http/pprof" "os" - "go.uber.org/zap/zapcore" - // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) // to ensure that exec-entrypoint and run can make use of them. 
osv1 "github.com/openshift/api/console/v1" operatorsv1 "github.com/openshift/api/operator/v1" securityv1 "github.com/openshift/api/security/v1" monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + "go.uber.org/zap/zapcore" ascv2 "k8s.io/api/autoscaling/v2" corev1 "k8s.io/api/core/v1" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - "k8s.io/apimachinery/pkg/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime" clientgoscheme "k8s.io/client-go/kubernetes/scheme" - _ "k8s.io/client-go/plugin/pkg/client/auth" apiregv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1" ctrl "sigs.k8s.io/controller-runtime" diff --git a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go new file mode 100644 index 000000000..1cb657c07 --- /dev/null +++ b/pkg/cluster/cluster.go @@ -0,0 +1,154 @@ +package cluster + +import ( + "context" + "errors" + "fmt" + + "github.com/coreos/go-semver/semver" + configv1 "github.com/openshift/api/config/v1" + osv1 "github.com/openshift/api/console/v1" + operatorv1 "github.com/openshift/api/operator/v1" + securityv1 "github.com/openshift/api/security/v1" + monv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + "k8s.io/client-go/discovery" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type Info struct { + ID string + openShiftVersion *semver.Version + apisMap map[string]bool + fetchedClusterVersion bool +} + +var ( + consolePlugin = "consoleplugins." + osv1.GroupVersion.String() + cno = "networks." + operatorv1.GroupVersion.String() + svcMonitor = "servicemonitors." + monv1.SchemeGroupVersion.String() + promRule = "prometheusrules." + monv1.SchemeGroupVersion.String() + ocpSecurity = "securitycontextconstraints." 
+ securityv1.SchemeGroupVersion.String() +) + +func NewInfo(dcl *discovery.DiscoveryClient) (Info, error) { + info := Info{} + if err := info.fetchAvailableAPIs(dcl); err != nil { + return info, err + } + return info, nil +} + +func (c *Info) CheckClusterInfo(ctx context.Context, cl client.Client) error { + if c.IsOpenShift() && !c.fetchedClusterVersion { + if err := c.fetchOpenShiftClusterVersion(ctx, cl); err != nil { + return err + } + } + return nil +} + +func (c *Info) fetchAvailableAPIs(client *discovery.DiscoveryClient) error { + c.apisMap = map[string]bool{ + consolePlugin: false, + cno: false, + svcMonitor: false, + promRule: false, + ocpSecurity: false, + } + _, resources, err := client.ServerGroupsAndResources() + if err != nil { + return err + } + for apiName := range c.apisMap { + out: + for i := range resources { + for j := range resources[i].APIResources { + gvk := resources[i].APIResources[j].Name + "." + resources[i].GroupVersion + if apiName == gvk { + c.apisMap[apiName] = true + break out + } + } + } + } + return nil +} + +func (c *Info) fetchOpenShiftClusterVersion(ctx context.Context, cl client.Client) error { + key := client.ObjectKey{Name: "version"} + cversion := &configv1.ClusterVersion{} + if err := cl.Get(ctx, key, cversion); err != nil { + return fmt.Errorf("could not fetch ClusterVersion: %w", err) + } + c.ID = string(cversion.Spec.ClusterID) + // Get version; use the same method as via `oc get clusterversion`, where printed column uses jsonPath: + // .status.history[?(@.state=="Completed")].version + for _, history := range cversion.Status.History { + if history.State == "Completed" { + c.openShiftVersion = semver.New(history.Version) + break + } + } + c.fetchedClusterVersion = true + return nil +} + +// MockOpenShiftVersion shouldn't be used except for testing +func (c *Info) MockOpenShiftVersion(v string) { + if c.apisMap == nil { + c.apisMap = make(map[string]bool) + } + if v == "" { + // No OpenShift + c.apisMap[ocpSecurity] = 
false + c.openShiftVersion = nil + } else { + c.apisMap[ocpSecurity] = true + c.openShiftVersion = semver.New(v) + } +} + +func (c *Info) GetOpenShiftVersion() string { + return c.openShiftVersion.String() +} + +func (c *Info) OpenShiftVersionIsAtLeast(v string) (bool, error) { + if c.openShiftVersion == nil { + return false, errors.New("OpenShift version not defined, can't compare versions") + } + version, err := semver.NewVersion(v) + if err != nil { + return false, err + } + return !c.openShiftVersion.LessThan(*version), nil +} + +// IsOpenShift assumes having openshift SCC API <=> being on openshift +func (c *Info) IsOpenShift() bool { + return c.HasOCPSecurity() +} + +// HasConsolePlugin returns true if "consoleplugins.console.openshift.io" API was found +func (c *Info) HasConsolePlugin() bool { + return c.apisMap[consolePlugin] +} + +// HasOCPSecurity returns true if "securitycontextconstraints.security.openshift.io" API was found +func (c *Info) HasOCPSecurity() bool { + return c.apisMap[ocpSecurity] +} + +// HasCNO returns true if "networks.operator.openshift.io" API was found +func (c *Info) HasCNO() bool { + return c.apisMap[cno] +} + +// HasSvcMonitor returns true if "servicemonitors.monitoring.coreos.com" API was found +func (c *Info) HasSvcMonitor() bool { + return c.apisMap[svcMonitor] +} + +// HasPromRule returns true if "prometheusrules.monitoring.coreos.com" API was found +func (c *Info) HasPromRule() bool { + return c.apisMap[promRule] +} diff --git a/pkg/discover/apis.go b/pkg/discover/apis.go deleted file mode 100644 index b821fdffd..000000000 --- a/pkg/discover/apis.go +++ /dev/null @@ -1,66 +0,0 @@ -package discover - -import ( - osv1 "github.com/openshift/api/console/v1" - operatorv1 "github.com/openshift/api/operator/v1" - monv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" - "k8s.io/client-go/discovery" -) - -var ( - consolePlugin = "consoleplugins." + osv1.GroupVersion.String() - cno = "networks." 
+ operatorv1.GroupVersion.String() - svcMonitor = "servicemonitors." + monv1.SchemeGroupVersion.String() - promRule = "prometheusrules." + monv1.SchemeGroupVersion.String() -) - -// AvailableAPIs discovers the available APIs in the running cluster -type AvailableAPIs struct { - apisMap map[string]bool -} - -func NewAvailableAPIs(client *discovery.DiscoveryClient) (*AvailableAPIs, error) { - apiMap := map[string]bool{ - consolePlugin: false, - cno: false, - svcMonitor: false, - promRule: false, - } - _, resources, err := client.ServerGroupsAndResources() - if err != nil { - return nil, err - } - for apiName := range apiMap { - out: - for i := range resources { - for j := range resources[i].APIResources { - gvk := resources[i].APIResources[j].Name + "." + resources[i].GroupVersion - if apiName == gvk { - apiMap[apiName] = true - break out - } - } - } - } - return &AvailableAPIs{apisMap: apiMap}, nil -} - -// HasConsolePlugin returns true if "consoleplugins.console.openshift.io" API was found -func (c *AvailableAPIs) HasConsolePlugin() bool { - return c.apisMap[consolePlugin] -} - -// HasCNO returns true if "networks.operator.openshift.io" API was found -func (c *AvailableAPIs) HasCNO() bool { - return c.apisMap[cno] -} - -// HasSvcMonitor returns true if "servicemonitors.monitoring.coreos.com" API was found -func (c *AvailableAPIs) HasSvcMonitor() bool { - return c.apisMap[svcMonitor] -} - -// HasPromRule returns true if "prometheusrules.monitoring.coreos.com" API was found -func (c *AvailableAPIs) HasPromRule() bool { - return c.apisMap[promRule] -} diff --git a/pkg/discover/permissions.go b/pkg/discover/permissions.go deleted file mode 100644 index 9db4c154e..000000000 --- a/pkg/discover/permissions.go +++ /dev/null @@ -1,48 +0,0 @@ -package discover - -import ( - "context" - - securityv1 "github.com/openshift/api/security/v1" - "k8s.io/client-go/discovery" - "sigs.k8s.io/controller-runtime/pkg/log" -) - -// Vendor enumerates different Kubernetes distributions 
-type Vendor int - -const ( - VendorUnknown = iota - VendorVanilla - VendorOpenShift -) - -// Permissions discovers the actual security and permissions provider -type Permissions struct { - vendor Vendor - Client *discovery.DiscoveryClient -} - -// Vendor that provides the current permissions implementation -func (c *Permissions) Vendor(ctx context.Context) Vendor { - if c.vendor != VendorUnknown { - return c.vendor - } - rlog := log.FromContext(ctx) - groupsList, err := c.Client.ServerGroups() - if err != nil { - rlog.Error(err, "fetching vendor: couldn't retrieve API services") - return VendorUnknown - } - for i := range groupsList.Groups { - if groupsList.Groups[i].Name == securityv1.GroupName { - rlog.Info("fetching vendor: found OpenShift") - c.vendor = VendorOpenShift - return c.vendor - } - } - rlog.Info("fetching vendor: any of our registered vendors matched. " + - "Assuming vanilla Kubernetes") - c.vendor = VendorVanilla - return c.vendor -} diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go index 8e89b5fd6..75754b8fc 100644 --- a/pkg/manager/manager.go +++ b/pkg/manager/manager.go @@ -4,7 +4,8 @@ import ( "context" "fmt" - "github.com/netobserv/network-observability-operator/pkg/discover" + flowslatest "github.com/netobserv/network-observability-operator/apis/flowcollector/v1beta2" + "github.com/netobserv/network-observability-operator/pkg/cluster" "github.com/netobserv/network-observability-operator/pkg/manager/status" "github.com/netobserv/network-observability-operator/pkg/narrowcache" "k8s.io/client-go/discovery" @@ -38,11 +39,10 @@ type Registerer func(context.Context, *Manager) error type Manager struct { manager.Manager - discover.AvailableAPIs - Client client.Client - Status *status.Manager - Config *Config - vendor discover.Vendor + ClusterInfo *cluster.Info + Client client.Client + Status *status.Manager + Config *Config } func NewManager( @@ -73,20 +73,18 @@ func NewManager( if err != nil { return nil, fmt.Errorf("can't instantiate 
discovery client: %w", err) } - permissions := discover.Permissions{Client: dc} - vendor := permissions.Vendor(ctx) - apis, err := discover.NewAvailableAPIs(dc) + info, err := cluster.NewInfo(dc) if err != nil { - return nil, fmt.Errorf("can't discover available APIs: %w", err) + return nil, fmt.Errorf("can't collect cluster info: %w", err) } + flowslatest.CurrentClusterInfo = &info this := &Manager{ - Manager: internalManager, - AvailableAPIs: *apis, - Status: status.NewManager(), - Client: client, - Config: opcfg, - vendor: vendor, + Manager: internalManager, + ClusterInfo: &info, + Status: status.NewManager(), + Client: client, + Config: opcfg, } log.Info("Building controllers") @@ -96,13 +94,15 @@ func NewManager( } } + if err := internalManager.Add(manager.RunnableFunc(func(ctx context.Context) error { + return info.CheckClusterInfo(ctx, internalManager.GetClient()) + })); err != nil { + return nil, fmt.Errorf("can't collect more cluster info: %w", err) + } + return this, nil } func (m *Manager) GetClient() client.Client { return m.Client } - -func (m *Manager) IsOpenShift() bool { - return m.vendor == discover.VendorOpenShift -} diff --git a/pkg/test/envtest.go b/pkg/test/envtest.go index 581f11cd8..f80a4585e 100644 --- a/pkg/test/envtest.go +++ b/pkg/test/envtest.go @@ -127,6 +127,11 @@ func PrepareEnvTest(controllers []manager.Registerer, namespaces []string, baseP Expect(err).NotTo(HaveOccurred()) } + err = k8sClient.Create(ctx, &configv1.ClusterVersion{ + ObjectMeta: metav1.ObjectMeta{Name: "version"}, + }) + Expect(err).NotTo(HaveOccurred()) + k8sManager, err := manager.NewManager( context.Background(), cfg, diff --git a/vendor/github.com/coreos/go-semver/LICENSE b/vendor/github.com/coreos/go-semver/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/coreos/go-semver/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, 
REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/coreos/go-semver/NOTICE b/vendor/github.com/coreos/go-semver/NOTICE new file mode 100644 index 000000000..23a0ada2f --- /dev/null +++ b/vendor/github.com/coreos/go-semver/NOTICE @@ -0,0 +1,5 @@ +CoreOS Project +Copyright 2018 CoreOS, Inc + +This product includes software developed at CoreOS, Inc. +(http://www.coreos.com/). diff --git a/vendor/github.com/coreos/go-semver/semver/semver.go b/vendor/github.com/coreos/go-semver/semver/semver.go new file mode 100644 index 000000000..eb9fb7ff2 --- /dev/null +++ b/vendor/github.com/coreos/go-semver/semver/semver.go @@ -0,0 +1,296 @@ +// Copyright 2013-2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
// Package semver implements parsing and precedence comparison of
// semantic versions as defined by http://semver.org.

// Version is a parsed semantic version: MAJOR.MINOR.PATCH with an
// optional pre-release tag ("-...") and build metadata ("+...").
type Version struct {
	Major      int64
	Minor      int64
	Patch      int64
	PreRelease PreRelease
	Metadata   string
}

// PreRelease holds the dot-separated pre-release identifiers of a version.
type PreRelease string

// chopAfter removes everything after the first occurrence of delim from
// *s and returns the removed tail. When delim is absent, *s is left
// untouched and "" is returned.
func chopAfter(s *string, delim string) string {
	pieces := strings.SplitN(*s, delim, 2)
	if len(pieces) < 2 {
		return ""
	}
	*s = pieces[0]
	return pieces[1]
}

// New parses version and panics when it is not valid semver.
func New(version string) *Version {
	return Must(NewVersion(version))
}

// NewVersion parses version into a Version, reporting any syntax error.
func NewVersion(version string) (*Version, error) {
	var v Version
	if err := v.Set(version); err != nil {
		return nil, err
	}
	return &v, nil
}

// Must is a helper for wrapping NewVersion and will panic if err is not nil.
func Must(v *Version, err error) *Version {
	if err != nil {
		panic(err)
	}
	return v
}

// Set parses version and updates v in place. It also lets *Version
// satisfy flag.Value.
func (v *Version) Set(version string) error {
	// Strip build metadata first, then the pre-release tag, so only the
	// dotted numeric core remains in version.
	meta := chopAfter(&version, "+")
	pre := PreRelease(chopAfter(&version, "-"))

	fields := strings.SplitN(version, ".", 3)
	if len(fields) != 3 {
		return fmt.Errorf("%s is not in dotted-tri format", version)
	}

	if err := checkIdentifier(string(pre)); err != nil {
		return fmt.Errorf("failed to validate pre-release: %v", err)
	}
	if err := checkIdentifier(meta); err != nil {
		return fmt.Errorf("failed to validate metadata: %v", err)
	}

	var nums [3]int64
	for i, field := range fields {
		n, err := strconv.ParseInt(field, 10, 64)
		if err != nil {
			return err
		}
		nums[i] = n
	}

	v.Metadata = meta
	v.PreRelease = pre
	v.Major, v.Minor, v.Patch = nums[0], nums[1], nums[2]
	return nil
}

// String renders v in canonical "MAJOR.MINOR.PATCH[-pre][+meta]" form.
func (v Version) String() string {
	var sb strings.Builder
	fmt.Fprintf(&sb, "%d.%d.%d", v.Major, v.Minor, v.Patch)
	if v.PreRelease != "" {
		sb.WriteByte('-')
		sb.WriteString(string(v.PreRelease))
	}
	if v.Metadata != "" {
		sb.WriteByte('+')
		sb.WriteString(v.Metadata)
	}
	return sb.String()
}

// UnmarshalYAML decodes a YAML scalar into v.
func (v *Version) UnmarshalYAML(unmarshal func(interface{}) error) error {
	var raw string
	if err := unmarshal(&raw); err != nil {
		return err
	}
	return v.Set(raw)
}

// MarshalJSON encodes v as a quoted JSON string.
func (v Version) MarshalJSON() ([]byte, error) {
	return []byte(`"` + v.String() + `"`), nil
}

// UnmarshalJSON decodes a JSON string into v. Empty input and the
// empty string `""` are accepted and leave v unchanged.
func (v *Version) UnmarshalJSON(data []byte) error {
	n := len(data)
	if n == 0 || string(data) == `""` {
		return nil
	}
	if n < 2 || data[0] != '"' || data[n-1] != '"' {
		return errors.New("invalid semver string")
	}
	return v.Set(string(data[1 : n-1]))
}

// Compare tests if v is less than, equal to, or greater than versionB,
// returning -1, 0, or +1 respectively. Build metadata is ignored.
func (v Version) Compare(versionB Version) int {
	if c := compareParts(v.Slice(), versionB.Slice()); c != 0 {
		return c
	}
	return comparePreRelease(v, versionB)
}

// Equal tests if v is equal to versionB.
func (v Version) Equal(versionB Version) bool {
	return v.Compare(versionB) == 0
}

// LessThan tests if v is less than versionB.
func (v Version) LessThan(versionB Version) bool {
	return v.Compare(versionB) < 0
}

// Slice converts the comparable parts of the semver into a slice of integers.
func (v Version) Slice() []int64 {
	return []int64{v.Major, v.Minor, v.Patch}
}

// Slice splits the pre-release tag into its dot-separated identifiers.
func (p PreRelease) Slice() []string {
	return strings.Split(string(p), ".")
}

// comparePreRelease orders two otherwise-equal versions by their
// pre-release tags: a version without a tag ranks above one with a tag.
func comparePreRelease(versionA Version, versionB Version) int {
	pa, pb := versionA.PreRelease, versionB.PreRelease
	switch {
	case pa == "" && pb != "":
		return 1
	case pb == "" && pa != "":
		return -1
	}
	// Both carry a tag (or neither does): compare field by field.
	return comparePreReleaseFields(pa.Slice(), pb.Slice())
}

// compareParts compares the numeric version components pairwise.
// Both slices are assumed to have the same length (always 3 here).
func compareParts(a, b []int64) int {
	for i := range a {
		switch {
		case a[i] > b[i]:
			return 1
		case a[i] < b[i]:
			return -1
		}
	}
	return 0
}

// comparePreReleaseFields applies the semver precedence rules to the
// dot-separated pre-release identifiers of two versions.
func comparePreReleaseFields(fa, fb []string) int {
	for i := 0; ; i++ {
		// A larger set of fields has higher precedence when all of the
		// preceding fields are equal.
		if i >= len(fa) {
			if i < len(fb) {
				return -1
			}
			return 0
		}
		if i >= len(fb) {
			return 1
		}

		na, errA := strconv.Atoi(fa[i])
		nb, errB := strconv.Atoi(fb[i])
		aNum, bNum := errA == nil, errB == nil

		// Numeric identifiers always rank below non-numeric ones.
		if aNum && !bNum {
			return -1
		}
		if !aNum && bNum {
			return 1
		}
		if aNum && bNum {
			if na > nb {
				return 1
			}
			if na < nb {
				return -1
			}
		}
		// Equal numbers (e.g. "1" vs "01") or two non-numeric fields
		// fall back to a lexicographic tie-break.
		if fa[i] > fb[i] {
			return 1
		}
		if fa[i] < fb[i] {
			return -1
		}
	}
}

// BumpMajor increments the Major field by 1 and resets all other fields
// to their default values.
func (v *Version) BumpMajor() {
	v.Major++
	v.Minor = 0
	v.Patch = 0
	v.PreRelease = ""
	v.Metadata = ""
}

// BumpMinor increments the Minor field by 1 and resets Patch,
// PreRelease and Metadata to their default values.
func (v *Version) BumpMinor() {
	v.Minor++
	v.Patch = 0
	v.PreRelease = ""
	v.Metadata = ""
}

// BumpPatch increments the Patch field by 1 and resets PreRelease and
// Metadata to their default values.
func (v *Version) BumpPatch() {
	v.Patch++
	v.PreRelease = ""
	v.Metadata = ""
}

// checkIdentifier reports an error when id is non-empty and does not
// satisfy the semver grammar for pre-release/metadata identifiers.
func checkIdentifier(id string) error {
	if id != "" && !identifierRe.MatchString(id) {
		return fmt.Errorf("%s is not a valid semver identifier", id)
	}
	return nil
}

// identifierRe matches one or more dot-separated alphanumeric/hyphen
// identifiers, per the semver spec requirements for pre-release and
// metadata strings.
var identifierRe = regexp.MustCompile(`^[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*$`)
b/vendor/github.com/coreos/go-semver/semver/sort.go @@ -0,0 +1,38 @@ +// Copyright 2013-2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package semver + +import ( + "sort" +) + +type Versions []*Version + +func (s Versions) Len() int { + return len(s) +} + +func (s Versions) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s Versions) Less(i, j int) bool { + return s[i].LessThan(*s[j]) +} + +// Sort sorts the given slice of Version +func Sort(versions []*Version) { + sort.Sort(Versions(versions)) +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 50ffe2bc8..c948a2771 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -4,6 +4,9 @@ github.com/beorn7/perks/quantile # github.com/cespare/xxhash/v2 v2.3.0 ## explicit; go 1.11 github.com/cespare/xxhash/v2 +# github.com/coreos/go-semver v0.3.1 +## explicit; go 1.8 +github.com/coreos/go-semver/semver # github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc ## explicit github.com/davecgh/go-spew/spew