From fc1772b72b8f2ad53b4f963da8e415f369e1b908 Mon Sep 17 00:00:00 2001 From: Ben Elferink Date: Thu, 19 Dec 2024 19:35:37 +0200 Subject: [PATCH 1/3] [GEN-2073]: fix glitch with AppStore "configuredSources" during uninstrumentation of single source (#2035) This pull request includes changes to the `frontend/webapp` directory to improve state management and streamline source control operations. The most important changes involve the integration of `useAppStore` for managing configured sources and the removal of redundant imports. State management improvements: * [`frontend/webapp/containers/main/overview/multi-source-control/index.tsx`](diffhunk://#diff-cdd99a8fcd0484b24586f22b1f7b1bcf8d964bead53eeb0b2c07646c7301af70L35-R35): Updated `useAppStore` to no longer need a parameter for state selection. * [`frontend/webapp/hooks/sources/useSourceCRUD.ts`](diffhunk://#diff-1be86445dc0ed6bfdc08f7525c5800ef39bdcd70318a3c130f83f36f1ae9f5c8L2-R3): Integrated `useAppStore` for managing `configuredSources` and `setConfiguredSources`, and removed redundant import of `useNotificationStore`. 
[[1]](diffhunk://#diff-1be86445dc0ed6bfdc08f7525c5800ef39bdcd70318a3c130f83f36f1ae9f5c8L2-R3) [[2]](diffhunk://#diff-1be86445dc0ed6bfdc08f7525c5800ef39bdcd70318a3c130f83f36f1ae9f5c8R15-R16) [[3]](diffhunk://#diff-1be86445dc0ed6bfdc08f7525c5800ef39bdcd70318a3c130f83f36f1ae9f5c8R62) --- .../containers/main/overview/multi-source-control/index.tsx | 2 +- frontend/webapp/hooks/sources/useSourceCRUD.ts | 5 ++++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/frontend/webapp/containers/main/overview/multi-source-control/index.tsx b/frontend/webapp/containers/main/overview/multi-source-control/index.tsx index ed4861550..4181dda72 100644 --- a/frontend/webapp/containers/main/overview/multi-source-control/index.tsx +++ b/frontend/webapp/containers/main/overview/multi-source-control/index.tsx @@ -32,7 +32,7 @@ export const MultiSourceControl = () => { }); const { sources, deleteSources } = useSourceCRUD(); - const { configuredSources, setConfiguredSources } = useAppStore((state) => state); + const { configuredSources, setConfiguredSources } = useAppStore(); const [isWarnModalOpen, setIsWarnModalOpen] = useState(false); const totalSelected = useMemo(() => { diff --git a/frontend/webapp/hooks/sources/useSourceCRUD.ts b/frontend/webapp/hooks/sources/useSourceCRUD.ts index dc2f0ed27..9a2bdbdfc 100644 --- a/frontend/webapp/hooks/sources/useSourceCRUD.ts +++ b/frontend/webapp/hooks/sources/useSourceCRUD.ts @@ -1,7 +1,7 @@ import { useCallback } from 'react'; import { useMutation } from '@apollo/client'; -import { useNotificationStore } from '@/store'; import { ACTION, getSseTargetFromId } from '@/utils'; +import { useAppStore, useNotificationStore } from '@/store'; import { PERSIST_SOURCE, UPDATE_K8S_ACTUAL_SOURCE } from '@/graphql'; import { useComputePlatform, useNamespace } from '../compute-platform'; import { OVERVIEW_ENTITY_TYPES, type WorkloadId, type PatchSourceRequestInput, type K8sActualSource, NOTIFICATION_TYPE } from '@/types'; @@ -13,6 +13,8 @@ interface 
Params { export const useSourceCRUD = (params?: Params) => { const removeNotifications = useNotificationStore((store) => store.removeNotifications); + const { configuredSources, setConfiguredSources } = useAppStore(); + const { persistNamespace } = useNamespace(); const { data, refetch } = useComputePlatform(); const { addNotification } = useNotificationStore(); @@ -70,6 +72,7 @@ export const useSourceCRUD = (params?: Params) => { } else { const id = { kind, name, namespace }; if (!selected) removeNotifications(getSseTargetFromId(id, OVERVIEW_ENTITY_TYPES.SOURCE)); + if (!selected) setConfiguredSources({ ...configuredSources, [namespace]: configuredSources[namespace].filter((source) => source.name !== name) }); handleComplete(action, `source "${name}" was ${action.toLowerCase()}d ${fromOrIn} "${namespace}"`, selected ? id : undefined); } }, From c1958de1e81140e31899483e614b175cf671c7cc Mon Sep 17 00:00:00 2001 From: Ben Elferink Date: Thu, 19 Dec 2024 20:08:14 +0200 Subject: [PATCH 2/3] [GEN-2042]: do not display warning when closing drawer of "unknown instrumentation rule" (#2036) This pull request includes a small but important change to the `RuleDrawer` component in the `frontend/webapp/containers/main/instrumentation-rules/rule-drawer/index.tsx` file. The change adds an additional condition to the `handleEdit` function to improve the handling of unknown rule types. * [`frontend/webapp/containers/main/instrumentation-rules/rule-drawer/index.tsx`](diffhunk://#diff-759940924dd4ee0c6895f60e21ca040542d9cf1d3f69162677c9983d62b12f8cL76-R76): Modified the `handleEdit` function to include a condition that checks if the `bool` parameter is `true` or `undefined` before adding a notification for unknown rule types. 
--- .../containers/main/instrumentation-rules/rule-drawer/index.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frontend/webapp/containers/main/instrumentation-rules/rule-drawer/index.tsx b/frontend/webapp/containers/main/instrumentation-rules/rule-drawer/index.tsx index 8002a49b2..cdcbfeda9 100644 --- a/frontend/webapp/containers/main/instrumentation-rules/rule-drawer/index.tsx +++ b/frontend/webapp/containers/main/instrumentation-rules/rule-drawer/index.tsx @@ -71,7 +71,7 @@ export const RuleDrawer: React.FC = () => { const { id, item } = selectedItem as { id: string; item: InstrumentationRuleSpecMapped }; const handleEdit = (bool?: boolean) => { - if (item.type === InstrumentationRuleType.UNKNOWN_TYPE) { + if (item.type === InstrumentationRuleType.UNKNOWN_TYPE && (bool || bool === undefined)) { addNotification({ type: NOTIFICATION_TYPE.WARNING, title: FORM_ALERTS.FORBIDDEN, message: FORM_ALERTS.CANNOT_EDIT_RULE, crdType: OVERVIEW_ENTITY_TYPES.RULE, target: id }); } else { setIsEditing(typeof bool === 'boolean' ? bool : true); From 221efbd40a34f2abba5a5f2477479bf7c7f6b10d Mon Sep 17 00:00:00 2001 From: Amir Blum Date: Thu, 19 Dec 2024 23:00:27 +0200 Subject: [PATCH 3/3] refactor: use instrumentationconfig in autoscaler (#2025) Part of the effort to retire instrumentedapplication. This object behaves the same, so no changes in functionality are expected. Updated the RBAC roles and cleaned up unused permissions while at it (we need to do a bigger rbac swap in a different PR). Tested it to work with both cli and helm (with logs destination that triggers change in collector cm to collect logs). 
--- autoscaler/PROJECT | 2 +- .../controllers/datacollection/configmap.go | 12 +++++----- .../datacollection/configmap_test.go | 20 ++++++++--------- autoscaler/controllers/datacollection/root.go | 15 ++++++------- autoscaler/controllers/gateway/deployment.go | 6 ++--- .../instrumentedapplication_controller.go | 11 ++++------ autoscaler/main.go | 4 ++-- cli/cmd/resources/autoscaler.go | 22 +------------------ .../templates/autoscaler/clusterrole.yaml | 4 +--- 9 files changed, 35 insertions(+), 61 deletions(-) diff --git a/autoscaler/PROJECT b/autoscaler/PROJECT index 45667032b..7fa5618d2 100644 --- a/autoscaler/PROJECT +++ b/autoscaler/PROJECT @@ -25,7 +25,7 @@ resources: namespaced: true controller: true domain: odigos.io - kind: InstrumentedApplication + kind: InstrumentationConfig path: github.com/odigos-io/odigos/api/odigos/v1alpha1 version: v1alpha1 version: "3" diff --git a/autoscaler/controllers/datacollection/configmap.go b/autoscaler/controllers/datacollection/configmap.go index d1b318c96..f17baec96 100644 --- a/autoscaler/controllers/datacollection/configmap.go +++ b/autoscaler/controllers/datacollection/configmap.go @@ -28,7 +28,7 @@ import ( semconv "go.opentelemetry.io/otel/semconv/v1.26.0" ) -func SyncConfigMap(apps *odigosv1.InstrumentedApplicationList, dests *odigosv1.DestinationList, allProcessors *odigosv1.ProcessorList, +func SyncConfigMap(sources *odigosv1.InstrumentationConfigList, dests *odigosv1.DestinationList, allProcessors *odigosv1.ProcessorList, datacollection *odigosv1.CollectorsGroup, ctx context.Context, c client.Client, scheme *runtime.Scheme, disableNameProcessor bool) error { logger := log.FromContext(ctx) @@ -39,7 +39,7 @@ func SyncConfigMap(apps *odigosv1.InstrumentedApplicationList, dests *odigosv1.D SamplingExists := commonconf.FindFirstProcessorByType(allProcessors, "odigossampling") setTracesLoadBalancer := SamplingExists != nil - desired, err := getDesiredConfigMap(apps, dests, processors, datacollection, scheme, 
setTracesLoadBalancer, disableNameProcessor) + desired, err := getDesiredConfigMap(sources, dests, processors, datacollection, scheme, setTracesLoadBalancer, disableNameProcessor) if err != nil { logger.Error(err, "failed to get desired config map") return err @@ -96,9 +96,9 @@ func createConfigMap(desired *v1.ConfigMap, ctx context.Context, c client.Client return desired, nil } -func getDesiredConfigMap(apps *odigosv1.InstrumentedApplicationList, dests *odigosv1.DestinationList, processors []*odigosv1.Processor, +func getDesiredConfigMap(sources *odigosv1.InstrumentationConfigList, dests *odigosv1.DestinationList, processors []*odigosv1.Processor, datacollection *odigosv1.CollectorsGroup, scheme *runtime.Scheme, setTracesLoadBalancer bool, disableNameProcessor bool) (*v1.ConfigMap, error) { - cmData, err := calculateConfigMapData(datacollection, apps, dests, processors, setTracesLoadBalancer, disableNameProcessor) + cmData, err := calculateConfigMapData(datacollection, sources, dests, processors, setTracesLoadBalancer, disableNameProcessor) if err != nil { return nil, err } @@ -124,7 +124,7 @@ func getDesiredConfigMap(apps *odigosv1.InstrumentedApplicationList, dests *odig return &desired, nil } -func calculateConfigMapData(nodeCG *odigosv1.CollectorsGroup, apps *odigosv1.InstrumentedApplicationList, dests *odigosv1.DestinationList, processors []*odigosv1.Processor, +func calculateConfigMapData(nodeCG *odigosv1.CollectorsGroup, sources *odigosv1.InstrumentationConfigList, dests *odigosv1.DestinationList, processors []*odigosv1.Processor, setTracesLoadBalancer bool, disableNameProcessor bool) (string, error) { ownMetricsPort := nodeCG.Spec.CollectorOwnMetricsPort @@ -286,7 +286,7 @@ func calculateConfigMapData(nodeCG *odigosv1.CollectorsGroup, apps *odigosv1.Ins if collectLogs { includes := make([]string, 0) - for _, element := range apps.Items { + for _, element := range sources.Items { // Paths for log files: /var/log/pods/__//.log // Pod specifiers // 
Deployment: _--_ diff --git a/autoscaler/controllers/datacollection/configmap_test.go b/autoscaler/controllers/datacollection/configmap_test.go index 702e75fee..b35567fe7 100644 --- a/autoscaler/controllers/datacollection/configmap_test.go +++ b/autoscaler/controllers/datacollection/configmap_test.go @@ -63,9 +63,9 @@ func NewMockTestStatefulSet(ns *corev1.Namespace) *appsv1.StatefulSet { // givin a workload object (deployment, daemonset, statefulset) return a mock instrumented application // with a single container with the GoProgrammingLanguage -func NewMockInstrumentedApplication(workloadObject client.Object) *odigosv1.InstrumentedApplication { +func NewMockInstrumentationConfig(workloadObject client.Object) *odigosv1.InstrumentationConfig { gvk, _ := apiutil.GVKForObject(workloadObject, scheme.Scheme) - return &odigosv1.InstrumentedApplication{ + return &odigosv1.InstrumentationConfig{ ObjectMeta: metav1.ObjectMeta{ Name: workload.CalculateWorkloadRuntimeObjectName(workloadObject.GetName(), gvk.Kind), Namespace: workloadObject.GetNamespace(), @@ -79,9 +79,9 @@ func NewMockInstrumentedApplication(workloadObject client.Object) *odigosv1.Inst } } -func NewMockInstrumentedApplicationWoOwner(workloadObject client.Object) *odigosv1.InstrumentedApplication { +func NewMockInstrumentationConfigWoOwner(workloadObject client.Object) *odigosv1.InstrumentationConfig { gvk, _ := apiutil.GVKForObject(workloadObject, scheme.Scheme) - return &odigosv1.InstrumentedApplication{ + return &odigosv1.InstrumentationConfig{ ObjectMeta: metav1.ObjectMeta{ Name: workload.CalculateWorkloadRuntimeObjectName(workloadObject.GetName(), gvk.Kind), Namespace: workloadObject.GetNamespace(), @@ -118,15 +118,15 @@ func TestCalculateConfigMapData(t *testing.T) { ns := NewMockNamespace("default") ns2 := NewMockNamespace("other-namespace") - items := []v1alpha1.InstrumentedApplication{ - *NewMockInstrumentedApplication(NewMockTestDeployment(ns)), - 
*NewMockInstrumentedApplication(NewMockTestDaemonSet(ns)), - *NewMockInstrumentedApplication(NewMockTestStatefulSet(ns2)), - *NewMockInstrumentedApplicationWoOwner(NewMockTestDeployment(ns2)), + items := []v1alpha1.InstrumentationConfig{ + *NewMockInstrumentationConfig(NewMockTestDeployment(ns)), + *NewMockInstrumentationConfig(NewMockTestDaemonSet(ns)), + *NewMockInstrumentationConfig(NewMockTestStatefulSet(ns2)), + *NewMockInstrumentationConfigWoOwner(NewMockTestDeployment(ns2)), } got, err := calculateConfigMapData( - &v1alpha1.InstrumentedApplicationList{ + &v1alpha1.InstrumentationConfigList{ Items: items, }, NewMockDestinationList(), diff --git a/autoscaler/controllers/datacollection/root.go b/autoscaler/controllers/datacollection/root.go index 14a5dffdf..f7a3b1410 100644 --- a/autoscaler/controllers/datacollection/root.go +++ b/autoscaler/controllers/datacollection/root.go @@ -22,14 +22,13 @@ const ( func Sync(ctx context.Context, c client.Client, scheme *runtime.Scheme, imagePullSecrets []string, odigosVersion string, k8sVersion *version.Version, disableNameProcessor bool) error { logger := log.FromContext(ctx) - var instApps odigosv1.InstrumentedApplicationList - if err := c.List(ctx, &instApps); err != nil { - logger.Error(err, "Failed to list instrumented apps") + var sources odigosv1.InstrumentationConfigList + if err := c.List(ctx, &sources); err != nil { return err } - if len(instApps.Items) == 0 { - logger.V(3).Info("No instrumented apps") + if len(sources.Items) == 0 { + logger.V(3).Info("No odigos sources found, skipping data collection sync") return nil } @@ -52,16 +51,16 @@ func Sync(ctx context.Context, c client.Client, scheme *runtime.Scheme, imagePul return err } - return syncDataCollection(&instApps, &dests, &processors, &dataCollectionCollectorGroup, ctx, c, scheme, imagePullSecrets, odigosVersion, k8sVersion, disableNameProcessor) + return syncDataCollection(&sources, &dests, &processors, &dataCollectionCollectorGroup, ctx, c, scheme, 
imagePullSecrets, odigosVersion, k8sVersion, disableNameProcessor) } -func syncDataCollection(instApps *odigosv1.InstrumentedApplicationList, dests *odigosv1.DestinationList, processors *odigosv1.ProcessorList, +func syncDataCollection(sources *odigosv1.InstrumentationConfigList, dests *odigosv1.DestinationList, processors *odigosv1.ProcessorList, dataCollection *odigosv1.CollectorsGroup, ctx context.Context, c client.Client, scheme *runtime.Scheme, imagePullSecrets []string, odigosVersion string, k8sVersion *version.Version, disableNameProcessor bool) error { logger := log.FromContext(ctx) logger.V(0).Info("Syncing data collection") - err := SyncConfigMap(instApps, dests, processors, dataCollection, ctx, c, scheme, disableNameProcessor) + err := SyncConfigMap(sources, dests, processors, dataCollection, ctx, c, scheme, disableNameProcessor) if err != nil { logger.Error(err, "Failed to sync config map") return err diff --git a/autoscaler/controllers/gateway/deployment.go b/autoscaler/controllers/gateway/deployment.go index a89a66fd1..0fe51b29e 100644 --- a/autoscaler/controllers/gateway/deployment.go +++ b/autoscaler/controllers/gateway/deployment.go @@ -174,13 +174,13 @@ func getDesiredDeployment(dests *odigosv1.DestinationList, configDataHash string { // let the Go runtime know how many CPUs are available, // without this, Go will assume all the cores are available. 
- Name: "GOMAXPROCS", - ValueFrom: &corev1.EnvVarSource{ + Name: "GOMAXPROCS", + ValueFrom: &corev1.EnvVarSource{ ResourceFieldRef: &corev1.ResourceFieldSelector{ ContainerName: containerName, // limitCPU, Kubernetes automatically rounds up the value to an integer // (700m -> 1, 1200m -> 2) - Resource: "limits.cpu", + Resource: "limits.cpu", }, }, }, diff --git a/autoscaler/controllers/instrumentedapplication_controller.go b/autoscaler/controllers/instrumentedapplication_controller.go index 306198997..d669780a4 100644 --- a/autoscaler/controllers/instrumentedapplication_controller.go +++ b/autoscaler/controllers/instrumentedapplication_controller.go @@ -26,10 +26,9 @@ import ( "k8s.io/apimachinery/pkg/util/version" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/log" ) -type InstrumentedApplicationReconciler struct { +type InstrumentationConfigReconciler struct { client.Client Scheme *runtime.Scheme ImagePullSecrets []string @@ -38,9 +37,7 @@ type InstrumentedApplicationReconciler struct { DisableNameProcessor bool } -func (r *InstrumentedApplicationReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - logger := log.FromContext(ctx) - logger.V(0).Info("Reconciling InstrumentedApps") +func (r *InstrumentationConfigReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { err := datacollection.Sync(ctx, r.Client, r.Scheme, r.ImagePullSecrets, r.OdigosVersion, r.K8sVersion, r.DisableNameProcessor) if err != nil { return ctrl.Result{}, err @@ -49,9 +46,9 @@ func (r *InstrumentedApplicationReconciler) Reconcile(ctx context.Context, req c return ctrl.Result{}, nil } -func (r *InstrumentedApplicationReconciler) SetupWithManager(mgr ctrl.Manager) error { +func (r *InstrumentationConfigReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). - For(&odigosv1.InstrumentedApplication{}). 
+ For(&odigosv1.InstrumentationConfig{}). // this controller only cares about the instrumented application existence. // when it is created or removed, the node collector config map needs to be updated to scrape logs for it's pods. WithEventFilter(&predicate.ExistencePredicate{}). diff --git a/autoscaler/main.go b/autoscaler/main.go index e804e1cb4..457ed60ec 100644 --- a/autoscaler/main.go +++ b/autoscaler/main.go @@ -224,7 +224,7 @@ func main() { setupLog.Error(err, "unable to create controller", "controller", "CollectorsGroup") os.Exit(1) } - if err = (&controllers.InstrumentedApplicationReconciler{ + if err = (&controllers.InstrumentationConfigReconciler{ Client: mgr.GetClient(), Scheme: mgr.GetScheme(), ImagePullSecrets: imagePullSecrets, @@ -232,7 +232,7 @@ func main() { K8sVersion: k8sVersion, DisableNameProcessor: disableNameProcessor, }).SetupWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "InstrumentedApplication") + setupLog.Error(err, "unable to create controller", "controller", "InstrumentationConfig") os.Exit(1) } if err = (&controllers.SecretReconciler{ diff --git a/cli/cmd/resources/autoscaler.go b/cli/cmd/resources/autoscaler.go index 506e7770c..c75fa36d4 100644 --- a/cli/cmd/resources/autoscaler.go +++ b/cli/cmd/resources/autoscaler.go @@ -218,32 +218,12 @@ func NewAutoscalerClusterRole() *rbacv1.ClusterRole { }, { Verbs: []string{ - "create", - "delete", "get", "list", - "patch", - "update", "watch", }, APIGroups: []string{"odigos.io"}, - Resources: []string{"instrumentedapplications"}, - }, - { - Verbs: []string{ - "update", - }, - APIGroups: []string{"odigos.io"}, - Resources: []string{"instrumentedapplications/finalizers"}, - }, - { - Verbs: []string{ - "get", - "patch", - "update", - }, - APIGroups: []string{"odigos.io"}, - Resources: []string{"instrumentedapplications/status"}, + Resources: []string{"instrumentationconfigs"}, }, { Verbs: []string{ "create", diff --git 
a/helm/odigos/templates/autoscaler/clusterrole.yaml b/helm/odigos/templates/autoscaler/clusterrole.yaml index c1f33edb9..c94fec1fc 100644 --- a/helm/odigos/templates/autoscaler/clusterrole.yaml +++ b/helm/odigos/templates/autoscaler/clusterrole.yaml @@ -35,7 +35,7 @@ rules: - apiGroups: - odigos.io resources: - - instrumentedapplications + - instrumentationconfigs - collectorsgroups - odigosconfigurations - destinations @@ -52,7 +52,6 @@ rules: - odigos.io resources: - collectorsgroups/finalizers - - instrumentedapplications/finalizers - destinations/finalizers verbs: - update @@ -60,7 +59,6 @@ rules: - odigos.io resources: - collectorsgroups/status - - instrumentedapplications/status - destinations/status verbs: - get