From c22b7cdbbe1d7e7799049aa1d43a630b9a20b4a1 Mon Sep 17 00:00:00 2001 From: Ben Elferink Date: Thu, 19 Dec 2024 17:51:04 +0200 Subject: [PATCH 1/9] [GEN-2057]: make onClick optional for Tab component and adjust styling (#2030) This pull request includes several changes to the `frontend/webapp/reuseable-components/tab-list/index.tsx` file to enhance the `TabList` component's flexibility and behavior. The most important changes include making the `onClick` property optional, adding a new `$noClick` property to the `TabContainer` styled component, and modifying the `TabList` component export. Enhancements to `TabList` component: * Made the `onClick` property optional in the `TabProps` interface. * Added a new `$noClick` property to the `TabContainer` styled component to handle cases where `onClick` is not provided. [[1]](diffhunk://#diff-7ac3f19106b928b2789dbbef7ebbd1514c3faf68c7c6a15379c047bcaac56518L24-R35) [[2]](diffhunk://#diff-7ac3f19106b928b2789dbbef7ebbd1514c3faf68c7c6a15379c047bcaac56518L48-R51) * Removed the `onClick` property from the `Overview` tab in the `TABS` array. Codebase adjustments: * Changed the `TabList` component to be exported directly instead of using a separate export statement. [[1]](diffhunk://#diff-7ac3f19106b928b2789dbbef7ebbd1514c3faf68c7c6a15379c047bcaac56518L85-R83) [[2]](diffhunk://#diff-7ac3f19106b928b2789dbbef7ebbd1514c3faf68c7c6a15379c047bcaac56518L94-L95) --- .../reuseable-components/tab-list/index.tsx | 18 +++++++----------- 1 file changed, 7 insertions(+), 11 deletions(-) diff --git a/frontend/webapp/reuseable-components/tab-list/index.tsx b/frontend/webapp/reuseable-components/tab-list/index.tsx index 9ef07d3db..ddb196370 100644 --- a/frontend/webapp/reuseable-components/tab-list/index.tsx +++ b/frontend/webapp/reuseable-components/tab-list/index.tsx @@ -12,7 +12,7 @@ interface TabProps { icon: SVG; selected: boolean; disabled?: boolean; - onClick: () => void; + onClick?: () => void; } // Define types for the TabList component props @@ -21,18 +21,18 @@ interface TabListProps { } // Styled-components for Tab and TabList -const TabContainer = styled.div<{ $selected: TabProps['selected']; $disabled: TabProps['disabled'] }>` +const TabContainer = styled.div<{ $selected: TabProps['selected']; $disabled: TabProps['disabled']; $noClick: boolean }>` display: flex; align-items: center; padding: 10px 12px; border-radius: 32px; - cursor: ${({ $disabled }) => ($disabled ? 'not-allowed' : 'pointer')}; - background-color: ${({ $selected, theme }) => ($selected ? theme.colors.majestic_blue + hexPercentValues['024'] : theme.colors.card)}; + cursor: ${({ $noClick, $disabled }) => ($noClick ? 'unset' : $disabled ? 'not-allowed' : 'pointer')}; + background-color: ${({ $noClick, $selected, theme }) => ($noClick ? 'transparent' : $selected ? theme.colors.majestic_blue + hexPercentValues['024'] : theme.colors.card)}; opacity: ${({ $disabled }) => ($disabled ? 0.5 : 1)}; transition: background-color 0.3s, color 0.3s; &:hover { - background-color: ${({ $disabled, theme }) => ($disabled ? 'none' : theme.colors.majestic_blue + hexPercentValues['024'])}; + background-color: ${({ $noClick, $disabled, theme }) => ($noClick || $disabled ? 
'none' : theme.colors.majestic_blue + hexPercentValues['024'])}; } svg { @@ -45,11 +45,10 @@ const TabListContainer = styled.div` gap: 8px; `; -// Tab component const Tab: React.FC = ({ title, tooltip, icon: Icon, selected, disabled, onClick }) => { return ( - + {title} @@ -62,7 +61,6 @@ const TABS = [ title: 'Overview', icon: OverviewIcon, selected: true, - onClick: () => {}, }, // { // title: 'Service Map', @@ -82,7 +80,7 @@ const TABS = [ // }, ]; -const TabList: React.FC = ({ tabs = TABS }) => { +export const TabList: React.FC = ({ tabs = TABS }) => { return ( {tabs.map((tab) => ( @@ -91,5 +89,3 @@ const TabList: React.FC = ({ tabs = TABS }) => { ); }; - -export { TabList }; From 4a1debfa211febf53674f207cbc679ca622db879 Mon Sep 17 00:00:00 2001 From: Ben Elferink Date: Thu, 19 Dec 2024 18:19:15 +0200 Subject: [PATCH 2/9] [GEN-2065]: fix bug where UI crashed on attempt to edit "debug destination" (#2031) This pull request includes several changes to the `frontend/webapp/containers/main` directory, focusing on improving the handling of form items and destination types. The most important changes include removing unnecessary return statements, adding optional parameters, and renaming variables for clarity. Improvements to form item handling: * [`frontend/webapp/containers/main/actions/action-drawer/index.tsx`](diffhunk://#diff-5f56695cd2d0ca6bcd28f372653c71d8c4dab572b08715c1f36b7acc5cf50f60L67-L68): Removed an unnecessary return statement when no matching item is found. * [`frontend/webapp/containers/main/instrumentation-rules/rule-drawer/index.tsx`](diffhunk://#diff-759940924dd4ee0c6895f60e21ca040542d9cf1d3f69162677c9983d62b12f8cL65-L66): Removed an unnecessary return statement when no matching item is found. Enhancements to destination handling: * [`frontend/webapp/containers/main/destinations/destination-drawer/build-card.ts`](diffhunk://#diff-ce1c4d3c06218dab699d84dac4690cc261b1209716055609b79e97ab930e6862L10-R10): Added an optional parameter to the `buildCard` function to handle cases where `destinationTypeDetails` may be undefined. * [`frontend/webapp/containers/main/destinations/destination-drawer/index.tsx`](diffhunk://#diff-392a25e5740454eca88d8df2f33e965ed7abe29b0312e9ce5f82b8a8ae036badL59-R74): Updated the `cardData` memoization logic to handle cases where `destinationTypeDetails` is not provided. * [`frontend/webapp/containers/main/destinations/destination-drawer/index.tsx`](diffhunk://#diff-392a25e5740454eca88d8df2f33e965ed7abe29b0312e9ce5f82b8a8ae036badL122-R120): Renamed `thisDestination` to `thisDestinationType` for clarity and updated the corresponding references. 
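The crash fix boils down to a defensive pattern: instead of bailing out of the memo whenever `destinationTypeDetails` is missing (as it is for the debug destination, or while the details query is still in flight), the card builder takes the details as an optional argument and falls back to the raw field keys. A minimal sketch of that pattern — `FieldDetail`/`TypeDetails`/`buildRows` are simplified hypothetical stand-ins, not the actual odigos definitions:

```ts
// Simplified sketch of the pattern; FieldDetail/TypeDetails are hypothetical
// stand-ins, not the actual odigos types.
interface FieldDetail { name: string; displayName: string }
interface TypeDetails { fields: FieldDetail[] }

// `details` is optional: the drawer can render before the details query
// resolves, or for a destination type that has no registered details at all.
const buildRows = (fields: Record<string, string>, details?: TypeDetails) =>
  Object.entries(fields).map(([key, value]) => ({
    // Fall back to the raw field key when no matching detail exists.
    title: details?.fields.find((f) => f.name === key)?.displayName ?? key,
    value,
  }));

// Renders rows even when `details` is undefined, instead of crashing.
console.log(buildRows({ OTLP_ENDPOINT: 'http://localhost:4317' }));
```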
--- .../containers/main/actions/action-drawer/index.tsx | 2 -- .../main/destinations/destination-drawer/build-card.ts | 2 +- .../main/destinations/destination-drawer/index.tsx | 10 ++++------ .../main/instrumentation-rules/rule-drawer/index.tsx | 2 -- 4 files changed, 5 insertions(+), 11 deletions(-) diff --git a/frontend/webapp/containers/main/actions/action-drawer/index.tsx b/frontend/webapp/containers/main/actions/action-drawer/index.tsx index 22edc6c51..b24cb2803 100644 --- a/frontend/webapp/containers/main/actions/action-drawer/index.tsx +++ b/frontend/webapp/containers/main/actions/action-drawer/index.tsx @@ -64,8 +64,6 @@ export const ActionDrawer: React.FC = () => { ACTION_OPTIONS.find(({ id }) => id === 'attributes')?.items?.find(({ type }) => type === item.type) || ACTION_OPTIONS.find(({ id }) => id === 'sampler')?.items?.find(({ type }) => type === item.type); - if (!found) return undefined; - loadFormWithDrawerItem(selectedItem); return found; diff --git a/frontend/webapp/containers/main/destinations/destination-drawer/build-card.ts b/frontend/webapp/containers/main/destinations/destination-drawer/build-card.ts index 1ad9d095b..c21c3b28b 100644 --- a/frontend/webapp/containers/main/destinations/destination-drawer/build-card.ts +++ b/frontend/webapp/containers/main/destinations/destination-drawer/build-card.ts @@ -7,7 +7,7 @@ const buildMonitorsList = (exportedSignals: ExportedSignals): string => .filter((key) => exportedSignals[key]) .join(', '); -const buildCard = (destination: ActualDestination, destinationTypeDetails: DestinationDetailsResponse['destinationTypeDetails']) => { +const buildCard = (destination: ActualDestination, destinationTypeDetails?: DestinationDetailsResponse['destinationTypeDetails']) => { const { exportedSignals, destinationType, fields } = destination; const arr: DataCardRow[] = [ diff --git a/frontend/webapp/containers/main/destinations/destination-drawer/index.tsx b/frontend/webapp/containers/main/destinations/destination-drawer/index.tsx index f2276978b..24f8385d1 100644 --- a/frontend/webapp/containers/main/destinations/destination-drawer/index.tsx +++ b/frontend/webapp/containers/main/destinations/destination-drawer/index.tsx @@ -56,7 +56,7 @@ export const DestinationDrawer: React.FC = () => { const [isFormDirty, setIsFormDirty] = useState(false); const cardData = useMemo(() => { - if (!selectedItem || !destinationTypeDetails) return []; + if (!selectedItem) return []; const { item } = selectedItem as { item: ActualDestination }; const arr = buildCard(item, destinationTypeDetails); @@ -64,16 +64,14 @@ export const DestinationDrawer: React.FC = () => { return arr; }, [selectedItem, destinationTypeDetails]); - const thisDestination = useMemo(() => { + const thisDestinationType = useMemo(() => { if (!destinationTypes.length || !selectedItem || !isEditing) { resetFormData(); return undefined; } const { item } = selectedItem as { item: ActualDestination }; - const found = destinationTypes.map(({ items }) => items.filter(({ type }) => type === item.destinationType.type)).filter((arr) => !!arr.length)[0][0]; - - if (!found) return undefined; + const found = destinationTypes.map(({ items }) => items.filter(({ type }) => type === item.destinationType.type)).filter((arr) => !!arr.length)?.[0]?.[0]; loadFormWithDrawerItem(selectedItem); @@ -119,7 +117,7 @@ export const DestinationDrawer: React.FC = () => { = () => { const { item } = selectedItem as { item: InstrumentationRuleSpecMapped }; const found = RULE_OPTIONS.find(({ type }) => type === item.type); - if 
(!found) return undefined;
-
     loadFormWithDrawerItem(selectedItem);
 
     return found;

From b5795e7d4d46b44ca24f5f0d7abb438e6e52b73d Mon Sep 17 00:00:00 2001
From: Ben Elferink 
Date: Thu, 19 Dec 2024 18:19:15 +0200
Subject: [PATCH 3/9] [GEN-2074]: Implement polling mechanism for refetching data in useSourceCRUD (#2034)

---
 frontend/webapp/hooks/sources/useSourceCRUD.ts | 15 ++++++++++++++-
 1 file changed, 14 insertions(+), 1 deletion(-)

diff --git a/frontend/webapp/hooks/sources/useSourceCRUD.ts b/frontend/webapp/hooks/sources/useSourceCRUD.ts
index 82f30bdf7..dc2f0ed27 100644
--- a/frontend/webapp/hooks/sources/useSourceCRUD.ts
+++ b/frontend/webapp/hooks/sources/useSourceCRUD.ts
@@ -1,3 +1,4 @@
+import { useCallback } from 'react';
 import { useMutation } from '@apollo/client';
 import { useNotificationStore } from '@/store';
 import { ACTION, getSseTargetFromId } from '@/utils';
@@ -16,6 +17,18 @@ export const useSourceCRUD = (params?: Params) => {
   const { data, refetch } = useComputePlatform();
   const { addNotification } = useNotificationStore();
 
+  const startPolling = useCallback(async () => {
+    let retries = 0;
+    const maxRetries = 5;
+    const retryInterval = 1 * 1000; // time in milliseconds
+
+    while (retries < maxRetries) {
+      await new Promise((resolve) => setTimeout(resolve, retryInterval));
+      refetch();
+      retries++;
+    }
+  }, [refetch]);
+
   const notifyUser = (type: NOTIFICATION_TYPE, title: string, message: string, id?: WorkloadId) => {
     addNotification({
       type,
@@ -33,7 +46,7 @@ export const useSourceCRUD = (params?: Params) => {
 
   const handleComplete = (title: string, message: string, id?: WorkloadId) => {
     notifyUser(NOTIFICATION_TYPE.SUCCESS, title, message, id);
-    refetch();
+    startPolling();
     params?.onSuccess?.(title);
   };
 

From fc1772b72b8f2ad53b4f963da8e415f369e1b908 Mon Sep 17 00:00:00 2001
From: Ben Elferink 
Date: Thu, 19 Dec 2024 19:35:37 +0200
Subject: [PATCH 4/9] [GEN-2073]: fix glitch with AppStore "configuredSources" during uninstrumentation of single source (#2035)

This pull request includes changes to the `frontend/webapp` directory to improve state management and streamline source control operations. The most important changes involve the integration of `useAppStore` for managing configured sources and the removal of redundant imports.

State management improvements:

* [`frontend/webapp/containers/main/overview/multi-source-control/index.tsx`](diffhunk://#diff-cdd99a8fcd0484b24586f22b1f7b1bcf8d964bead53eeb0b2c07646c7301af70L35-R35): Updated `useAppStore` to no longer need a parameter for state selection.
* [`frontend/webapp/hooks/sources/useSourceCRUD.ts`](diffhunk://#diff-1be86445dc0ed6bfdc08f7525c5800ef39bdcd70318a3c130f83f36f1ae9f5c8L2-R3): Integrated `useAppStore` for managing `configuredSources` and `setConfiguredSources`, and removed the redundant import of `useNotificationStore`.
[[1]](diffhunk://#diff-1be86445dc0ed6bfdc08f7525c5800ef39bdcd70318a3c130f83f36f1ae9f5c8L2-R3) [[2]](diffhunk://#diff-1be86445dc0ed6bfdc08f7525c5800ef39bdcd70318a3c130f83f36f1ae9f5c8R15-R16) [[3]](diffhunk://#diff-1be86445dc0ed6bfdc08f7525c5800ef39bdcd70318a3c130f83f36f1ae9f5c8R62) --- .../containers/main/overview/multi-source-control/index.tsx | 2 +- frontend/webapp/hooks/sources/useSourceCRUD.ts | 5 ++++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/frontend/webapp/containers/main/overview/multi-source-control/index.tsx b/frontend/webapp/containers/main/overview/multi-source-control/index.tsx index ed4861550..4181dda72 100644 --- a/frontend/webapp/containers/main/overview/multi-source-control/index.tsx +++ b/frontend/webapp/containers/main/overview/multi-source-control/index.tsx @@ -32,7 +32,7 @@ export const MultiSourceControl = () => { }); const { sources, deleteSources } = useSourceCRUD(); - const { configuredSources, setConfiguredSources } = useAppStore((state) => state); + const { configuredSources, setConfiguredSources } = useAppStore(); const [isWarnModalOpen, setIsWarnModalOpen] = useState(false); const totalSelected = useMemo(() => { diff --git a/frontend/webapp/hooks/sources/useSourceCRUD.ts b/frontend/webapp/hooks/sources/useSourceCRUD.ts index dc2f0ed27..9a2bdbdfc 100644 --- a/frontend/webapp/hooks/sources/useSourceCRUD.ts +++ b/frontend/webapp/hooks/sources/useSourceCRUD.ts @@ -1,7 +1,7 @@ import { useCallback } from 'react'; import { useMutation } from '@apollo/client'; -import { useNotificationStore } from '@/store'; import { ACTION, getSseTargetFromId } from '@/utils'; +import { useAppStore, useNotificationStore } from '@/store'; import { PERSIST_SOURCE, UPDATE_K8S_ACTUAL_SOURCE } from '@/graphql'; import { useComputePlatform, useNamespace } from '../compute-platform'; import { OVERVIEW_ENTITY_TYPES, type WorkloadId, type PatchSourceRequestInput, type K8sActualSource, NOTIFICATION_TYPE } from '@/types'; @@ -13,6 +13,8 @@ interface Params { export const useSourceCRUD = (params?: Params) => { const removeNotifications = useNotificationStore((store) => store.removeNotifications); + const { configuredSources, setConfiguredSources } = useAppStore(); + const { persistNamespace } = useNamespace(); const { data, refetch } = useComputePlatform(); const { addNotification } = useNotificationStore(); @@ -70,6 +72,7 @@ export const useSourceCRUD = (params?: Params) => { } else { const id = { kind, name, namespace }; if (!selected) removeNotifications(getSseTargetFromId(id, OVERVIEW_ENTITY_TYPES.SOURCE)); + if (!selected) setConfiguredSources({ ...configuredSources, [namespace]: configuredSources[namespace].filter((source) => source.name !== name) }); handleComplete(action, `source "${name}" was ${action.toLowerCase()}d ${fromOrIn} "${namespace}"`, selected ? id : undefined); } }, From c1958de1e81140e31899483e614b175cf671c7cc Mon Sep 17 00:00:00 2001 From: Ben Elferink Date: Thu, 19 Dec 2024 20:08:14 +0200 Subject: [PATCH 5/9] [GEN-2042]: do not display warning when closing drawer of "unknown instrumentation rule" (#2036) This pull request includes a small but important change to the `RuleDrawer` component in the `frontend/webapp/containers/main/instrumentation-rules/rule-drawer/index.tsx` file. The change adds an additional condition to the `handleEdit` function to improve the handling of unknown rule types. 
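In essence, the drawer's close path calls the same handler with `false`, so the forbidden-edit warning should fire only when edit mode is actually being entered — that is, when `bool` is `true` or omitted. The sketch below isolates that guard as a standalone function (`warn` and `setIsEditing` are hypothetical stand-ins for the notification store and drawer state, not the component code); the file-level change follows.

```ts
// Stripped-down sketch of the guard; `warn` and `setIsEditing` are
// hypothetical stand-ins for the notification store and the drawer state.
type RuleType = 'UNKNOWN_TYPE' | 'PAYLOAD_COLLECTION';

const makeHandleEdit =
  (ruleType: RuleType, warn: () => void, setIsEditing: (b: boolean) => void) =>
  (bool?: boolean) => {
    // Warn only when entering edit mode: an explicit `true` or the
    // no-argument toggle (`undefined`). Closing passes `false` -> silent.
    if (ruleType === 'UNKNOWN_TYPE' && (bool || bool === undefined)) {
      warn();
    } else {
      setIsEditing(typeof bool === 'boolean' ? bool : true);
    }
  };

const handleEdit = makeHandleEdit('UNKNOWN_TYPE', () => console.log('cannot edit rule'), () => {});
handleEdit(false); // closing the drawer: no warning
handleEdit();      // entering edit mode: warning fires
```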
* [`frontend/webapp/containers/main/instrumentation-rules/rule-drawer/index.tsx`](diffhunk://#diff-759940924dd4ee0c6895f60e21ca040542d9cf1d3f69162677c9983d62b12f8cL76-R76): Modified the `handleEdit` function to include a condition that checks if the `bool` parameter is `true` or `undefined` before adding a notification for unknown rule types.
---
 .../containers/main/instrumentation-rules/rule-drawer/index.tsx | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/frontend/webapp/containers/main/instrumentation-rules/rule-drawer/index.tsx b/frontend/webapp/containers/main/instrumentation-rules/rule-drawer/index.tsx
index 8002a49b2..cdcbfeda9 100644
--- a/frontend/webapp/containers/main/instrumentation-rules/rule-drawer/index.tsx
+++ b/frontend/webapp/containers/main/instrumentation-rules/rule-drawer/index.tsx
@@ -71,7 +71,7 @@ export const RuleDrawer: React.FC = () => {
   const { id, item } = selectedItem as { id: string; item: InstrumentationRuleSpecMapped };
 
   const handleEdit = (bool?: boolean) => {
-    if (item.type === InstrumentationRuleType.UNKNOWN_TYPE) {
+    if (item.type === InstrumentationRuleType.UNKNOWN_TYPE && (bool || bool === undefined)) {
       addNotification({ type: NOTIFICATION_TYPE.WARNING, title: FORM_ALERTS.FORBIDDEN, message: FORM_ALERTS.CANNOT_EDIT_RULE, crdType: OVERVIEW_ENTITY_TYPES.RULE, target: id });
     } else {
       setIsEditing(typeof bool === 'boolean' ? bool : true);

From 221efbd40a34f2abba5a5f2477479bf7c7f6b10d Mon Sep 17 00:00:00 2001
From: Amir Blum 
Date: Thu, 19 Dec 2024 23:00:27 +0200
Subject: [PATCH 6/9] refactor: use instrumentationconfig in autoscaler (#2025)

Part of the effort to retire instrumentedapplication.

This object behaves the same, so no changes in functionality are expected.

Updated the RBAC roles and cleaned up unused permissions while at it (we need to do a bigger RBAC swap in a different PR).

Tested it to work with both cli and helm (with a logs destination that triggers a change in the collector cm to collect logs).
--- autoscaler/PROJECT | 2 +- .../controllers/datacollection/configmap.go | 12 +++++----- .../datacollection/configmap_test.go | 20 ++++++++--------- autoscaler/controllers/datacollection/root.go | 15 ++++++------- autoscaler/controllers/gateway/deployment.go | 6 ++--- .../instrumentedapplication_controller.go | 11 ++++------ autoscaler/main.go | 4 ++-- cli/cmd/resources/autoscaler.go | 22 +------------------ .../templates/autoscaler/clusterrole.yaml | 4 +--- 9 files changed, 35 insertions(+), 61 deletions(-) diff --git a/autoscaler/PROJECT b/autoscaler/PROJECT index 45667032b..7fa5618d2 100644 --- a/autoscaler/PROJECT +++ b/autoscaler/PROJECT @@ -25,7 +25,7 @@ resources: namespaced: true controller: true domain: odigos.io - kind: InstrumentedApplication + kind: InstrumentationConfig path: github.com/odigos-io/odigos/api/odigos/v1alpha1 version: v1alpha1 version: "3" diff --git a/autoscaler/controllers/datacollection/configmap.go b/autoscaler/controllers/datacollection/configmap.go index d1b318c96..f17baec96 100644 --- a/autoscaler/controllers/datacollection/configmap.go +++ b/autoscaler/controllers/datacollection/configmap.go @@ -28,7 +28,7 @@ import ( semconv "go.opentelemetry.io/otel/semconv/v1.26.0" ) -func SyncConfigMap(apps *odigosv1.InstrumentedApplicationList, dests *odigosv1.DestinationList, allProcessors *odigosv1.ProcessorList, +func SyncConfigMap(sources *odigosv1.InstrumentationConfigList, dests *odigosv1.DestinationList, allProcessors *odigosv1.ProcessorList, datacollection *odigosv1.CollectorsGroup, ctx context.Context, c client.Client, scheme *runtime.Scheme, disableNameProcessor bool) error { logger := log.FromContext(ctx) @@ -39,7 +39,7 @@ func SyncConfigMap(apps *odigosv1.InstrumentedApplicationList, dests *odigosv1.D SamplingExists := commonconf.FindFirstProcessorByType(allProcessors, "odigossampling") setTracesLoadBalancer := SamplingExists != nil - desired, err := getDesiredConfigMap(apps, dests, processors, datacollection, scheme, setTracesLoadBalancer, disableNameProcessor) + desired, err := getDesiredConfigMap(sources, dests, processors, datacollection, scheme, setTracesLoadBalancer, disableNameProcessor) if err != nil { logger.Error(err, "failed to get desired config map") return err @@ -96,9 +96,9 @@ func createConfigMap(desired *v1.ConfigMap, ctx context.Context, c client.Client return desired, nil } -func getDesiredConfigMap(apps *odigosv1.InstrumentedApplicationList, dests *odigosv1.DestinationList, processors []*odigosv1.Processor, +func getDesiredConfigMap(sources *odigosv1.InstrumentationConfigList, dests *odigosv1.DestinationList, processors []*odigosv1.Processor, datacollection *odigosv1.CollectorsGroup, scheme *runtime.Scheme, setTracesLoadBalancer bool, disableNameProcessor bool) (*v1.ConfigMap, error) { - cmData, err := calculateConfigMapData(datacollection, apps, dests, processors, setTracesLoadBalancer, disableNameProcessor) + cmData, err := calculateConfigMapData(datacollection, sources, dests, processors, setTracesLoadBalancer, disableNameProcessor) if err != nil { return nil, err } @@ -124,7 +124,7 @@ func getDesiredConfigMap(apps *odigosv1.InstrumentedApplicationList, dests *odig return &desired, nil } -func calculateConfigMapData(nodeCG *odigosv1.CollectorsGroup, apps *odigosv1.InstrumentedApplicationList, dests *odigosv1.DestinationList, processors []*odigosv1.Processor, +func calculateConfigMapData(nodeCG *odigosv1.CollectorsGroup, sources *odigosv1.InstrumentationConfigList, dests *odigosv1.DestinationList, processors []*odigosv1.Processor, 
setTracesLoadBalancer bool, disableNameProcessor bool) (string, error) { ownMetricsPort := nodeCG.Spec.CollectorOwnMetricsPort @@ -286,7 +286,7 @@ func calculateConfigMapData(nodeCG *odigosv1.CollectorsGroup, apps *odigosv1.Ins if collectLogs { includes := make([]string, 0) - for _, element := range apps.Items { + for _, element := range sources.Items { // Paths for log files: /var/log/pods/__//.log // Pod specifiers // Deployment: _--_ diff --git a/autoscaler/controllers/datacollection/configmap_test.go b/autoscaler/controllers/datacollection/configmap_test.go index 702e75fee..b35567fe7 100644 --- a/autoscaler/controllers/datacollection/configmap_test.go +++ b/autoscaler/controllers/datacollection/configmap_test.go @@ -63,9 +63,9 @@ func NewMockTestStatefulSet(ns *corev1.Namespace) *appsv1.StatefulSet { // givin a workload object (deployment, daemonset, statefulset) return a mock instrumented application // with a single container with the GoProgrammingLanguage -func NewMockInstrumentedApplication(workloadObject client.Object) *odigosv1.InstrumentedApplication { +func NewMockInstrumentationConfig(workloadObject client.Object) *odigosv1.InstrumentationConfig { gvk, _ := apiutil.GVKForObject(workloadObject, scheme.Scheme) - return &odigosv1.InstrumentedApplication{ + return &odigosv1.InstrumentationConfig{ ObjectMeta: metav1.ObjectMeta{ Name: workload.CalculateWorkloadRuntimeObjectName(workloadObject.GetName(), gvk.Kind), Namespace: workloadObject.GetNamespace(), @@ -79,9 +79,9 @@ func NewMockInstrumentedApplication(workloadObject client.Object) *odigosv1.Inst } } -func NewMockInstrumentedApplicationWoOwner(workloadObject client.Object) *odigosv1.InstrumentedApplication { +func NewMockInstrumentationConfigWoOwner(workloadObject client.Object) *odigosv1.InstrumentationConfig { gvk, _ := apiutil.GVKForObject(workloadObject, scheme.Scheme) - return &odigosv1.InstrumentedApplication{ + return &odigosv1.InstrumentationConfig{ ObjectMeta: metav1.ObjectMeta{ Name: workload.CalculateWorkloadRuntimeObjectName(workloadObject.GetName(), gvk.Kind), Namespace: workloadObject.GetNamespace(), @@ -118,15 +118,15 @@ func TestCalculateConfigMapData(t *testing.T) { ns := NewMockNamespace("default") ns2 := NewMockNamespace("other-namespace") - items := []v1alpha1.InstrumentedApplication{ - *NewMockInstrumentedApplication(NewMockTestDeployment(ns)), - *NewMockInstrumentedApplication(NewMockTestDaemonSet(ns)), - *NewMockInstrumentedApplication(NewMockTestStatefulSet(ns2)), - *NewMockInstrumentedApplicationWoOwner(NewMockTestDeployment(ns2)), + items := []v1alpha1.InstrumentationConfig{ + *NewMockInstrumentationConfig(NewMockTestDeployment(ns)), + *NewMockInstrumentationConfig(NewMockTestDaemonSet(ns)), + *NewMockInstrumentationConfig(NewMockTestStatefulSet(ns2)), + *NewMockInstrumentationConfigWoOwner(NewMockTestDeployment(ns2)), } got, err := calculateConfigMapData( - &v1alpha1.InstrumentedApplicationList{ + &v1alpha1.InstrumentationConfigList{ Items: items, }, NewMockDestinationList(), diff --git a/autoscaler/controllers/datacollection/root.go b/autoscaler/controllers/datacollection/root.go index 14a5dffdf..f7a3b1410 100644 --- a/autoscaler/controllers/datacollection/root.go +++ b/autoscaler/controllers/datacollection/root.go @@ -22,14 +22,13 @@ const ( func Sync(ctx context.Context, c client.Client, scheme *runtime.Scheme, imagePullSecrets []string, odigosVersion string, k8sVersion *version.Version, disableNameProcessor bool) error { logger := log.FromContext(ctx) - var instApps 
odigosv1.InstrumentedApplicationList - if err := c.List(ctx, &instApps); err != nil { - logger.Error(err, "Failed to list instrumented apps") + var sources odigosv1.InstrumentationConfigList + if err := c.List(ctx, &sources); err != nil { return err } - if len(instApps.Items) == 0 { - logger.V(3).Info("No instrumented apps") + if len(sources.Items) == 0 { + logger.V(3).Info("No odigos sources found, skipping data collection sync") return nil } @@ -52,16 +51,16 @@ func Sync(ctx context.Context, c client.Client, scheme *runtime.Scheme, imagePul return err } - return syncDataCollection(&instApps, &dests, &processors, &dataCollectionCollectorGroup, ctx, c, scheme, imagePullSecrets, odigosVersion, k8sVersion, disableNameProcessor) + return syncDataCollection(&sources, &dests, &processors, &dataCollectionCollectorGroup, ctx, c, scheme, imagePullSecrets, odigosVersion, k8sVersion, disableNameProcessor) } -func syncDataCollection(instApps *odigosv1.InstrumentedApplicationList, dests *odigosv1.DestinationList, processors *odigosv1.ProcessorList, +func syncDataCollection(sources *odigosv1.InstrumentationConfigList, dests *odigosv1.DestinationList, processors *odigosv1.ProcessorList, dataCollection *odigosv1.CollectorsGroup, ctx context.Context, c client.Client, scheme *runtime.Scheme, imagePullSecrets []string, odigosVersion string, k8sVersion *version.Version, disableNameProcessor bool) error { logger := log.FromContext(ctx) logger.V(0).Info("Syncing data collection") - err := SyncConfigMap(instApps, dests, processors, dataCollection, ctx, c, scheme, disableNameProcessor) + err := SyncConfigMap(sources, dests, processors, dataCollection, ctx, c, scheme, disableNameProcessor) if err != nil { logger.Error(err, "Failed to sync config map") return err diff --git a/autoscaler/controllers/gateway/deployment.go b/autoscaler/controllers/gateway/deployment.go index a89a66fd1..0fe51b29e 100644 --- a/autoscaler/controllers/gateway/deployment.go +++ b/autoscaler/controllers/gateway/deployment.go @@ -174,13 +174,13 @@ func getDesiredDeployment(dests *odigosv1.DestinationList, configDataHash string { // let the Go runtime know how many CPUs are available, // without this, Go will assume all the cores are available. 
- Name: "GOMAXPROCS", - ValueFrom: &corev1.EnvVarSource{ + Name: "GOMAXPROCS", + ValueFrom: &corev1.EnvVarSource{ ResourceFieldRef: &corev1.ResourceFieldSelector{ ContainerName: containerName, // limitCPU, Kubernetes automatically rounds up the value to an integer // (700m -> 1, 1200m -> 2) - Resource: "limits.cpu", + Resource: "limits.cpu", }, }, }, diff --git a/autoscaler/controllers/instrumentedapplication_controller.go b/autoscaler/controllers/instrumentedapplication_controller.go index 306198997..d669780a4 100644 --- a/autoscaler/controllers/instrumentedapplication_controller.go +++ b/autoscaler/controllers/instrumentedapplication_controller.go @@ -26,10 +26,9 @@ import ( "k8s.io/apimachinery/pkg/util/version" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/log" ) -type InstrumentedApplicationReconciler struct { +type InstrumentationConfigReconciler struct { client.Client Scheme *runtime.Scheme ImagePullSecrets []string @@ -38,9 +37,7 @@ type InstrumentedApplicationReconciler struct { DisableNameProcessor bool } -func (r *InstrumentedApplicationReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - logger := log.FromContext(ctx) - logger.V(0).Info("Reconciling InstrumentedApps") +func (r *InstrumentationConfigReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { err := datacollection.Sync(ctx, r.Client, r.Scheme, r.ImagePullSecrets, r.OdigosVersion, r.K8sVersion, r.DisableNameProcessor) if err != nil { return ctrl.Result{}, err @@ -49,9 +46,9 @@ func (r *InstrumentedApplicationReconciler) Reconcile(ctx context.Context, req c return ctrl.Result{}, nil } -func (r *InstrumentedApplicationReconciler) SetupWithManager(mgr ctrl.Manager) error { +func (r *InstrumentationConfigReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). - For(&odigosv1.InstrumentedApplication{}). + For(&odigosv1.InstrumentationConfig{}). // this controller only cares about the instrumented application existence. // when it is created or removed, the node collector config map needs to be updated to scrape logs for it's pods. WithEventFilter(&predicate.ExistencePredicate{}). 
diff --git a/autoscaler/main.go b/autoscaler/main.go index e804e1cb4..457ed60ec 100644 --- a/autoscaler/main.go +++ b/autoscaler/main.go @@ -224,7 +224,7 @@ func main() { setupLog.Error(err, "unable to create controller", "controller", "CollectorsGroup") os.Exit(1) } - if err = (&controllers.InstrumentedApplicationReconciler{ + if err = (&controllers.InstrumentationConfigReconciler{ Client: mgr.GetClient(), Scheme: mgr.GetScheme(), ImagePullSecrets: imagePullSecrets, @@ -232,7 +232,7 @@ func main() { K8sVersion: k8sVersion, DisableNameProcessor: disableNameProcessor, }).SetupWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "InstrumentedApplication") + setupLog.Error(err, "unable to create controller", "controller", "InstrumentationConfig") os.Exit(1) } if err = (&controllers.SecretReconciler{ diff --git a/cli/cmd/resources/autoscaler.go b/cli/cmd/resources/autoscaler.go index 506e7770c..c75fa36d4 100644 --- a/cli/cmd/resources/autoscaler.go +++ b/cli/cmd/resources/autoscaler.go @@ -218,32 +218,12 @@ func NewAutoscalerClusterRole() *rbacv1.ClusterRole { }, { Verbs: []string{ - "create", - "delete", "get", "list", - "patch", - "update", "watch", }, APIGroups: []string{"odigos.io"}, - Resources: []string{"instrumentedapplications"}, - }, - { - Verbs: []string{ - "update", - }, - APIGroups: []string{"odigos.io"}, - Resources: []string{"instrumentedapplications/finalizers"}, - }, - { - Verbs: []string{ - "get", - "patch", - "update", - }, - APIGroups: []string{"odigos.io"}, - Resources: []string{"instrumentedapplications/status"}, + Resources: []string{"instrumentationconfigs"}, }, { Verbs: []string{ "create", diff --git a/helm/odigos/templates/autoscaler/clusterrole.yaml b/helm/odigos/templates/autoscaler/clusterrole.yaml index c1f33edb9..c94fec1fc 100644 --- a/helm/odigos/templates/autoscaler/clusterrole.yaml +++ b/helm/odigos/templates/autoscaler/clusterrole.yaml @@ -35,7 +35,7 @@ rules: - apiGroups: - odigos.io resources: - - instrumentedapplications + - instrumentationconfigs - collectorsgroups - odigosconfigurations - destinations @@ -52,7 +52,6 @@ rules: - odigos.io resources: - collectorsgroups/finalizers - - instrumentedapplications/finalizers - destinations/finalizers verbs: - update @@ -60,7 +59,6 @@ rules: - odigos.io resources: - collectorsgroups/status - - instrumentedapplications/status - destinations/status verbs: - get From 9cda745cfd864c08a48628cb6ccea654e5063d32 Mon Sep 17 00:00:00 2001 From: Ron Federman <73110295+RonFed@users.noreply.github.com> Date: Fri, 20 Dec 2024 10:32:08 +0200 Subject: [PATCH 7/9] Rename process group to process details (#2032) --- instrumentation/manager.go | 74 ++++++++++++------------ instrumentation/types.go | 49 +++++++++------- odiglet/pkg/ebpf/common.go | 18 +++--- odiglet/pkg/ebpf/distribution_matcher.go | 2 +- odiglet/pkg/ebpf/reporter.go | 18 +++--- odiglet/pkg/ebpf/resolvers.go | 12 ++-- odiglet/pkg/ebpf/settings_getter.go | 6 +- 7 files changed, 92 insertions(+), 87 deletions(-) diff --git a/instrumentation/manager.go b/instrumentation/manager.go index 6da5792ee..f1444ea20 100644 --- a/instrumentation/manager.go +++ b/instrumentation/manager.go @@ -23,16 +23,16 @@ var ( // The manager will apply the configuration to all instrumentations that match the config group. 
type ConfigUpdate[configGroup ConfigGroup] map[configGroup]Config -type instrumentationDetails[processGroup ProcessGroup, configGroup ConfigGroup] struct { +type instrumentationDetails[processDetails ProcessDetails, configGroup ConfigGroup] struct { // we want to track the instrumentation even if it failed to load, to be able to report the error // and clean up the reporter resources once the process exits. // hence, this might be nil if the instrumentation failed to load. inst Instrumentation - pg processGroup + pd processDetails cg configGroup } -type ManagerOptions[processGroup ProcessGroup, configGroup ConfigGroup] struct { +type ManagerOptions[processDetails ProcessDetails, configGroup ConfigGroup] struct { Logger logr.Logger // Factories is a map of OTel distributions to their corresponding instrumentation factories. @@ -46,7 +46,7 @@ type ManagerOptions[processGroup ProcessGroup, configGroup ConfigGroup] struct { // based on the process event. // // The handler is also used to report the instrumentation lifecycle events. - Handler *Handler[processGroup, configGroup] + Handler *Handler[processDetails, configGroup] // DetectorOptions is a list of options to configure the process detector. // @@ -69,27 +69,27 @@ type Manager interface { Run(ctx context.Context) error } -type manager[processGroup ProcessGroup, configGroup ConfigGroup] struct { +type manager[processDetails ProcessDetails, configGroup ConfigGroup] struct { // channel for receiving process events, // used to detect new processes and process exits, and handle their instrumentation accordingly. procEvents <-chan detector.ProcessEvent detector detector.Detector - handler *Handler[processGroup, configGroup] + handler *Handler[processDetails, configGroup] factories map[OtelDistribution]Factory logger logr.Logger // all the created instrumentations by pid, // this map is not concurrent safe, so it should be accessed only from the main event loop - detailsByPid map[int]*instrumentationDetails[processGroup, configGroup] + detailsByPid map[int]*instrumentationDetails[processDetails, configGroup] // instrumentations by workload, and aggregated by pid // this map is not concurrent safe, so it should be accessed only from the main event loop - detailsByWorkload map[configGroup]map[int]*instrumentationDetails[processGroup, configGroup] + detailsByWorkload map[configGroup]map[int]*instrumentationDetails[processDetails, configGroup] configUpdates <-chan ConfigUpdate[configGroup] } -func NewManager[processGroup ProcessGroup, configGroup ConfigGroup](options ManagerOptions[processGroup, configGroup]) (Manager, error) { +func NewManager[processDetails ProcessDetails, configGroup ConfigGroup](options ManagerOptions[processDetails, configGroup]) (Manager, error) { handler := options.Handler if handler == nil { return nil, errors.New("handler is required for ebpf instrumentation manager") @@ -99,7 +99,7 @@ func NewManager[processGroup ProcessGroup, configGroup ConfigGroup](options Mana return nil, errors.New("reporter is required for ebpf instrumentation manager") } - if handler.ProcessGroupResolver == nil { + if handler.ProcessDetailsResolver == nil { return nil, errors.New("details resolver is required for ebpf instrumentation manager") } @@ -126,19 +126,19 @@ func NewManager[processGroup ProcessGroup, configGroup ConfigGroup](options Mana return nil, fmt.Errorf("failed to create process detector: %w", err) } - return &manager[processGroup, configGroup]{ + return &manager[processDetails, configGroup]{ procEvents: procEvents, detector: 
detector, handler: handler, factories: options.Factories, logger: logger.WithName("ebpf-instrumentation-manager"), - detailsByPid: make(map[int]*instrumentationDetails[processGroup, configGroup]), - detailsByWorkload: map[configGroup]map[int]*instrumentationDetails[processGroup, configGroup]{}, + detailsByPid: make(map[int]*instrumentationDetails[processDetails, configGroup]), + detailsByWorkload: map[configGroup]map[int]*instrumentationDetails[processDetails, configGroup]{}, configUpdates: options.ConfigUpdates, }, nil } -func (m *manager[ProcessGroup, ConfigGroup]) runEventLoop(ctx context.Context) { +func (m *manager[ProcessDetails, ConfigGroup]) runEventLoop(ctx context.Context) { // main event loop for handling instrumentations for { select { @@ -182,7 +182,7 @@ func (m *manager[ProcessGroup, ConfigGroup]) runEventLoop(ctx context.Context) { } } -func (m *manager[ProcessGroup, ConfigGroup]) Run(ctx context.Context) error { +func (m *manager[ProcessDetails, ConfigGroup]) Run(ctx context.Context) error { g, errCtx := errgroup.WithContext(ctx) g.Go(func() error { @@ -198,14 +198,14 @@ func (m *manager[ProcessGroup, ConfigGroup]) Run(ctx context.Context) error { return err } -func (m *manager[ProcessGroup, ConfigGroup]) cleanInstrumentation(ctx context.Context, pid int) { +func (m *manager[ProcessDetails, ConfigGroup]) cleanInstrumentation(ctx context.Context, pid int) { details, found := m.detailsByPid[pid] if !found { m.logger.V(3).Info("no instrumentation found for exiting pid, nothing to clean", "pid", pid) return } - m.logger.Info("cleaning instrumentation resources", "pid", pid, "process group details", details.pg) + m.logger.Info("cleaning instrumentation resources", "pid", pid, "process group details", details.pd) if details.inst != nil { err := details.inst.Close(ctx) @@ -214,7 +214,7 @@ func (m *manager[ProcessGroup, ConfigGroup]) cleanInstrumentation(ctx context.Co } } - err := m.handler.Reporter.OnExit(ctx, pid, details.pg) + err := m.handler.Reporter.OnExit(ctx, pid, details.pd) if err != nil { m.logger.Error(err, "failed to report instrumentation exit") } @@ -222,7 +222,7 @@ func (m *manager[ProcessGroup, ConfigGroup]) cleanInstrumentation(ctx context.Co m.stopTrackInstrumentation(pid) } -func (m *manager[ProcessGroup, ConfigGroup]) handleProcessExecEvent(ctx context.Context, e detector.ProcessEvent) error { +func (m *manager[ProcessDetails, ConfigGroup]) handleProcessExecEvent(ctx context.Context, e detector.ProcessEvent) error { if details, found := m.detailsByPid[e.PID]; found && details.inst != nil { // this can happen if we have multiple exec events for the same pid (chain loading) // TODO: better handle this? @@ -232,17 +232,17 @@ func (m *manager[ProcessGroup, ConfigGroup]) handleProcessExecEvent(ctx context. return nil } - pg, err := m.handler.ProcessGroupResolver.Resolve(ctx, e) + pd, err := m.handler.ProcessDetailsResolver.Resolve(ctx, e) if err != nil { return errors.Join(err, errFailedToGetDetails) } - otelDisto, err := m.handler.DistributionMatcher.Distribution(ctx, pg) + otelDisto, err := m.handler.DistributionMatcher.Distribution(ctx, pd) if err != nil { return errors.Join(err, errFailedToGetDistribution) } - configGroup, err := m.handler.ConfigGroupResolver.Resolve(ctx, pg, otelDisto) + configGroup, err := m.handler.ConfigGroupResolver.Resolve(ctx, pd, otelDisto) if err != nil { return errors.Join(err, errFailedToGetConfigGroup) } @@ -253,7 +253,7 @@ func (m *manager[ProcessGroup, ConfigGroup]) handleProcessExecEvent(ctx context. 
} // Fetch initial settings for the instrumentation - settings, err := m.handler.SettingsGetter.Settings(ctx, pg, otelDisto) + settings, err := m.handler.SettingsGetter.Settings(ctx, pd, otelDisto) if err != nil { // for k8s instrumentation config CR will be queried to get the settings // we should always have config for this event. @@ -269,35 +269,35 @@ func (m *manager[ProcessGroup, ConfigGroup]) handleProcessExecEvent(ctx context. inst, err := factory.CreateInstrumentation(ctx, e.PID, settings) if err != nil { m.logger.Error(err, "failed to initialize instrumentation", "language", otelDisto.Language, "sdk", otelDisto.OtelSdk) - err = m.handler.Reporter.OnInit(ctx, e.PID, err, pg) + err = m.handler.Reporter.OnInit(ctx, e.PID, err, pd) // TODO: should we return here the initialize error? or the handler error? or both? return err } loadErr := inst.Load(ctx) - reporterErr := m.handler.Reporter.OnLoad(ctx, e.PID, loadErr, pg) + reporterErr := m.handler.Reporter.OnLoad(ctx, e.PID, loadErr, pd) if reporterErr != nil { - m.logger.Error(reporterErr, "failed to report instrumentation load", "loaded", loadErr == nil, "pid", e.PID, "process group details", pg) + m.logger.Error(reporterErr, "failed to report instrumentation load", "loaded", loadErr == nil, "pid", e.PID, "process group details", pd) } if loadErr != nil { // we need to track the instrumentation even if the load failed. // consider a reporter which writes a persistent record for a failed/successful load // we need to notify the reporter once that PID exits to clean up the resources - hence we track it. // saving the inst as nil marking the instrumentation failed to load, and is not valid to run/configure/close. - m.startTrackInstrumentation(e.PID, nil, pg, configGroup) + m.startTrackInstrumentation(e.PID, nil, pd, configGroup) m.logger.Error(err, "failed to load instrumentation", "language", otelDisto.Language, "sdk", otelDisto.OtelSdk) // TODO: should we return here the load error? or the instance write error? or both? return err } - m.startTrackInstrumentation(e.PID, inst, pg, configGroup) - m.logger.Info("instrumentation loaded", "pid", e.PID, "process group details", pg) + m.startTrackInstrumentation(e.PID, inst, pd, configGroup) + m.logger.Info("instrumentation loaded", "pid", e.PID, "process group details", pd) go func() { err := inst.Run(ctx) if err != nil && !errors.Is(err, context.Canceled) { - reporterErr := m.handler.Reporter.OnRun(ctx, e.PID, err, pg) + reporterErr := m.handler.Reporter.OnRun(ctx, e.PID, err, pd) if reporterErr != nil { m.logger.Error(reporterErr, "failed to report instrumentation run") } @@ -308,23 +308,23 @@ func (m *manager[ProcessGroup, ConfigGroup]) handleProcessExecEvent(ctx context. 
return nil } -func (m *manager[ProcessGroup, ConfigGroup]) startTrackInstrumentation(pid int, inst Instrumentation, processGroup ProcessGroup, configGroup ConfigGroup) { - instDetails := &instrumentationDetails[ProcessGroup, ConfigGroup]{ +func (m *manager[ProcessDetails, ConfigGroup]) startTrackInstrumentation(pid int, inst Instrumentation, processDetails ProcessDetails, configGroup ConfigGroup) { + instDetails := &instrumentationDetails[ProcessDetails, ConfigGroup]{ inst: inst, - pg: processGroup, + pd: processDetails, cg: configGroup, } m.detailsByPid[pid] = instDetails if _, found := m.detailsByWorkload[configGroup]; !found { // first instrumentation for this workload - m.detailsByWorkload[configGroup] = map[int]*instrumentationDetails[ProcessGroup, ConfigGroup]{pid: instDetails} + m.detailsByWorkload[configGroup] = map[int]*instrumentationDetails[ProcessDetails, ConfigGroup]{pid: instDetails} } else { m.detailsByWorkload[configGroup][pid] = instDetails } } -func (m *manager[ProcessGroup, ConfigGroup]) stopTrackInstrumentation(pid int) { +func (m *manager[ProcessDetails, ConfigGroup]) stopTrackInstrumentation(pid int) { details, ok := m.detailsByPid[pid] if !ok { return @@ -339,7 +339,7 @@ func (m *manager[ProcessGroup, ConfigGroup]) stopTrackInstrumentation(pid int) { } } -func (m *manager[ProcessGroup, ConfigGroup]) applyInstrumentationConfigurationForSDK(ctx context.Context, configGroup ConfigGroup, config Config) error { +func (m *manager[ProcessDetails, ConfigGroup]) applyInstrumentationConfigurationForSDK(ctx context.Context, configGroup ConfigGroup, config Config) error { var err error configGroupInstrumentations, ok := m.detailsByWorkload[configGroup] @@ -351,7 +351,7 @@ func (m *manager[ProcessGroup, ConfigGroup]) applyInstrumentationConfigurationFo if instDetails.inst == nil { continue } - m.logger.Info("applying configuration to instrumentation", "process group details", instDetails.pg, "configGroup", configGroup) + m.logger.Info("applying configuration to instrumentation", "process group details", instDetails.pd, "configGroup", configGroup) applyErr := instDetails.inst.ApplyConfig(ctx, config) err = errors.Join(err, applyErr) } diff --git a/instrumentation/types.go b/instrumentation/types.go index 9b734e592..509e394af 100644 --- a/instrumentation/types.go +++ b/instrumentation/types.go @@ -17,13 +17,18 @@ type OtelDistribution struct { OtelSdk common.OtelSdk } -// ProcessGroup represents a group of processes that are managed together by the hosting platform. +// ProcessDetails is used to convert the common process details reported by the detector to details relevant to hosting platform. +// +// ProcessDetails can contain details that associates a process to a group of processes that are managed together by the hosting platform. // It may include different information depending on the platform (Kubernetes, VM, etc). // // For example consider an app which is launched by a bash script, the script launches a python process. // The process may create different child processes, and the bash script may launch multiple python processes. // In this case, the process group may include the bash script, the python process, and the child processes. -type ProcessGroup interface { +// +// Another category of information that may be included relates to language and runtime information which can be used to +// determine the OTel distribution to use. 
+type ProcessDetails interface { fmt.Stringer } @@ -34,60 +39,60 @@ type ConfigGroup interface { comparable } -// ProcessGroupResolver is used to resolve the process group of a process. -type ProcessGroupResolver[processGroup ProcessGroup] interface { +// ProcessDetailsResolver is used to resolve the process group of a process. +type ProcessDetailsResolver[processDetails ProcessDetails] interface { // Resolve will classify the process into a process group. // Those process group details may be used for future calls when reporting the status of the instrumentation. // or for resolving the configuration group of the process. - Resolve(context.Context, detector.ProcessEvent) (processGroup, error) + Resolve(context.Context, detector.ProcessEvent) (processDetails, error) } // ConfigGroupResolver is used to resolve the configuration group of a process. -type ConfigGroupResolver[processGroup ProcessGroup, configGroup ConfigGroup] interface { +type ConfigGroupResolver[processDetails ProcessDetails, configGroup ConfigGroup] interface { // Resolve will classify the process into a configuration group. // The Otel Distribution is resolved in the time of calling this function, and may be used // to determine the configuration group. - Resolve(context.Context, processGroup, OtelDistribution) (configGroup, error) + Resolve(context.Context, processDetails, OtelDistribution) (configGroup, error) } // Reporter is used to report the status of the instrumentation. // It is called at different stages of the instrumentation lifecycle. -type Reporter[processGroup ProcessGroup] interface { +type Reporter[processDetails ProcessDetails] interface { // OnInit is called when the instrumentation is initialized. // The error parameter will be nil if the instrumentation was initialized successfully. - OnInit(ctx context.Context, pid int, err error, pg processGroup) error + OnInit(ctx context.Context, pid int, err error, pg processDetails) error // OnLoad is called after an instrumentation is loaded successfully or failed to load. // The error parameter will be nil if the instrumentation was loaded successfully. - OnLoad(ctx context.Context, pid int, err error, pg processGroup) error + OnLoad(ctx context.Context, pid int, err error, pg processDetails) error // OnRun is called after the instrumentation stops running. // An error may report a fatal error during the instrumentation run, or a closing error // which happened during the closing of the instrumentation. - OnRun(ctx context.Context, pid int, err error, pg processGroup) error + OnRun(ctx context.Context, pid int, err error, pg processDetails) error // OnExit is called when the instrumented process exits, and the instrumentation has already been stopped. // For a reported which persists the instrumentation state, this is the time to clean up the state. - OnExit(ctx context.Context, pid int, pg processGroup) error + OnExit(ctx context.Context, pid int, pg processDetails) error } // DistributionMatcher is used to match a process to an Otel Distribution. -type DistributionMatcher[processGroup ProcessGroup] interface { +type DistributionMatcher[processDetails ProcessDetails] interface { // Distribution will match a process to an Otel Distribution. - Distribution(context.Context, processGroup) (OtelDistribution, error) + Distribution(context.Context, processDetails) (OtelDistribution, error) } // SettingsGetter is used to fetch the initial settings of an instrumentation. 
-type SettingsGetter[processGroup ProcessGroup] interface { +type SettingsGetter[processDetails ProcessDetails] interface { // GetSettings will fetch the initial settings of an instrumentation. - Settings(context.Context, processGroup, OtelDistribution) (Settings, error) + Settings(context.Context, processDetails, OtelDistribution) (Settings, error) } // Handler is used to classify, report and configure instrumentations. -type Handler[processGroup ProcessGroup, configGroup comparable] struct { - ProcessGroupResolver ProcessGroupResolver[processGroup] - ConfigGroupResolver ConfigGroupResolver[processGroup, configGroup] - Reporter Reporter[processGroup] - DistributionMatcher DistributionMatcher[processGroup] - SettingsGetter SettingsGetter[processGroup] +type Handler[processDetails ProcessDetails, configGroup comparable] struct { + ProcessDetailsResolver ProcessDetailsResolver[processDetails] + ConfigGroupResolver ConfigGroupResolver[processDetails, configGroup] + Reporter Reporter[processDetails] + DistributionMatcher DistributionMatcher[processDetails] + SettingsGetter SettingsGetter[processDetails] } diff --git a/odiglet/pkg/ebpf/common.go b/odiglet/pkg/ebpf/common.go index b39a390d7..887580823 100644 --- a/odiglet/pkg/ebpf/common.go +++ b/odiglet/pkg/ebpf/common.go @@ -10,7 +10,7 @@ import ( // NewManager creates a new instrumentation manager for eBPF which is configured to work with Kubernetes. func NewManager(client client.Client, logger logr.Logger, factories map[instrumentation.OtelDistribution]instrumentation.Factory, configUpdates <-chan instrumentation.ConfigUpdate[K8sConfigGroup]) (instrumentation.Manager, error) { - managerOpts := instrumentation.ManagerOptions[K8sProcessGroup, K8sConfigGroup]{ + managerOpts := instrumentation.ManagerOptions[K8sProcessDetails, K8sConfigGroup]{ Logger: logger, Factories: factories, Handler: newHandler(client), @@ -26,11 +26,11 @@ func NewManager(client client.Client, logger logr.Logger, factories map[instrume return manager, nil } -func newHandler(client client.Client) *instrumentation.Handler[K8sProcessGroup, K8sConfigGroup] { +func newHandler(client client.Client) *instrumentation.Handler[K8sProcessDetails, K8sConfigGroup] { reporter := &k8sReporter{ client: client, } - processGroupResolver := &k8sDetailsResolver{ + processDetailsResolver := &k8sDetailsResolver{ client: client, } configGroupResolver := &k8sConfigGroupResolver{} @@ -38,11 +38,11 @@ func newHandler(client client.Client) *instrumentation.Handler[K8sProcessGroup, client: client, } distributionMatcher := &podDeviceDistributionMatcher{} - return &instrumentation.Handler[K8sProcessGroup, K8sConfigGroup]{ - ProcessGroupResolver: processGroupResolver, - ConfigGroupResolver: configGroupResolver, - Reporter: reporter, - DistributionMatcher: distributionMatcher, - SettingsGetter: settingsGetter, + return &instrumentation.Handler[K8sProcessDetails, K8sConfigGroup]{ + ProcessDetailsResolver: processDetailsResolver, + ConfigGroupResolver: configGroupResolver, + Reporter: reporter, + DistributionMatcher: distributionMatcher, + SettingsGetter: settingsGetter, } } diff --git a/odiglet/pkg/ebpf/distribution_matcher.go b/odiglet/pkg/ebpf/distribution_matcher.go index 624e31e63..5b7aed12a 100644 --- a/odiglet/pkg/ebpf/distribution_matcher.go +++ b/odiglet/pkg/ebpf/distribution_matcher.go @@ -10,7 +10,7 @@ import ( type podDeviceDistributionMatcher struct{} -func (dm *podDeviceDistributionMatcher) Distribution(ctx context.Context, e K8sProcessGroup) (instrumentation.OtelDistribution, error) { +func 
(dm *podDeviceDistributionMatcher) Distribution(ctx context.Context, e K8sProcessDetails) (instrumentation.OtelDistribution, error) { // get the language and sdk for this process event // based on the pod spec and the container name from the process event // TODO: We should have all the required information in the process event diff --git a/odiglet/pkg/ebpf/reporter.go b/odiglet/pkg/ebpf/reporter.go index ec52647e6..7cae2f7e4 100644 --- a/odiglet/pkg/ebpf/reporter.go +++ b/odiglet/pkg/ebpf/reporter.go @@ -16,13 +16,13 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" ) -type K8sProcessGroup struct { +type K8sProcessDetails struct { pod *corev1.Pod containerName string pw *workload.PodWorkload } -func (kd K8sProcessGroup) String() string { +func (kd K8sProcessDetails) String() string { return fmt.Sprintf("Pod: %s.%s, Container: %s, Workload: %s", kd.pod.Name, kd.pod.Namespace, kd.containerName, @@ -30,7 +30,7 @@ func (kd K8sProcessGroup) String() string { ) } -var _ instrumentation.ProcessGroup = K8sProcessGroup{} +var _ instrumentation.ProcessDetails = K8sProcessDetails{} type k8sReporter struct { client client.Client @@ -41,7 +41,7 @@ type K8sConfigGroup struct { Lang common.ProgrammingLanguage } -var _ instrumentation.Reporter[K8sProcessGroup] = &k8sReporter{} +var _ instrumentation.Reporter[K8sProcessDetails] = &k8sReporter{} type errRequiredEnvVarNotFound struct { envVarName string @@ -75,7 +75,7 @@ const ( InstrumentationUnhealthy InstrumentationHealth = false ) -func (r *k8sReporter) OnInit(ctx context.Context, pid int, err error, e K8sProcessGroup) error { +func (r *k8sReporter) OnInit(ctx context.Context, pid int, err error, e K8sProcessDetails) error { if err == nil { // currently we don't report on successful initialization return nil @@ -84,7 +84,7 @@ func (r *k8sReporter) OnInit(ctx context.Context, pid int, err error, e K8sProce return r.updateInstrumentationInstanceStatus(ctx, e, pid, InstrumentationUnhealthy, FailedToInitialize, err.Error()) } -func (r *k8sReporter) OnLoad(ctx context.Context, pid int, err error, e K8sProcessGroup) error { +func (r *k8sReporter) OnLoad(ctx context.Context, pid int, err error, e K8sProcessDetails) error { if err != nil { return r.updateInstrumentationInstanceStatus(ctx, e, pid, InstrumentationUnhealthy, FailedToLoad, err.Error()) } @@ -93,7 +93,7 @@ func (r *k8sReporter) OnLoad(ctx context.Context, pid int, err error, e K8sProce return r.updateInstrumentationInstanceStatus(ctx, e, pid, InstrumentationHealthy, LoadedSuccessfully, msg) } -func (r *k8sReporter) OnRun(ctx context.Context, pid int, err error, e K8sProcessGroup) error { +func (r *k8sReporter) OnRun(ctx context.Context, pid int, err error, e K8sProcessDetails) error { if err == nil { // finished running successfully return nil @@ -102,7 +102,7 @@ func (r *k8sReporter) OnRun(ctx context.Context, pid int, err error, e K8sProces return r.updateInstrumentationInstanceStatus(ctx, e, pid, InstrumentationUnhealthy, FailedToRun, err.Error()) } -func (r *k8sReporter) OnExit(ctx context.Context, pid int, e K8sProcessGroup) error { +func (r *k8sReporter) OnExit(ctx context.Context, pid int, e K8sProcessDetails) error { if err := r.client.Delete(ctx, &odigosv1.InstrumentationInstance{ ObjectMeta: metav1.ObjectMeta{ Name: instance.InstrumentationInstanceName(e.pod.Name, pid), @@ -114,7 +114,7 @@ func (r *k8sReporter) OnExit(ctx context.Context, pid int, e K8sProcessGroup) er return nil } -func (r *k8sReporter) updateInstrumentationInstanceStatus(ctx context.Context, ke K8sProcessGroup, pid 
int, health InstrumentationHealth, reason InstrumentationStatusReason, msg string) error { +func (r *k8sReporter) updateInstrumentationInstanceStatus(ctx context.Context, ke K8sProcessDetails, pid int, health InstrumentationHealth, reason InstrumentationStatusReason, msg string) error { instrumentedAppName := workload.CalculateWorkloadRuntimeObjectName(ke.pw.Name, ke.pw.Kind) healthy := bool(health) return instance.UpdateInstrumentationInstanceStatus(ctx, ke.pod, ke.containerName, r.client, instrumentedAppName, pid, r.client.Scheme(), diff --git a/odiglet/pkg/ebpf/resolvers.go b/odiglet/pkg/ebpf/resolvers.go index efb654f9e..92f1a6372 100644 --- a/odiglet/pkg/ebpf/resolvers.go +++ b/odiglet/pkg/ebpf/resolvers.go @@ -16,23 +16,23 @@ type k8sDetailsResolver struct { client client.Client } -func (dr *k8sDetailsResolver) Resolve(ctx context.Context, event detector.ProcessEvent) (K8sProcessGroup, error) { +func (dr *k8sDetailsResolver) Resolve(ctx context.Context, event detector.ProcessEvent) (K8sProcessDetails, error) { pod, err := dr.podFromProcEvent(ctx, event) if err != nil { - return K8sProcessGroup{}, err + return K8sProcessDetails{}, err } containerName, found := containerNameFromProcEvent(event) if !found { - return K8sProcessGroup{}, errContainerNameNotReported + return K8sProcessDetails{}, errContainerNameNotReported } podWorkload, err := workload.PodWorkloadObjectOrError(ctx, pod) if err != nil { - return K8sProcessGroup{}, fmt.Errorf("failed to find workload object from pod manifest owners references: %w", err) + return K8sProcessDetails{}, fmt.Errorf("failed to find workload object from pod manifest owners references: %w", err) } - return K8sProcessGroup{ + return K8sProcessDetails{ pod: pod, containerName: containerName, pw: podWorkload, @@ -68,7 +68,7 @@ func containerNameFromProcEvent(event detector.ProcessEvent) (string, bool) { type k8sConfigGroupResolver struct{} -func (cr *k8sConfigGroupResolver) Resolve(ctx context.Context, d K8sProcessGroup, dist instrumentation.OtelDistribution) (K8sConfigGroup, error) { +func (cr *k8sConfigGroupResolver) Resolve(ctx context.Context, d K8sProcessDetails, dist instrumentation.OtelDistribution) (K8sConfigGroup, error) { if d.pw == nil { return K8sConfigGroup{}, fmt.Errorf("podWorkload is not provided, cannot resolve config group") } diff --git a/odiglet/pkg/ebpf/settings_getter.go b/odiglet/pkg/ebpf/settings_getter.go index 40c420006..a10a216ed 100644 --- a/odiglet/pkg/ebpf/settings_getter.go +++ b/odiglet/pkg/ebpf/settings_getter.go @@ -15,9 +15,9 @@ type k8sSettingsGetter struct { client client.Client } -var _ instrumentation.SettingsGetter[K8sProcessGroup] = &k8sSettingsGetter{} +var _ instrumentation.SettingsGetter[K8sProcessDetails] = &k8sSettingsGetter{} -func (ksg *k8sSettingsGetter) Settings(ctx context.Context, kd K8sProcessGroup, dist instrumentation.OtelDistribution) (instrumentation.Settings, error) { +func (ksg *k8sSettingsGetter) Settings(ctx context.Context, kd K8sProcessDetails, dist instrumentation.OtelDistribution) (instrumentation.Settings, error) { sdkConfig, serviceName, err := ksg.instrumentationSDKConfig(ctx, kd, dist) if err != nil { return instrumentation.Settings{}, err @@ -35,7 +35,7 @@ func (ksg *k8sSettingsGetter) Settings(ctx context.Context, kd K8sProcessGroup, }, nil } -func (ksg *k8sSettingsGetter) instrumentationSDKConfig(ctx context.Context, kd K8sProcessGroup, dist instrumentation.OtelDistribution) (*odigosv1.SdkConfig, string, error) { +func (ksg *k8sSettingsGetter) instrumentationSDKConfig(ctx 
context.Context, kd K8sProcessDetails, dist instrumentation.OtelDistribution) (*odigosv1.SdkConfig, string, error) { instrumentationConfig := odigosv1.InstrumentationConfig{} instrumentationConfigKey := client.ObjectKey{ Namespace: kd.pw.Namespace, From 6497397767027a791f998a4e2da218ec3837e4f1 Mon Sep 17 00:00:00 2001 From: Amir Blum Date: Fri, 20 Dec 2024 12:02:30 +0200 Subject: [PATCH 8/9] chore: narrow the RBAC permissions used by the autoscaler (#2039) This reduces the permissions we request to just those we actually use. Most resources are moved from the cluster role to the namespaced role. The only resource we need at the cluster level is `instrumentationconfigs`, used to update the data collection configmaps with file patterns to scrape for logs. Updated the selectors in the controller-runtime cache. Tested several flows with the CLI and Helm. --- Makefile | 6 + autoscaler/main.go | 34 ++- cli/cmd/resources/autoscaler.go | 202 ++++++------------ .../templates/autoscaler/clusterrole.yaml | 109 +--------- helm/odigos/templates/autoscaler/role.yaml | 103 ++++++++- helm/odigos/templates/scheduler/role.yaml | 1 + 6 files changed, 212 insertions(+), 243 deletions(-) diff --git a/Makefile b/Makefile index 892373ba1..06f564c9d 100644 --- a/Makefile +++ b/Makefile @@ -215,6 +215,12 @@ cli-diagnose: @echo "Diagnosing cluster data for debugging" cd ./cli ; go run -tags=embed_manifests . diagnose +.PHONY: helm-install +helm-install: + @echo "Installing odigos using helm" + helm upgrade --install odigos ./helm/odigos --create-namespace --namespace odigos-system --set image.tag=$(ODIGOS_CLI_VERSION) + kubectl label namespace odigos-system odigos.io/system-object="true" + .PHONY: api-all api-all: make -C api all diff --git a/autoscaler/main.go b/autoscaler/main.go index 457ed60ec..4757f758e 100644 --- a/autoscaler/main.go +++ b/autoscaler/main.go @@ -51,7 +51,7 @@ import ( metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" apiactions "github.com/odigos-io/odigos/api/actions/v1alpha1" - observabilitycontrolplanev1 "github.com/odigos-io/odigos/api/odigos/v1alpha1" + odigosv1 "github.com/odigos-io/odigos/api/odigos/v1alpha1" "github.com/odigos-io/odigos/common" "github.com/odigos-io/odigos/autoscaler/controllers" @@ -72,7 +72,7 @@ var ( func init() { utilruntime.Must(clientgoscheme.AddToScheme(scheme)) - utilruntime.Must(observabilitycontrolplanev1.AddToScheme(scheme)) + utilruntime.Must(odigosv1.AddToScheme(scheme)) utilruntime.Must(apiactions.AddToScheme(scheme)) //+kubebuilder:scaffold:scheme } @@ -157,6 +157,36 @@ func main() { &corev1.Secret{}: { Field: nsSelector, }, + &odigosv1.CollectorsGroup{}: { + Field: nsSelector, + }, + &odigosv1.Destination{}: { + Field: nsSelector, + }, + &odigosv1.Processor{}: { + Field: nsSelector, + }, + &apiactions.AddClusterInfo{}: { + Field: nsSelector, + }, + &apiactions.DeleteAttribute{}: { + Field: nsSelector, + }, + &apiactions.ErrorSampler{}: { + Field: nsSelector, + }, + &apiactions.LatencySampler{}: { + Field: nsSelector, + }, + &apiactions.PiiMasking{}: { + Field: nsSelector, + }, + &apiactions.ProbabilisticSampler{}: { + Field: nsSelector, + }, + &apiactions.RenameAttribute{}: { + Field: nsSelector, + }, }, }, HealthProbeBindAddress: probeAddr, diff --git a/cli/cmd/resources/autoscaler.go b/cli/cmd/resources/autoscaler.go index c75fa36d4..b961c688c 100644 --- a/cli/cmd/resources/autoscaler.go +++ b/cli/cmd/resources/autoscaler.go @@ -19,11 +19,15 @@ import ( ) const ( - AutoScalerServiceAccountName = "odigos-autoscaler" - AutoScalerServiceName = "auto-scaler" - 
AutoScalerDeploymentName = "odigos-autoscaler" - AutoScalerAppLabelValue = "odigos-autoscaler" - AutoScalerContainerName = "manager" + AutoScalerDeploymentName = "odigos-autoscaler" + AutoScalerServiceAccountName = AutoScalerDeploymentName + AutoScalerAppLabelValue = AutoScalerDeploymentName + AutoScalerRoleName = AutoScalerDeploymentName + AutoScalerRoleBindingName = AutoScalerDeploymentName + AutoScalerClusterRoleName = AutoScalerDeploymentName + AutoScalerClusterRoleBindingName = AutoScalerDeploymentName + AutoScalerServiceName = "auto-scaler" + AutoScalerContainerName = "manager" ) func NewAutoscalerServiceAccount(ns string) *corev1.ServiceAccount { @@ -46,7 +50,7 @@ func NewAutoscalerRole(ns string) *rbacv1.Role { APIVersion: "rbac.authorization.k8s.io/v1", }, ObjectMeta: metav1.ObjectMeta{ - Name: "odigos-autoscaler", + Name: AutoScalerRoleName, Namespace: ns, }, Rules: []rbacv1.PolicyRule{ @@ -117,8 +121,8 @@ func NewAutoscalerRole(ns string) *rbacv1.Role { { Verbs: []string{ "get", - "patch", - "update", + "list", + "watch", }, APIGroups: []string{"apps"}, Resources: []string{"deployments/status"}, @@ -142,107 +146,14 @@ func NewAutoscalerRole(ns string) *rbacv1.Role { APIGroups: []string{""}, Resources: []string{"secrets"}, }, - }, - } -} - -func NewAutoscalerRoleBinding(ns string) *rbacv1.RoleBinding { - return &rbacv1.RoleBinding{ - TypeMeta: metav1.TypeMeta{ - Kind: "RoleBinding", - APIVersion: "rbac.authorization.k8s.io/v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "odigos-autoscaler", - Namespace: ns, - }, - Subjects: []rbacv1.Subject{ - { - Kind: "ServiceAccount", - Name: "odigos-autoscaler", - }, - }, - RoleRef: rbacv1.RoleRef{ - APIGroup: "rbac.authorization.k8s.io", - Kind: "Role", - Name: "odigos-autoscaler", - }, - } -} - -func NewAutoscalerClusterRole() *rbacv1.ClusterRole { - return &rbacv1.ClusterRole{ - TypeMeta: metav1.TypeMeta{ - Kind: "ClusterRole", - APIVersion: "rbac.authorization.k8s.io/v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "odigos-autoscaler", - }, - Rules: []rbacv1.PolicyRule{ - { - Verbs: []string{ - "get", - "list", - "watch", - }, - APIGroups: []string{""}, - Resources: []string{"configmaps"}, - }, { Verbs: []string{ "get", "list", "watch", }, - APIGroups: []string{""}, - Resources: []string{"services"}, - }, - { - Verbs: []string{ - "get", - "list", - "watch", - }, - APIGroups: []string{"apps"}, - Resources: []string{"daemonsets"}, - }, - { - Verbs: []string{ - "get", - "list", - "watch", - }, - APIGroups: []string{"apps"}, - Resources: []string{"deployments"}, - }, - { - Verbs: []string{ - "get", - "list", - "watch", - }, - APIGroups: []string{"odigos.io"}, - Resources: []string{"instrumentationconfigs"}, - }, { - Verbs: []string{ - "create", - "delete", - "get", - "list", - "patch", - "update", - "watch", - }, APIGroups: []string{"odigos.io"}, - Resources: []string{"collectorsgroups"}, - }, - { - Verbs: []string{ - "update", - }, - APIGroups: []string{"odigos.io"}, - Resources: []string{"collectorsgroups/finalizers"}, + Resources: []string{"destinations"}, }, { Verbs: []string{ @@ -251,20 +162,7 @@ func NewAutoscalerClusterRole() *rbacv1.ClusterRole { "update", }, APIGroups: []string{"odigos.io"}, - Resources: []string{"collectorsgroups/status"}, - }, - { - Verbs: []string{ - "create", - "delete", - "get", - "list", - "patch", - "update", - "watch", - }, - APIGroups: []string{"odigos.io"}, - Resources: []string{"destinations"}, + Resources: []string{"destinations/status"}, }, { Verbs: []string{ @@ -280,10 +178,12 @@ func 
NewAutoscalerClusterRole() *rbacv1.ClusterRole { }, { Verbs: []string{ - "update", + "watch", + "get", + "list", }, - APIGroups: []string{"odigos.io"}, - Resources: []string{"destinations/finalizers"}, + APIGroups: []string{"actions.odigos.io"}, + Resources: []string{"addclusterinfos", "deleteattributes", "renameattributes", "probabilisticsamplers", "piimaskings", "latencysamplers", "errorsamplers"}, }, { Verbs: []string{ @@ -291,17 +191,17 @@ func NewAutoscalerClusterRole() *rbacv1.ClusterRole { "patch", "update", }, - APIGroups: []string{"odigos.io"}, - Resources: []string{"destinations/status"}, + APIGroups: []string{"actions.odigos.io"}, + Resources: []string{"addclusterinfos/status", "deleteattributes/status", "renameattributes/status", "probabilisticsamplers/status", "piimaskings/status", "latencysamplers/status", "errorsamplers/status"}, }, { Verbs: []string{ - "watch", "get", "list", + "watch", }, - APIGroups: []string{"actions.odigos.io"}, - Resources: []string{"addclusterinfos", "deleteattributes", "renameattributes", "probabilisticsamplers", "piimaskings", "latencysamplers", "errorsamplers"}, + APIGroups: []string{"odigos.io"}, + Resources: []string{"collectorsgroups"}, }, { Verbs: []string{ @@ -309,9 +209,47 @@ func NewAutoscalerClusterRole() *rbacv1.ClusterRole { "patch", "update", }, - APIGroups: []string{"actions.odigos.io"}, - Resources: []string{"addclusterinfos/status", "deleteattributes/status", "renameattributes/status", "probabilisticsamplers/status", "piimaskings/status", "latencysamplers/status", "errorsamplers/status"}, + APIGroups: []string{"odigos.io"}, + Resources: []string{"collectorsgroups/status"}, }, + }, + } +} + +func NewAutoscalerRoleBinding(ns string) *rbacv1.RoleBinding { + return &rbacv1.RoleBinding{ + TypeMeta: metav1.TypeMeta{ + Kind: "RoleBinding", + APIVersion: "rbac.authorization.k8s.io/v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: AutoScalerRoleBindingName, + Namespace: ns, + }, + Subjects: []rbacv1.Subject{ + { + Kind: "ServiceAccount", + Name: AutoScalerServiceAccountName, + }, + }, + RoleRef: rbacv1.RoleRef{ + APIGroup: "rbac.authorization.k8s.io", + Kind: "Role", + Name: AutoScalerRoleName, + }, + } +} + +func NewAutoscalerClusterRole() *rbacv1.ClusterRole { + return &rbacv1.ClusterRole{ + TypeMeta: metav1.TypeMeta{ + Kind: "ClusterRole", + APIVersion: "rbac.authorization.k8s.io/v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: AutoScalerClusterRoleName, + }, + Rules: []rbacv1.PolicyRule{ { Verbs: []string{ "get", @@ -319,7 +257,7 @@ func NewAutoscalerClusterRole() *rbacv1.ClusterRole { "watch", }, APIGroups: []string{"odigos.io"}, - Resources: []string{"odigosconfigurations"}, + Resources: []string{"instrumentationconfigs"}, }, }, } @@ -332,19 +270,19 @@ func NewAutoscalerClusterRoleBinding(ns string) *rbacv1.ClusterRoleBinding { APIVersion: "rbac.authorization.k8s.io/v1", }, ObjectMeta: metav1.ObjectMeta{ - Name: "odigos-autoscaler", + Name: AutoScalerClusterRoleBindingName, }, Subjects: []rbacv1.Subject{ { Kind: "ServiceAccount", - Name: "odigos-autoscaler", + Name: AutoScalerServiceAccountName, Namespace: ns, }, }, RoleRef: rbacv1.RoleRef{ APIGroup: "rbac.authorization.k8s.io", Kind: "ClusterRole", - Name: "odigos-autoscaler", + Name: AutoScalerClusterRoleName, }, } } @@ -362,7 +300,7 @@ func NewAutoscalerLeaderElectionRoleBinding(ns string) *rbacv1.RoleBinding { Subjects: []rbacv1.Subject{ { Kind: "ServiceAccount", - Name: "odigos-autoscaler", + Name: AutoScalerServiceAccountName, }, }, RoleRef: rbacv1.RoleRef{ @@ -486,7 +424,7 @@ 
func NewAutoscalerDeployment(ns string, version string, imagePrefix string, imag }, }, TerminationGracePeriodSeconds: ptrint64(10), - ServiceAccountName: "odigos-autoscaler", + ServiceAccountName: AutoScalerServiceAccountName, SecurityContext: &corev1.PodSecurityContext{ RunAsNonRoot: ptrbool(true), }, diff --git a/helm/odigos/templates/autoscaler/clusterrole.yaml b/helm/odigos/templates/autoscaler/clusterrole.yaml index c94fec1fc..014f2c6b1 100644 --- a/helm/odigos/templates/autoscaler/clusterrole.yaml +++ b/helm/odigos/templates/autoscaler/clusterrole.yaml @@ -3,118 +3,11 @@ kind: ClusterRole metadata: name: odigos-autoscaler rules: - - apiGroups: - - "" - resources: - - services - verbs: - - get - - list - - watch - - apiGroups: - - "" - resources: - - configmaps - verbs: - - create - - delete - - get - - list - - patch - - update - - watch - - apiGroups: - - apps - resources: - - daemonsets - - deployments - verbs: - - get - - list - - watch - apiGroups: - odigos.io resources: - instrumentationconfigs - - collectorsgroups - - odigosconfigurations - - destinations - - processors - verbs: - - create - - delete - - get - - list - - patch - - update - - watch - - apiGroups: - - odigos.io - resources: - - collectorsgroups/finalizers - - destinations/finalizers - verbs: - - update - - apiGroups: - - odigos.io - resources: - - collectorsgroups/status - - destinations/status - verbs: - - get - - patch - - update - - apiGroups: - - actions.odigos.io - resources: - - addclusterinfos - - deleteattributes - - renameattributes - - probabilisticsamplers - - latencysamplers - - errorsamplers - - piimaskings - verbs: - - watch - - get - - list - - apiGroups: - - actions.odigos.io - resources: - - addclusterinfos/status - - deleteattributes/status - - renameattributes/status - - probabilisticsamplers/status - - latencysamplers/status - - errorsamplers/status - - piimaskings/status - verbs: - - get - - patch - - update - - apiGroups: - - "" - resources: - - configmaps - - services - verbs: - - get - - list - - watch - - apiGroups: - - apps - resources: - - daemonsets - - deployments - verbs: - - get - - list - - watch - - apiGroups: - - odigos.io - resources: - - odigosconfigurations verbs: - get - list - - watch + - watch \ No newline at end of file diff --git a/helm/odigos/templates/autoscaler/role.yaml b/helm/odigos/templates/autoscaler/role.yaml index c8f864849..623646420 100644 --- a/helm/odigos/templates/autoscaler/role.yaml +++ b/helm/odigos/templates/autoscaler/role.yaml @@ -8,6 +8,17 @@ rules: - "" resources: - configmaps + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - "" + resources: - services verbs: - create @@ -22,7 +33,6 @@ rules: - apps resources: - daemonsets - - deployments verbs: - create - delete @@ -36,6 +46,26 @@ rules: - apps resources: - daemonsets/status + verbs: + - get + - patch + - update + - apiGroups: + - apps + resources: + - deployments + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + - apiGroups: + - apps + resources: - deployments/status verbs: - get @@ -54,7 +84,78 @@ rules: - "" resources: - secrets + verbs: + - list + - watch + - get + - apiGroups: + - odigos.io + resources: + - destinations verbs: - get - list - watch + - apiGroups: + - odigos.io + resources: + - destinations/status + verbs: + - get + - patch + - update + - apiGroups: + - odigos.io + resources: + - processors + verbs: + - get + - list + - watch + - patch + - create + - update + - apiGroups: + - 
actions.odigos.io + resources: + - addclusterinfos + - deleteattributes + - renameattributes + - probabilisticsamplers + - piimaskings + - latencysamplers + - errorsamplers + verbs: + - watch + - get + - list + - apiGroups: + - actions.odigos.io + resources: + - addclusterinfos/status + - deleteattributes/status + - renameattributes/status + - probabilisticsamplers/status + - piimaskings/status + - latencysamplers/status + - errorsamplers/status + verbs: + - get + - patch + - update + - apiGroups: + - odigos.io + resources: + - collectorsgroups + verbs: + - get + - list + - watch + - apiGroups: + - odigos.io + resources: + - collectorsgroups/status + verbs: + - get + - patch + - update diff --git a/helm/odigos/templates/scheduler/role.yaml b/helm/odigos/templates/scheduler/role.yaml index ca14e3135..8199720ad 100644 --- a/helm/odigos/templates/scheduler/role.yaml +++ b/helm/odigos/templates/scheduler/role.yaml @@ -2,6 +2,7 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: name: odigos-scheduler + namespace: {{ .Release.Namespace }} rules: - apiGroups: - "" From f9b527cbb955b7796bbd7f2b0a1c1bca763a603c Mon Sep 17 00:00:00 2001 From: Ben Elferink Date: Fri, 20 Dec 2024 12:24:06 +0200 Subject: [PATCH 9/9] [GEN-1286]: Refactor configuration handling to replace deprecated dropdowns with checkboxes for Clickhouse, Gigapipe and Qryn destinations (#2033) This pull request includes several changes to the configuration handling and documentation for the Clickhouse and Qryn destinations. The main focus is on updating deprecated values and improving the handling of boolean configurations. ### Configuration Handling Improvements: * [`common/config/clickhouse.go`](diffhunk://#diff-138cb50de47f4f21f12e6bfd952391ae5cd871bc0d330bab84eb42e7c440a98bL58-R59): Replaced direct boolean conversion with a helper function `getBooleanConfig` for `create_schema` configuration. * [`common/config/qryn.go`](diffhunk://#diff-332ef1831a065da3170c8cadfdcf5274667681e4bdbd786e278780cc6f054b0dL48-R48): Updated the `ModifyConfig` function to use pre-parsed configuration values and replaced string comparisons with boolean values using `getBooleanConfig`. [[1]](diffhunk://#diff-332ef1831a065da3170c8cadfdcf5274667681e4bdbd786e278780cc6f054b0dL48-R48) [[2]](diffhunk://#diff-332ef1831a065da3170c8cadfdcf5274667681e4bdbd786e278780cc6f054b0dL58-R58) [[3]](diffhunk://#diff-332ef1831a065da3170c8cadfdcf5274667681e4bdbd786e278780cc6f054b0dL129-R130) * [`common/config/qryn_oss.go`](diffhunk://#diff-ce2141637cf39bb35c0c6becea7503ead0d028c866a8f8e2bf779a8146037615L26-L28): Deprecated "Yes/No" values in favor of "true/false" for `resourceToTelemetryConversion` and `addExporterName` configurations. * [`common/config/utils.go`](diffhunk://#diff-3a30bd3819234a1d17d8a057d80ffc71550dab2fc17687b78b40e5c6f49a5136R95-R99): Added a new utility function `getBooleanConfig` to handle deprecated boolean configuration values. ### Documentation Updates: * [`docs/backends/clickhouse.mdx`](diffhunk://#diff-f9498b059963c2f930f06518c4fced6a78b53b3dbebb4bf5afeee868cacc54a1L38-R39): Updated documentation to reflect the change from "Create/Skip" to "true/false" for the `Create Scheme` configuration. 
[[1]](diffhunk://#diff-f9498b059963c2f930f06518c4fced6a78b53b3dbebb4bf5afeee868cacc54a1L38-R39) [[2]](diffhunk://#diff-f9498b059963c2f930f06518c4fced6a78b53b3dbebb4bf5afeee868cacc54a1L57-R58) [[3]](diffhunk://#diff-f9498b059963c2f930f06518c4fced6a78b53b3dbebb4bf5afeee868cacc54a1L106-R106) [[4]](diffhunk://#diff-f9498b059963c2f930f06518c4fced6a78b53b3dbebb4bf5afeee868cacc54a1L143-R143) * `docs/backends/gigapipe.mdx`, `docs/backends/qryn.mdx`: Updated documentation to reflect the change from "Yes/No" to "true/false" for boolean configurations. [[1]](diffhunk://#diff-4abd168fd2d605be33e39a21b60369eab975899d6dc350120027676208823c3fL59-R60) [[2]](diffhunk://#diff-a50b66d82235440a1e549628a4d45ae127499c15609635959933e1d0eda473ccL52-R53) ### UI and Frontend Adjustments: * [`frontend/webapp/hooks/compute-platform/useComputePlatform.ts`](diffhunk://#diff-408f228c0f11c5a04d006fc956ee35e0d67cd74048af12b857d9bedf51ca87e6R48-R68): Replaced deprecated string values with boolean values for Clickhouse and Qryn configurations. * [`frontend/webapp/hooks/destinations/useDestinationCRUD.ts`](diffhunk://#diff-c7f19ca063b62568e37726473e1b8c265d3309def0d157772951023c66e055b8L78-R86): Ensured that undefined values are filtered out when creating or updating destinations. ### YAML Specification Changes: * `destinations/data/clickhouse.yaml`, `destinations/data/gigapipe.yaml`, `destinations/data/qryn.yaml`: Changed component types from dropdown to checkbox for boolean configurations and updated initial values accordingly. [[1]](diffhunk://#diff-44f264f92f35b6a7a0bd00f1f77b39e05aaf84c7d5a17ca79b3bd481f2b36c2aL23-R23) [[2]](diffhunk://#diff-44f264f92f35b6a7a0bd00f1f77b39e05aaf84c7d5a17ca79b3bd481f2b36c2aL42-R46) [[3]](diffhunk://#diff-02968da745547cb0250c5b8b23d04de88a89e7ea9a1f10916b717736595bac99L38-L51) [[4]](diffhunk://#diff-1190ae5d806c855e8bd336a40e4063d7d9b558f2b455a9b973f1a33f6a131d1aL36-L49) --- common/config/clickhouse.go | 5 ++--- common/config/qryn.go | 8 +++---- common/config/qryn_oss.go | 14 +++++++++++-- common/config/utils.go | 5 +++++ destinations/data/clickhouse.yaml | 9 +++----- destinations/data/gigapipe.yaml | 10 ++------- destinations/data/qryn.yaml | 10 ++------- docs/backends/clickhouse.mdx | 12 +++++------ docs/backends/gigapipe.mdx | 4 ++-- docs/backends/qryn.mdx | 4 ++-- .../compute-platform/useComputePlatform.ts | 21 +++++++++++++++++++ .../hooks/destinations/useDestinationCRUD.ts | 12 ++++++++--- 12 files changed, 70 insertions(+), 44 deletions(-) diff --git a/common/config/clickhouse.go b/common/config/clickhouse.go index 3ed3ad5f9..32d591c28 100644 --- a/common/config/clickhouse.go +++ b/common/config/clickhouse.go @@ -55,9 +55,8 @@ func (c *Clickhouse) ModifyConfig(dest ExporterConfigurer, currentConfig *Config exporterConfig["password"] = clickhousePassword } - createSchema, exists := dest.GetConfig()[clickhouseCreateSchema] - createSchemaBoolValue := exists && strings.ToLower(createSchema) == "create" - exporterConfig["create_schema"] = createSchemaBoolValue + createSchema := dest.GetConfig()[clickhouseCreateSchema] + exporterConfig["create_schema"] = getBooleanConfig(createSchema, "create") dbName, exists := dest.GetConfig()[clickhouseDatabaseName] if !exists { diff --git a/common/config/qryn.go b/common/config/qryn.go index 8386110ec..4de118726 100644 --- a/common/config/qryn.go +++ b/common/config/qryn.go @@ -45,7 +45,7 @@ func (g *Qryn) ModifyConfig(dest ExporterConfigurer, currentConfig *Config) erro if conf.passwordFieldName != "" { passwordPlaceholder = "${" + 
conf.passwordFieldName + "}" } - baseURL, err := parseURL(dest.GetConfig()[qrynHost], conf.key, passwordPlaceholder) + baseURL, err := parseURL(conf.host, conf.key, passwordPlaceholder) if err != nil { return errors.Join(err, errors.New("invalid qryn endpoint. gateway will not be configured with qryn")) } @@ -55,7 +55,7 @@ func (g *Qryn) ModifyConfig(dest ExporterConfigurer, currentConfig *Config) erro currentConfig.Exporters[rwExporterName] = GenericMap{ "endpoint": fmt.Sprintf("%s/api/v1/prom/remote/write", baseURL), "resource_to_telemetry_conversion": GenericMap{ - "enabled": dest.GetConfig()[resourceToTelemetryConversion] == "Yes", + "enabled": conf.resourceToTelemetryConversion, }, } metricsPipelineName := "metrics/qryn-" + dest.GetID() @@ -126,8 +126,8 @@ func (g *Qryn) getConfigs(dest ExporterConfigurer) qrynConf { return qrynConf{ host: dest.GetConfig()[qrynHost], key: dest.GetConfig()[qrynAPIKey], - addExporterName: dest.GetConfig()[qrynAddExporterName] == "Yes", - resourceToTelemetryConversion: dest.GetConfig()[resourceToTelemetryConversion] == "Yes", + addExporterName: getBooleanConfig(dest.GetConfig()[qrynAddExporterName], "Yes"), + resourceToTelemetryConversion: getBooleanConfig(dest.GetConfig()[resourceToTelemetryConversion], "Yes"), secretsOptional: dest.GetConfig()[qrynSecretsOptional] == "1", passwordFieldName: dest.GetConfig()[qrynPasswordFieldName], } diff --git a/common/config/qryn_oss.go b/common/config/qryn_oss.go index b09b306d5..2eada2dca 100644 --- a/common/config/qryn_oss.go +++ b/common/config/qryn_oss.go @@ -23,9 +23,19 @@ func (d QrynOssDest) GetConfig() map[string]string { conf := d.ExporterConfigurer.GetConfig() conf[qrynHost] = conf[qrynOssHost] conf[qrynAPIKey] = conf[qrynOssUsername] - conf[resourceToTelemetryConversion] = conf[qrynOssresourceToTelemetryConversion] + // Yes/No are deprecated, use true/false + if conf[qrynOssresourceToTelemetryConversion] == "true" || conf[qrynOssresourceToTelemetryConversion] == "Yes" { + conf[resourceToTelemetryConversion] = "true" + } else { + conf[resourceToTelemetryConversion] = "false" + } + // Yes/No are deprecated, use true/false + if conf[qrynOssAddExporterName] == "true" || conf[qrynOssAddExporterName] == "Yes" { + conf[qrynAddExporterName] = "true" + } else { + conf[qrynAddExporterName] = "false" + } conf[qrynSecretsOptional] = "1" - conf[qrynAddExporterName] = conf[qrynOssAddExporterName] conf[qrynPasswordFieldName] = "QRYN_OSS_PASSWORD" return conf } diff --git a/common/config/utils.go b/common/config/utils.go index c0d8aaac9..f9173bb46 100644 --- a/common/config/utils.go +++ b/common/config/utils.go @@ -92,3 +92,8 @@ func urlHostContainsPort(host string) bool { return strings.Contains(host, ":") } } + +func getBooleanConfig(currentValue string, deprecatedValue string) bool { + lowerCaseValue := strings.ToLower(currentValue) + return lowerCaseValue == "true" || lowerCaseValue == deprecatedValue +} diff --git a/destinations/data/clickhouse.yaml b/destinations/data/clickhouse.yaml index 26f9f565a..97158e01d 100644 --- a/destinations/data/clickhouse.yaml +++ b/destinations/data/clickhouse.yaml @@ -20,7 +20,7 @@ spec: componentProps: type: text required: true - placeholder: "http://host:port" + placeholder: 'http://host:port' tooltip: 'Clickhouse server address' - name: CLICKHOUSE_USERNAME displayName: Username @@ -39,14 +39,11 @@ spec: tooltip: 'If Clickhouse Authentication is used, provide the password' - name: CLICKHOUSE_CREATE_SCHEME displayName: Create Scheme - componentType: dropdown + componentType: 
checkbox componentProps: - values: - - Create - - Skip required: true tooltip: 'Should the destination create the schema for you?' - initialValue: Create + initialValue: true - name: CLICKHOUSE_DATABASE_NAME displayName: Database Name componentType: input diff --git a/destinations/data/gigapipe.yaml b/destinations/data/gigapipe.yaml index 7b71600ba..7c52e3d0f 100644 --- a/destinations/data/gigapipe.yaml +++ b/destinations/data/gigapipe.yaml @@ -35,19 +35,13 @@ spec: required: true - name: QRYN_RESOURCE_TO_TELEMETRY_CONVERSION displayName: Convert container attributes to labels - componentType: dropdown + componentType: checkbox componentProps: - values: - - "Yes" - - "No" required: false initialValue: Yes - name: QRYN_ADD_EXPORTER_NAME displayName: Add exporter name to labels - componentType: dropdown + componentType: checkbox componentProps: - values: - - "Yes" - - "No" required: false initialValue: Yes diff --git a/destinations/data/qryn.yaml b/destinations/data/qryn.yaml index 0397d4d61..93ac8135c 100644 --- a/destinations/data/qryn.yaml +++ b/destinations/data/qryn.yaml @@ -33,19 +33,13 @@ spec: type: text - name: QRYN_OSS_RESOURCE_TO_TELEMETRY_CONVERSION displayName: Convert container attributes to labels - componentType: dropdown + componentType: checkbox componentProps: - values: - - "Yes" - - "No" required: false initialValue: Yes - name: QRYN_OSS_ADD_EXPORTER_NAME displayName: Add exporter name to labels - componentType: dropdown + componentType: checkbox componentProps: - values: - - "Yes" - - "No" required: false initialValue: Yes diff --git a/docs/backends/clickhouse.mdx b/docs/backends/clickhouse.mdx index fb6e91e8c..cf89a980d 100644 --- a/docs/backends/clickhouse.mdx +++ b/docs/backends/clickhouse.mdx @@ -35,8 +35,8 @@ The benefit of this option is that you can see the value fast, without needing t The downside is that the schema may not be optimized for your specific use case, and may make changes more complicated. To use it: -- Odigos UI - When adding a new ClickHouse destination, select the `Create` Option under the `Create Scheme` field. -- Destination K8s Manifest - Set the `CLICKHOUSE_CREATE_SCHEME` setting to value `Create`. +- Odigos UI - When adding a new ClickHouse destination, select the `Create Scheme` checkbox field. +- Destination K8s Manifest - Set the `CLICKHOUSE_CREATE_SCHEME` setting to value `true`. The schema which will be used by default can be found [here](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter/clickhouseexporter/example/default_ddl). @@ -54,8 +54,8 @@ This option is not recommended for production workloads: With this option, you are responsible for creating and managing the schema yourself. To use it: -- Odigos UI - In `Create Scheme` field, select the the `Skip` Option. -- Destination K8s Manifest - Set the `CLICKHOUSE_CREATE_SCHEME` setting to value `Skip`. +- Odigos UI - Unselect the `Create Scheme` checkbox field. +- Destination K8s Manifest - Set the `CLICKHOUSE_CREATE_SCHEME` setting to value `false`. The benefit of this option is that you have full control over the schema, and can optimize it for your specific use case. @@ -103,7 +103,7 @@ These are optional, keep empty if your ClickHouse server does not require authen ### Schema -- Create Schema - Set to `Skip` if you manage your own schema, or `Create` to have Odigos create the schema for you. See [Create Schema](#create-schema) for more details. 
+- Create Schema - Set to `false` if you manage your own schema, or `true` to have Odigos create the schema for you. See [Create Schema](#create-schema) for more details. - Database Name (Required) - The name of the Clickhouse Database where the telemetry data will be stored. The default is `otel`. The Database will not be created when not exists, so make sure you have created it before. - Table Names - Allows you to customize the names of the tables where the telemetry data will be stored. The default is `otel_traces` for traces and `otel_metrics` for metrics. @@ -140,7 +140,7 @@ metadata: namespace: odigos-system spec: data: - CLICKHOUSE_CREATE_SCHEME: + CLICKHOUSE_CREATE_SCHEME: # CLICKHOUSE_USERNAME: # Note: The commented fields above are optional. CLICKHOUSE_DATABASE_NAME: diff --git a/docs/backends/gigapipe.mdx b/docs/backends/gigapipe.mdx index fb9f9d203..ec9215049 100644 --- a/docs/backends/gigapipe.mdx +++ b/docs/backends/gigapipe.mdx @@ -56,8 +56,8 @@ metadata: spec: data: QRYN_API_KEY: - # QRYN_RESOURCE_TO_TELEMETRY_CONVERSION: - # QRYN_ADD_EXPORTER_NAME: + # QRYN_RESOURCE_TO_TELEMETRY_CONVERSION: + # QRYN_ADD_EXPORTER_NAME: # Note: The commented fields above are optional. QRYN_URL: destinationName: qryn diff --git a/docs/backends/qryn.mdx b/docs/backends/qryn.mdx index 1a7a31080..6e80ce270 100644 --- a/docs/backends/qryn.mdx +++ b/docs/backends/qryn.mdx @@ -49,8 +49,8 @@ spec: data: QRYN_OSS_URL: # QRYN_OSS_USERNAME: - # QRYN_OSS_RESOURCE_TO_TELEMETRY_CONVERSION: - # QRYN_OSS_ADD_EXPORTER_NAME: + # QRYN_OSS_RESOURCE_TO_TELEMETRY_CONVERSION: + # QRYN_OSS_ADD_EXPORTER_NAME: # Note: The commented fields above are optional. destinationName: qryn-oss # Uncomment the secretRef below if you are using the optional Secret. diff --git a/frontend/webapp/hooks/compute-platform/useComputePlatform.ts b/frontend/webapp/hooks/compute-platform/useComputePlatform.ts index 0253168f6..da943fe0c 100644 --- a/frontend/webapp/hooks/compute-platform/useComputePlatform.ts +++ b/frontend/webapp/hooks/compute-platform/useComputePlatform.ts @@ -45,6 +45,27 @@ export const useComputePlatform = (): UseComputePlatformHook => { return { ...item, type }; }), + destinations: data.computePlatform.destinations.map((item) => { + // Replace deprecated string values, with boolean values + const fields = + item.destinationType.type === 'clickhouse' + ? item.fields.replace('"CLICKHOUSE_CREATE_SCHEME":"Create"', '"CLICKHOUSE_CREATE_SCHEME":"true"').replace('"CLICKHOUSE_CREATE_SCHEME":"Skip"', '"CLICKHOUSE_CREATE_SCHEME":"false"') + : item.destinationType.type === 'qryn' + ? item.fields + .replace('"QRYN_ADD_EXPORTER_NAME":"Yes"', '"QRYN_ADD_EXPORTER_NAME":"true"') + .replace('"QRYN_ADD_EXPORTER_NAME":"No"', '"QRYN_ADD_EXPORTER_NAME":"false"') + .replace('"QRYN_RESOURCE_TO_TELEMETRY_CONVERSION":"Yes"', '"QRYN_RESOURCE_TO_TELEMETRY_CONVERSION":"true"') + .replace('"QRYN_RESOURCE_TO_TELEMETRY_CONVERSION":"No"', '"QRYN_RESOURCE_TO_TELEMETRY_CONVERSION":"false"') + : item.destinationType.type === 'qryn-oss' + ? 
item.fields + .replace('"QRYN_OSS_ADD_EXPORTER_NAME":"Yes"', '"QRYN_OSS_ADD_EXPORTER_NAME":"true"') + .replace('"QRYN_OSS_ADD_EXPORTER_NAME":"No"', '"QRYN_OSS_ADD_EXPORTER_NAME":"false"') + .replace('"QRYN_OSS_RESOURCE_TO_TELEMETRY_CONVERSION":"Yes"', '"QRYN_OSS_RESOURCE_TO_TELEMETRY_CONVERSION":"true"') + .replace('"QRYN_OSS_RESOURCE_TO_TELEMETRY_CONVERSION":"No"', '"QRYN_OSS_RESOURCE_TO_TELEMETRY_CONVERSION":"false"') + : item.fields; + + return { ...item, fields }; + }), }, }; }, [data]); diff --git a/frontend/webapp/hooks/destinations/useDestinationCRUD.ts b/frontend/webapp/hooks/destinations/useDestinationCRUD.ts index cc52de26d..4870b6617 100644 --- a/frontend/webapp/hooks/destinations/useDestinationCRUD.ts +++ b/frontend/webapp/hooks/destinations/useDestinationCRUD.ts @@ -75,8 +75,14 @@ export const useDestinationCRUD = (params?: Params) => { loading: cState.loading || uState.loading || dState.loading, destinations: data?.computePlatform.destinations || [], - createDestination: (destination: DestinationInput) => createDestination({ variables: { destination: { ...destination, fields: destination.fields.filter(({ value }) => value !== undefined) } } }), - updateDestination: (id: string, destination: DestinationInput) => updateDestination({ variables: { id, destination } }), - deleteDestination: (id: string) => deleteDestination({ variables: { id } }), + createDestination: (destination: DestinationInput) => { + createDestination({ variables: { destination: { ...destination, fields: destination.fields.filter(({ value }) => value !== undefined) } } }); + }, + updateDestination: (id: string, destination: DestinationInput) => { + updateDestination({ variables: { id, destination: { ...destination, fields: destination.fields.filter(({ value }) => value !== undefined) } } }); + }, + deleteDestination: (id: string) => { + deleteDestination({ variables: { id } }); + }, }; };
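
A quick illustration of the new boolean handling in this last patch: the sketch below reproduces the `getBooleanConfig` helper added to `common/config/utils.go` and exercises it with the ClickHouse `CLICKHOUSE_CREATE_SCHEME` values, matching the `getBooleanConfig(createSchema, "create")` call in `common/config/clickhouse.go`. The helper body is copied from the patch; the `main` function is only a demonstration, not part of the change.

```go
package main

import (
	"fmt"
	"strings"
)

// Copied from the patch (common/config/utils.go): a config value is truthy
// if it equals the new "true" value or the destination's deprecated
// affirmative value. Only the current value is lower-cased, so callers pass
// the deprecated value already in lower case (e.g. "create").
func getBooleanConfig(currentValue string, deprecatedValue string) bool {
	lowerCaseValue := strings.ToLower(currentValue)
	return lowerCaseValue == "true" || lowerCaseValue == deprecatedValue
}

func main() {
	fmt.Println(getBooleanConfig("Create", "create")) // true: deprecated dropdown value
	fmt.Println(getBooleanConfig("true", "create"))   // true: new checkbox value
	fmt.Println(getBooleanConfig("Skip", "create"))   // false: deprecated negative value
	fmt.Println(getBooleanConfig("false", "create"))  // false: new checkbox value
}
```

This keeps destinations that were saved with the old dropdown values working unchanged, while the migration in `useComputePlatform.ts` rewrites the stored strings to `"true"`/`"false"` for the frontend going forward.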