diff --git a/Makefile b/Makefile
index 892373ba1..06f564c9d 100644
--- a/Makefile
+++ b/Makefile
@@ -215,6 +215,12 @@ cli-diagnose:
 	@echo "Diagnosing cluster data for debugging"
 	cd ./cli ; go run -tags=embed_manifests . diagnose
 
+.PHONY: helm-install
+helm-install:
+	@echo "Installing odigos using helm"
+	helm upgrade --install odigos ./helm/odigos --create-namespace --namespace odigos-system --set image.tag=$(ODIGOS_CLI_VERSION)
+	kubectl label namespace odigos-system odigos.io/system-object="true"
+
 .PHONY: api-all
 api-all:
 	make -C api all
diff --git a/autoscaler/PROJECT b/autoscaler/PROJECT
index 45667032b..7fa5618d2 100644
--- a/autoscaler/PROJECT
+++ b/autoscaler/PROJECT
@@ -25,7 +25,7 @@ resources:
   namespaced: true
   controller: true
   domain: odigos.io
-  kind: InstrumentedApplication
+  kind: InstrumentationConfig
   path: github.com/odigos-io/odigos/api/odigos/v1alpha1
   version: v1alpha1
 version: "3"
diff --git a/autoscaler/controllers/datacollection/configmap.go b/autoscaler/controllers/datacollection/configmap.go
index d1b318c96..f17baec96 100644
--- a/autoscaler/controllers/datacollection/configmap.go
+++ b/autoscaler/controllers/datacollection/configmap.go
@@ -28,7 +28,7 @@ import (
 	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
 )
 
-func SyncConfigMap(apps *odigosv1.InstrumentedApplicationList, dests *odigosv1.DestinationList, allProcessors *odigosv1.ProcessorList,
+func SyncConfigMap(sources *odigosv1.InstrumentationConfigList, dests *odigosv1.DestinationList, allProcessors *odigosv1.ProcessorList,
 	datacollection *odigosv1.CollectorsGroup, ctx context.Context, c client.Client, scheme *runtime.Scheme, disableNameProcessor bool) error {
 	logger := log.FromContext(ctx)
@@ -39,7 +39,7 @@ func SyncConfigMap(apps *odigosv1.InstrumentedApplicationList, dests *odigosv1.D
 	SamplingExists := commonconf.FindFirstProcessorByType(allProcessors, "odigossampling")
 	setTracesLoadBalancer := SamplingExists != nil
 
-	desired, err := getDesiredConfigMap(apps, dests, processors, datacollection, scheme, setTracesLoadBalancer, disableNameProcessor)
+	desired, err := getDesiredConfigMap(sources, dests, processors, datacollection, scheme, setTracesLoadBalancer, disableNameProcessor)
 	if err != nil {
 		logger.Error(err, "failed to get desired config map")
 		return err
@@ -96,9 +96,9 @@ func createConfigMap(desired *v1.ConfigMap, ctx context.Context, c client.Client
 	return desired, nil
 }
 
-func getDesiredConfigMap(apps *odigosv1.InstrumentedApplicationList, dests *odigosv1.DestinationList, processors []*odigosv1.Processor,
+func getDesiredConfigMap(sources *odigosv1.InstrumentationConfigList, dests *odigosv1.DestinationList, processors []*odigosv1.Processor,
 	datacollection *odigosv1.CollectorsGroup, scheme *runtime.Scheme, setTracesLoadBalancer bool, disableNameProcessor bool) (*v1.ConfigMap, error) {
-	cmData, err := calculateConfigMapData(datacollection, apps, dests, processors, setTracesLoadBalancer, disableNameProcessor)
+	cmData, err := calculateConfigMapData(datacollection, sources, dests, processors, setTracesLoadBalancer, disableNameProcessor)
 	if err != nil {
 		return nil, err
 	}
@@ -124,7 +124,7 @@ func getDesiredConfigMap(apps *odigosv1.InstrumentedApplicationList, dests *odig
 	return &desired, nil
 }
 
-func calculateConfigMapData(nodeCG *odigosv1.CollectorsGroup, apps *odigosv1.InstrumentedApplicationList, dests *odigosv1.DestinationList, processors []*odigosv1.Processor,
+func calculateConfigMapData(nodeCG *odigosv1.CollectorsGroup, sources *odigosv1.InstrumentationConfigList, dests *odigosv1.DestinationList, processors []*odigosv1.Processor,
 	setTracesLoadBalancer bool, disableNameProcessor bool) (string, error) {
 	ownMetricsPort := nodeCG.Spec.CollectorOwnMetricsPort
@@ -286,7 +286,7 @@ func calculateConfigMapData(nodeCG *odigosv1.CollectorsGroup, apps *odigosv1.Ins
 	if collectLogs {
 		includes := make([]string, 0)
-		for _, element := range apps.Items {
+		for _, element := range sources.Items {
 			// Paths for log files: /var/log/pods/<namespace>_<pod name>_<pod ID>/<container name>/<restart count>.log
 			// Pod specifiers
 			// 	Deployment: <namespace>_<deployment name>-<replicaset suffix>-<pod suffix>_<pod ID>
diff --git a/autoscaler/controllers/datacollection/configmap_test.go b/autoscaler/controllers/datacollection/configmap_test.go
index 702e75fee..b35567fe7 100644
--- a/autoscaler/controllers/datacollection/configmap_test.go
+++ b/autoscaler/controllers/datacollection/configmap_test.go
@@ -63,9 +63,9 @@ func NewMockTestStatefulSet(ns *corev1.Namespace) *appsv1.StatefulSet {
 
 // given a workload object (deployment, daemonset, statefulset), return a mock instrumented application
 // with a single container with the GoProgrammingLanguage
-func NewMockInstrumentedApplication(workloadObject client.Object) *odigosv1.InstrumentedApplication {
+func NewMockInstrumentationConfig(workloadObject client.Object) *odigosv1.InstrumentationConfig {
 	gvk, _ := apiutil.GVKForObject(workloadObject, scheme.Scheme)
-	return &odigosv1.InstrumentedApplication{
+	return &odigosv1.InstrumentationConfig{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      workload.CalculateWorkloadRuntimeObjectName(workloadObject.GetName(), gvk.Kind),
 			Namespace: workloadObject.GetNamespace(),
@@ -79,9 +79,9 @@ func NewMockInstrumentedApplication(workloadObject client.Object) *odigosv1.Inst
 	}
 }
 
-func NewMockInstrumentedApplicationWoOwner(workloadObject client.Object) *odigosv1.InstrumentedApplication {
+func NewMockInstrumentationConfigWoOwner(workloadObject client.Object) *odigosv1.InstrumentationConfig {
 	gvk, _ := apiutil.GVKForObject(workloadObject, scheme.Scheme)
-	return &odigosv1.InstrumentedApplication{
+	return &odigosv1.InstrumentationConfig{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      workload.CalculateWorkloadRuntimeObjectName(workloadObject.GetName(), gvk.Kind),
 			Namespace: workloadObject.GetNamespace(),
@@ -118,15 +118,15 @@ func TestCalculateConfigMapData(t *testing.T) {
 	ns := NewMockNamespace("default")
 	ns2 := NewMockNamespace("other-namespace")
 
-	items := []v1alpha1.InstrumentedApplication{
-		*NewMockInstrumentedApplication(NewMockTestDeployment(ns)),
-		*NewMockInstrumentedApplication(NewMockTestDaemonSet(ns)),
-		*NewMockInstrumentedApplication(NewMockTestStatefulSet(ns2)),
-		*NewMockInstrumentedApplicationWoOwner(NewMockTestDeployment(ns2)),
+	items := []v1alpha1.InstrumentationConfig{
+		*NewMockInstrumentationConfig(NewMockTestDeployment(ns)),
+		*NewMockInstrumentationConfig(NewMockTestDaemonSet(ns)),
+		*NewMockInstrumentationConfig(NewMockTestStatefulSet(ns2)),
+		*NewMockInstrumentationConfigWoOwner(NewMockTestDeployment(ns2)),
 	}
 
 	got, err := calculateConfigMapData(
-		&v1alpha1.InstrumentedApplicationList{
+		&v1alpha1.InstrumentationConfigList{
 			Items: items,
 		},
 		NewMockDestinationList(),
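The include list built from `sources.Items` above follows the kubelet's pod log layout. A minimal sketch of how such an include glob can be derived for a deployment workload; the function and names here are illustrative, not the actual odigos helper:

```go
package main

import "fmt"

// buildLogIncludePath shows how a filelog-receiver include glob can be derived
// for an instrumented deployment, following the pod log path layout described
// in the diff: /var/log/pods/<ns>_<pod>_<uid>/<container>/<restart>.log.
func buildLogIncludePath(namespace, deploymentName string) string {
	// A deployment's pods are named <deployment>-<replicaset hash>-<pod hash>,
	// so a wildcard after the deployment name matches all of its pods.
	return fmt.Sprintf("/var/log/pods/%s_%s-*_*/*/*.log", namespace, deploymentName)
}

func main() {
	fmt.Println(buildLogIncludePath("default", "frontend"))
	// Output: /var/log/pods/default_frontend-*_*/*/*.log
}
```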
diff --git a/autoscaler/controllers/datacollection/root.go b/autoscaler/controllers/datacollection/root.go
index 14a5dffdf..f7a3b1410 100644
--- a/autoscaler/controllers/datacollection/root.go
+++ b/autoscaler/controllers/datacollection/root.go
@@ -22,14 +22,13 @@ const (
 func Sync(ctx context.Context, c client.Client, scheme *runtime.Scheme, imagePullSecrets []string, odigosVersion string, k8sVersion *version.Version, disableNameProcessor bool) error {
 	logger := log.FromContext(ctx)
 
-	var instApps odigosv1.InstrumentedApplicationList
-	if err := c.List(ctx, &instApps); err != nil {
-		logger.Error(err, "Failed to list instrumented apps")
+	var sources odigosv1.InstrumentationConfigList
+	if err := c.List(ctx, &sources); err != nil {
 		return err
 	}
 
-	if len(instApps.Items) == 0 {
-		logger.V(3).Info("No instrumented apps")
+	if len(sources.Items) == 0 {
+		logger.V(3).Info("No odigos sources found, skipping data collection sync")
 		return nil
 	}
 
@@ -52,16 +51,16 @@ func Sync(ctx context.Context, c client.Client, scheme *runtime.Scheme, imagePul
 		return err
 	}
 
-	return syncDataCollection(&instApps, &dests, &processors, &dataCollectionCollectorGroup, ctx, c, scheme, imagePullSecrets, odigosVersion, k8sVersion, disableNameProcessor)
+	return syncDataCollection(&sources, &dests, &processors, &dataCollectionCollectorGroup, ctx, c, scheme, imagePullSecrets, odigosVersion, k8sVersion, disableNameProcessor)
 }
 
-func syncDataCollection(instApps *odigosv1.InstrumentedApplicationList, dests *odigosv1.DestinationList, processors *odigosv1.ProcessorList,
+func syncDataCollection(sources *odigosv1.InstrumentationConfigList, dests *odigosv1.DestinationList, processors *odigosv1.ProcessorList,
 	dataCollection *odigosv1.CollectorsGroup, ctx context.Context, c client.Client, scheme *runtime.Scheme, imagePullSecrets []string, odigosVersion string, k8sVersion *version.Version, disableNameProcessor bool) error {
 	logger := log.FromContext(ctx)
 	logger.V(0).Info("Syncing data collection")
 
-	err := SyncConfigMap(instApps, dests, processors, dataCollection, ctx, c, scheme, disableNameProcessor)
+	err := SyncConfigMap(sources, dests, processors, dataCollection, ctx, c, scheme, disableNameProcessor)
 	if err != nil {
 		logger.Error(err, "Failed to sync config map")
 		return err
diff --git a/autoscaler/controllers/gateway/deployment.go b/autoscaler/controllers/gateway/deployment.go
index a89a66fd1..0fe51b29e 100644
--- a/autoscaler/controllers/gateway/deployment.go
+++ b/autoscaler/controllers/gateway/deployment.go
@@ -174,13 +174,13 @@ func getDesiredDeployment(dests *odigosv1.DestinationList, configDataHash string
 						{
 							// let the Go runtime know how many CPUs are available,
 							// without this, Go will assume all the cores are available.
-							Name: "GOMAXPROCS",
-							ValueFrom: &corev1.EnvVarSource{
+							Name:      "GOMAXPROCS",
+							ValueFrom: &corev1.EnvVarSource{
 								ResourceFieldRef: &corev1.ResourceFieldSelector{
 									ContainerName: containerName,
 									// limitCPU, Kubernetes automatically rounds up the value to an integer
 									// (700m -> 1, 1200m -> 2)
-									Resource: "limits.cpu",
+									Resource:      "limits.cpu",
 								},
 							},
 						},
diff --git a/autoscaler/controllers/instrumentedapplication_controller.go b/autoscaler/controllers/instrumentedapplication_controller.go
index 306198997..d669780a4 100644
--- a/autoscaler/controllers/instrumentedapplication_controller.go
+++ b/autoscaler/controllers/instrumentedapplication_controller.go
@@ -26,10 +26,9 @@ import (
 	"k8s.io/apimachinery/pkg/util/version"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
-	"sigs.k8s.io/controller-runtime/pkg/log"
 )
 
-type InstrumentedApplicationReconciler struct {
+type InstrumentationConfigReconciler struct {
 	client.Client
 	Scheme           *runtime.Scheme
 	ImagePullSecrets []string
@@ -38,9 +37,7 @@ type InstrumentedApplicationReconciler struct {
 	DisableNameProcessor bool
 }
 
-func (r *InstrumentedApplicationReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
-	logger := log.FromContext(ctx)
-	logger.V(0).Info("Reconciling InstrumentedApps")
+func (r *InstrumentationConfigReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
 	err := datacollection.Sync(ctx, r.Client, r.Scheme, r.ImagePullSecrets, r.OdigosVersion, r.K8sVersion, r.DisableNameProcessor)
 	if err != nil {
 		return ctrl.Result{}, err
@@ -49,9 +46,9 @@ func (r *InstrumentedApplicationReconciler) Reconcile(ctx context.Context, req c
 	return ctrl.Result{}, nil
 }
 
-func (r *InstrumentedApplicationReconciler) SetupWithManager(mgr ctrl.Manager) error {
+func (r *InstrumentationConfigReconciler) SetupWithManager(mgr ctrl.Manager) error {
 	return ctrl.NewControllerManagedBy(mgr).
-		For(&odigosv1.InstrumentedApplication{}).
+		For(&odigosv1.InstrumentationConfig{}).
 		// this controller only cares about the instrumented application existence.
 		// when it is created or removed, the node collector config map needs to be updated to scrape logs for its pods.
 		WithEventFilter(&predicate.ExistencePredicate{}).
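`predicate.ExistencePredicate` here is an odigos helper, not shown in this diff. A stand-in built from controller-runtime's stock `predicate.Funcs` illustrates the existence-only filtering the comment describes; this is a sketch of the idea, not the actual odigos implementation:

```go
package predicates

import (
	"sigs.k8s.io/controller-runtime/pkg/event"
	"sigs.k8s.io/controller-runtime/pkg/predicate"
)

// ExistenceOnly reacts to create and delete events and ignores updates:
// the node collector config map only depends on which InstrumentationConfig
// objects exist, not on their contents.
func ExistenceOnly() predicate.Predicate {
	return predicate.Funcs{
		CreateFunc:  func(event.CreateEvent) bool { return true },
		DeleteFunc:  func(event.DeleteEvent) bool { return true },
		UpdateFunc:  func(event.UpdateEvent) bool { return false },
		GenericFunc: func(event.GenericEvent) bool { return false },
	}
}
```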
diff --git a/autoscaler/main.go b/autoscaler/main.go
index e804e1cb4..4757f758e 100644
--- a/autoscaler/main.go
+++ b/autoscaler/main.go
@@ -51,7 +51,7 @@ import (
 	metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
 
 	apiactions "github.com/odigos-io/odigos/api/actions/v1alpha1"
-	observabilitycontrolplanev1 "github.com/odigos-io/odigos/api/odigos/v1alpha1"
+	odigosv1 "github.com/odigos-io/odigos/api/odigos/v1alpha1"
 	"github.com/odigos-io/odigos/common"
 
 	"github.com/odigos-io/odigos/autoscaler/controllers"
@@ -72,7 +72,7 @@ var (
 func init() {
 	utilruntime.Must(clientgoscheme.AddToScheme(scheme))
-	utilruntime.Must(observabilitycontrolplanev1.AddToScheme(scheme))
+	utilruntime.Must(odigosv1.AddToScheme(scheme))
 	utilruntime.Must(apiactions.AddToScheme(scheme))
 	//+kubebuilder:scaffold:scheme
 }
@@ -157,6 +157,36 @@ func main() {
 				&corev1.Secret{}: {
 					Field: nsSelector,
 				},
+				&odigosv1.CollectorsGroup{}: {
+					Field: nsSelector,
+				},
+				&odigosv1.Destination{}: {
+					Field: nsSelector,
+				},
+				&odigosv1.Processor{}: {
+					Field: nsSelector,
+				},
+				&apiactions.AddClusterInfo{}: {
+					Field: nsSelector,
+				},
+				&apiactions.DeleteAttribute{}: {
+					Field: nsSelector,
+				},
+				&apiactions.ErrorSampler{}: {
+					Field: nsSelector,
+				},
+				&apiactions.LatencySampler{}: {
+					Field: nsSelector,
+				},
+				&apiactions.PiiMasking{}: {
+					Field: nsSelector,
+				},
+				&apiactions.ProbabilisticSampler{}: {
+					Field: nsSelector,
+				},
+				&apiactions.RenameAttribute{}: {
+					Field: nsSelector,
+				},
 			},
 		},
 		HealthProbeBindAddress: probeAddr,
@@ -224,7 +254,7 @@ func main() {
 		setupLog.Error(err, "unable to create controller", "controller", "CollectorsGroup")
 		os.Exit(1)
 	}
-	if err = (&controllers.InstrumentedApplicationReconciler{
+	if err = (&controllers.InstrumentationConfigReconciler{
 		Client:           mgr.GetClient(),
 		Scheme:           mgr.GetScheme(),
 		ImagePullSecrets: imagePullSecrets,
@@ -232,7 +262,7 @@ func main() {
 		K8sVersion:           k8sVersion,
 		DisableNameProcessor: disableNameProcessor,
 	}).SetupWithManager(mgr); err != nil {
-		setupLog.Error(err, "unable to create controller", "controller", "InstrumentedApplication")
+		setupLog.Error(err, "unable to create controller", "controller", "InstrumentationConfig")
 		os.Exit(1)
 	}
 	if err = (&controllers.SecretReconciler{
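The `nsSelector` entries added above scope the manager's informer cache per object type, so cluster-wide lists and watches are avoided. A self-contained sketch of the same mechanism, assuming the odigos namespace is `odigos-system` (the real code resolves it at runtime):

```go
package main

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/fields"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/cache"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func main() {
	// Only cache objects whose metadata.namespace matches the odigos namespace.
	nsSelector := fields.OneTermEqualSelector("metadata.namespace", "odigos-system")

	// Per-type cache restrictions shrink memory usage and API-server load.
	_, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
		Cache: cache.Options{
			ByObject: map[client.Object]cache.ByObject{
				&corev1.ConfigMap{}: {Field: nsSelector},
				&corev1.Secret{}:    {Field: nsSelector},
			},
		},
	})
	if err != nil {
		panic(err)
	}
}
```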
diff --git a/cli/cmd/resources/autoscaler.go b/cli/cmd/resources/autoscaler.go
index 506e7770c..b961c688c 100644
--- a/cli/cmd/resources/autoscaler.go
+++ b/cli/cmd/resources/autoscaler.go
@@ -19,11 +19,15 @@ import (
 )
 
 const (
-	AutoScalerServiceAccountName = "odigos-autoscaler"
-	AutoScalerServiceName        = "auto-scaler"
-	AutoScalerDeploymentName     = "odigos-autoscaler"
-	AutoScalerAppLabelValue      = "odigos-autoscaler"
-	AutoScalerContainerName      = "manager"
+	AutoScalerDeploymentName         = "odigos-autoscaler"
+	AutoScalerServiceAccountName     = AutoScalerDeploymentName
+	AutoScalerAppLabelValue          = AutoScalerDeploymentName
+	AutoScalerRoleName               = AutoScalerDeploymentName
+	AutoScalerRoleBindingName        = AutoScalerDeploymentName
+	AutoScalerClusterRoleName        = AutoScalerDeploymentName
+	AutoScalerClusterRoleBindingName = AutoScalerDeploymentName
+	AutoScalerServiceName            = "auto-scaler"
+	AutoScalerContainerName          = "manager"
 )
 
 func NewAutoscalerServiceAccount(ns string) *corev1.ServiceAccount {
@@ -46,7 +50,7 @@ func NewAutoscalerRole(ns string) *rbacv1.Role {
 			APIVersion: "rbac.authorization.k8s.io/v1",
 		},
 		ObjectMeta: metav1.ObjectMeta{
-			Name:      "odigos-autoscaler",
+			Name:      AutoScalerRoleName,
 			Namespace: ns,
 		},
 		Rules: []rbacv1.PolicyRule{
@@ -117,8 +121,8 @@ func NewAutoscalerRole(ns string) *rbacv1.Role {
 			{
 				Verbs: []string{
 					"get",
-					"patch",
-					"update",
+					"list",
+					"watch",
 				},
 				APIGroups: []string{"apps"},
 				Resources: []string{"deployments/status"},
 			},
@@ -142,168 +146,62 @@ func NewAutoscalerRole(ns string) *rbacv1.Role {
 				APIGroups: []string{""},
 				Resources: []string{"secrets"},
 			},
-		},
-	}
-}
-
-func NewAutoscalerRoleBinding(ns string) *rbacv1.RoleBinding {
-	return &rbacv1.RoleBinding{
-		TypeMeta: metav1.TypeMeta{
-			Kind:       "RoleBinding",
-			APIVersion: "rbac.authorization.k8s.io/v1",
-		},
-		ObjectMeta: metav1.ObjectMeta{
-			Name:      "odigos-autoscaler",
-			Namespace: ns,
-		},
-		Subjects: []rbacv1.Subject{
-			{
-				Kind: "ServiceAccount",
-				Name: "odigos-autoscaler",
-			},
-		},
-		RoleRef: rbacv1.RoleRef{
-			APIGroup: "rbac.authorization.k8s.io",
-			Kind:     "Role",
-			Name:     "odigos-autoscaler",
-		},
-	}
-}
-
-func NewAutoscalerClusterRole() *rbacv1.ClusterRole {
-	return &rbacv1.ClusterRole{
-		TypeMeta: metav1.TypeMeta{
-			Kind:       "ClusterRole",
-			APIVersion: "rbac.authorization.k8s.io/v1",
-		},
-		ObjectMeta: metav1.ObjectMeta{
-			Name: "odigos-autoscaler",
-		},
-		Rules: []rbacv1.PolicyRule{
-			{
-				Verbs: []string{
-					"get",
-					"list",
-					"watch",
-				},
-				APIGroups: []string{""},
-				Resources: []string{"configmaps"},
-			},
-			{
-				Verbs: []string{
-					"get",
-					"list",
-					"watch",
-				},
-				APIGroups: []string{""},
-				Resources: []string{"services"},
-			},
 			{
 				Verbs: []string{
 					"get",
 					"list",
 					"watch",
 				},
-				APIGroups: []string{"apps"},
-				Resources: []string{"daemonsets"},
-			},
-			{
-				Verbs: []string{
-					"get",
-					"list",
-					"watch",
-				},
-				APIGroups: []string{"apps"},
-				Resources: []string{"deployments"},
+				APIGroups: []string{"odigos.io"},
+				Resources: []string{"destinations"},
 			},
 			{
 				Verbs: []string{
-					"create",
-					"delete",
 					"get",
-					"list",
 					"patch",
 					"update",
-					"watch",
-				},
-				APIGroups: []string{"odigos.io"},
-				Resources: []string{"instrumentedapplications"},
-			},
-			{
-				Verbs: []string{
-					"update",
 				},
 				APIGroups: []string{"odigos.io"},
-				Resources: []string{"instrumentedapplications/finalizers"},
+				Resources: []string{"destinations/status"},
 			},
 			{
 				Verbs: []string{
-					"get",
-					"patch",
-					"update",
-				},
-				APIGroups: []string{"odigos.io"},
-				Resources: []string{"instrumentedapplications/status"},
-			},
-			{
-				Verbs: []string{
-					"create",
-					"delete",
 					"get",
 					"list",
-					"patch",
-					"update",
 					"watch",
-				},
-				APIGroups: []string{"odigos.io"},
-				Resources: []string{"collectorsgroups"},
-			},
-			{
-				Verbs: []string{
+					"patch",
+					"create",
 					"update",
 				},
 				APIGroups: []string{"odigos.io"},
-				Resources: []string{"collectorsgroups/finalizers"},
+				Resources: []string{"processors"},
 			},
 			{
 				Verbs: []string{
+					"watch",
 					"get",
-					"patch",
-					"update",
+					"list",
 				},
-				APIGroups: []string{"odigos.io"},
-				Resources: []string{"collectorsgroups/status"},
+				APIGroups: []string{"actions.odigos.io"},
+				Resources: []string{"addclusterinfos", "deleteattributes", "renameattributes", "probabilisticsamplers", "piimaskings", "latencysamplers", "errorsamplers"},
 			},
 			{
 				Verbs: []string{
-					"create",
-					"delete",
 					"get",
-					"list",
 					"patch",
 					"update",
-					"watch",
 				},
-				APIGroups: []string{"odigos.io"},
-				Resources: []string{"destinations"},
+				APIGroups: []string{"actions.odigos.io"},
+				Resources: []string{"addclusterinfos/status", "deleteattributes/status", "renameattributes/status", "probabilisticsamplers/status", "piimaskings/status", "latencysamplers/status", "errorsamplers/status"},
 			},
 			{
 				Verbs: []string{
 					"get",
 					"list",
 					"watch",
-					"patch",
-					"create",
-					"update",
-				},
-				APIGroups: []string{"odigos.io"},
-				Resources: []string{"processors"},
-			},
-			{
-				Verbs: []string{
-					"update",
 				},
 				APIGroups: []string{"odigos.io"},
-				Resources: []string{"destinations/finalizers"},
+				Resources: []string{"collectorsgroups"},
 			},
 			{
 				Verbs: []string{
@@ -312,26 +210,46 @@ func NewAutoscalerClusterRole() *rbacv1.ClusterRole {
 					"update",
 				},
 				APIGroups: []string{"odigos.io"},
-				Resources: []string{"destinations/status"},
-			},
-			{
-				Verbs: []string{
-					"watch",
-					"get",
-					"list",
-				},
-				APIGroups: []string{"actions.odigos.io"},
-				Resources: []string{"addclusterinfos", "deleteattributes", "renameattributes", "probabilisticsamplers", "piimaskings", "latencysamplers", "errorsamplers"},
+				Resources: []string{"collectorsgroups/status"},
 			},
+		},
+	}
+}
+
+func NewAutoscalerRoleBinding(ns string) *rbacv1.RoleBinding {
+	return &rbacv1.RoleBinding{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       "RoleBinding",
+			APIVersion: "rbac.authorization.k8s.io/v1",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      AutoScalerRoleBindingName,
+			Namespace: ns,
+		},
+		Subjects: []rbacv1.Subject{
 			{
-				Verbs: []string{
-					"get",
-					"patch",
-					"update",
-				},
-				APIGroups: []string{"actions.odigos.io"},
-				Resources: []string{"addclusterinfos/status", "deleteattributes/status", "renameattributes/status", "probabilisticsamplers/status", "piimaskings/status", "latencysamplers/status", "errorsamplers/status"},
+				Kind: "ServiceAccount",
+				Name: AutoScalerServiceAccountName,
 			},
+		},
+		RoleRef: rbacv1.RoleRef{
+			APIGroup: "rbac.authorization.k8s.io",
+			Kind:     "Role",
+			Name:     AutoScalerRoleName,
+		},
+	}
+}
+
+func NewAutoscalerClusterRole() *rbacv1.ClusterRole {
+	return &rbacv1.ClusterRole{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       "ClusterRole",
+			APIVersion: "rbac.authorization.k8s.io/v1",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name: AutoScalerClusterRoleName,
+		},
+		Rules: []rbacv1.PolicyRule{
 			{
 				Verbs: []string{
 					"get",
@@ -339,7 +257,7 @@ func NewAutoscalerClusterRole() *rbacv1.ClusterRole {
 					"watch",
 				},
 				APIGroups: []string{"odigos.io"},
-				Resources: []string{"odigosconfigurations"},
+				Resources: []string{"instrumentationconfigs"},
 			},
 		},
 	}
@@ -352,19 +270,19 @@ func NewAutoscalerClusterRoleBinding(ns string) *rbacv1.ClusterRoleBinding {
 			APIVersion: "rbac.authorization.k8s.io/v1",
 		},
 		ObjectMeta: metav1.ObjectMeta{
-			Name: "odigos-autoscaler",
+			Name: AutoScalerClusterRoleBindingName,
 		},
 		Subjects: []rbacv1.Subject{
 			{
 				Kind:      "ServiceAccount",
-				Name:      "odigos-autoscaler",
+				Name:      AutoScalerServiceAccountName,
 				Namespace: ns,
 			},
 		},
 		RoleRef: rbacv1.RoleRef{
 			APIGroup: "rbac.authorization.k8s.io",
 			Kind:     "ClusterRole",
-			Name:     "odigos-autoscaler",
+			Name:     AutoScalerClusterRoleName,
 		},
 	}
 }
@@ -382,7 +300,7 @@ func NewAutoscalerLeaderElectionRoleBinding(ns string) *rbacv1.RoleBinding {
 		Subjects: []rbacv1.Subject{
 			{
 				Kind: "ServiceAccount",
-				Name: "odigos-autoscaler",
+				Name: AutoScalerServiceAccountName,
 			},
 		},
 		RoleRef: rbacv1.RoleRef{
@@ -506,7 +424,7 @@ func NewAutoscalerDeployment(ns string, version string, imagePrefix string, imag
 				},
 			},
 			TerminationGracePeriodSeconds: ptrint64(10),
-			ServiceAccountName:            "odigos-autoscaler",
+			ServiceAccountName:            AutoScalerServiceAccountName,
 			SecurityContext: &corev1.PodSecurityContext{
 				RunAsNonRoot: ptrbool(true),
 			},
diff --git a/common/config/clickhouse.go b/common/config/clickhouse.go
index 3ed3ad5f9..32d591c28 100644
--- a/common/config/clickhouse.go
+++ b/common/config/clickhouse.go
@@ -55,9 +55,8 @@ func (c *Clickhouse) ModifyConfig(dest ExporterConfigurer, currentConfig *Config
 		exporterConfig["password"] = clickhousePassword
 	}
 
-	createSchema, exists := dest.GetConfig()[clickhouseCreateSchema]
-	createSchemaBoolValue := exists && strings.ToLower(createSchema) == "create"
-	exporterConfig["create_schema"] = createSchemaBoolValue
+	createSchema := dest.GetConfig()[clickhouseCreateSchema]
+	exporterConfig["create_schema"] = getBooleanConfig(createSchema, "create")
 
 	dbName, exists := dest.GetConfig()[clickhouseDatabaseName]
 	if !exists {
diff --git a/common/config/qryn.go b/common/config/qryn.go
index 8386110ec..4de118726 100644
--- a/common/config/qryn.go
+++ b/common/config/qryn.go
@@ -45,7 +45,7 @@ func (g *Qryn) ModifyConfig(dest ExporterConfigurer, currentConfig *Config) erro
 	if conf.passwordFieldName != "" {
 		passwordPlaceholder = "${" + conf.passwordFieldName + "}"
 	}
-	baseURL, err := parseURL(dest.GetConfig()[qrynHost], conf.key, passwordPlaceholder)
+	baseURL, err := parseURL(conf.host, conf.key, passwordPlaceholder)
 	if err != nil {
 		return errors.Join(err, errors.New("invalid qryn endpoint. gateway will not be configured with qryn"))
 	}
@@ -55,7 +55,7 @@ func (g *Qryn) ModifyConfig(dest ExporterConfigurer, currentConfig *Config) erro
 	currentConfig.Exporters[rwExporterName] = GenericMap{
 		"endpoint": fmt.Sprintf("%s/api/v1/prom/remote/write", baseURL),
 		"resource_to_telemetry_conversion": GenericMap{
-			"enabled": dest.GetConfig()[resourceToTelemetryConversion] == "Yes",
+			"enabled": conf.resourceToTelemetryConversion,
 		},
 	}
 	metricsPipelineName := "metrics/qryn-" + dest.GetID()
@@ -126,8 +126,8 @@ func (g *Qryn) getConfigs(dest ExporterConfigurer) qrynConf {
 	return qrynConf{
 		host: dest.GetConfig()[qrynHost],
 		key:  dest.GetConfig()[qrynAPIKey],
-		addExporterName:               dest.GetConfig()[qrynAddExporterName] == "Yes",
-		resourceToTelemetryConversion: dest.GetConfig()[resourceToTelemetryConversion] == "Yes",
+		addExporterName:               getBooleanConfig(dest.GetConfig()[qrynAddExporterName], "Yes"),
+		resourceToTelemetryConversion: getBooleanConfig(dest.GetConfig()[resourceToTelemetryConversion], "Yes"),
 		secretsOptional:   dest.GetConfig()[qrynSecretsOptional] == "1",
 		passwordFieldName: dest.GetConfig()[qrynPasswordFieldName],
 	}
diff --git a/common/config/qryn_oss.go b/common/config/qryn_oss.go
index b09b306d5..2eada2dca 100644
--- a/common/config/qryn_oss.go
+++ b/common/config/qryn_oss.go
@@ -23,9 +23,19 @@ func (d QrynOssDest) GetConfig() map[string]string {
 	conf := d.ExporterConfigurer.GetConfig()
 	conf[qrynHost] = conf[qrynOssHost]
 	conf[qrynAPIKey] = conf[qrynOssUsername]
-	conf[resourceToTelemetryConversion] = conf[qrynOssresourceToTelemetryConversion]
+	// Yes/No are deprecated, use true/false
+	if conf[qrynOssresourceToTelemetryConversion] == "true" || conf[qrynOssresourceToTelemetryConversion] == "Yes" {
+		conf[resourceToTelemetryConversion] = "true"
+	} else {
+		conf[resourceToTelemetryConversion] = "false"
+	}
+	// Yes/No are deprecated, use true/false
+	if conf[qrynOssAddExporterName] == "true" || conf[qrynOssAddExporterName] == "Yes" {
+		conf[qrynAddExporterName] = "true"
+	} else {
+		conf[qrynAddExporterName] = "false"
+	}
 	conf[qrynSecretsOptional] = "1"
-	conf[qrynAddExporterName] = conf[qrynOssAddExporterName]
 	conf[qrynPasswordFieldName] = "QRYN_OSS_PASSWORD"
 	return conf
 }
diff --git a/common/config/utils.go b/common/config/utils.go
index c0d8aaac9..f9173bb46 100644
--- a/common/config/utils.go
+++ b/common/config/utils.go
@@ -92,3 +92,8 @@ func urlHostContainsPort(host string) bool {
 		return strings.Contains(host, ":")
 	}
 }
+
+func getBooleanConfig(currentValue string, deprecatedValue string) bool {
+	lowerCaseValue := strings.ToLower(currentValue)
+	return lowerCaseValue == "true" || lowerCaseValue == strings.ToLower(deprecatedValue)
+}
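`getBooleanConfig` centralizes the backward compatibility: each destination used to encode booleans with its own legacy dropdown token (`Create` for ClickHouse, `Yes` for qryn), while the new checkbox fields send `true`/`false`. Lowercasing both sides keeps call sites that pass `"Yes"` working. A small usage sketch (helper as above, calls illustrative):

```go
package main

import (
	"fmt"
	"strings"
)

// getBooleanConfig accepts "true" (any casing) or one destination-specific
// deprecated token, compared case-insensitively.
func getBooleanConfig(currentValue string, deprecatedValue string) bool {
	lowerCaseValue := strings.ToLower(currentValue)
	return lowerCaseValue == "true" || lowerCaseValue == strings.ToLower(deprecatedValue)
}

func main() {
	fmt.Println(getBooleanConfig("Create", "create")) // true  (legacy ClickHouse dropdown value)
	fmt.Println(getBooleanConfig("true", "create"))   // true  (new checkbox value)
	fmt.Println(getBooleanConfig("Skip", "create"))   // false
	fmt.Println(getBooleanConfig("Yes", "Yes"))       // true  (legacy qryn dropdown value)
	fmt.Println(getBooleanConfig("No", "Yes"))        // false
}
```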
diff --git a/destinations/data/clickhouse.yaml b/destinations/data/clickhouse.yaml
index 26f9f565a..97158e01d 100644
--- a/destinations/data/clickhouse.yaml
+++ b/destinations/data/clickhouse.yaml
@@ -20,7 +20,7 @@ spec:
       componentProps:
         type: text
         required: true
-        placeholder: "http://host:port"
+        placeholder: 'http://host:port'
       tooltip: 'Clickhouse server address'
     - name: CLICKHOUSE_USERNAME
       displayName: Username
@@ -39,14 +39,11 @@ spec:
       tooltip: 'If Clickhouse Authentication is used, provide the password'
     - name: CLICKHOUSE_CREATE_SCHEME
       displayName: Create Scheme
-      componentType: dropdown
+      componentType: checkbox
       componentProps:
-        values:
-          - Create
-          - Skip
         required: true
         tooltip: 'Should the destination create the schema for you?'
-      initialValue: Create
+      initialValue: true
    - name: CLICKHOUSE_DATABASE_NAME
       displayName: Database Name
       componentType: input
diff --git a/destinations/data/gigapipe.yaml b/destinations/data/gigapipe.yaml
index 7b71600ba..7c52e3d0f 100644
--- a/destinations/data/gigapipe.yaml
+++ b/destinations/data/gigapipe.yaml
@@ -35,19 +35,13 @@ spec:
         required: true
     - name: QRYN_RESOURCE_TO_TELEMETRY_CONVERSION
       displayName: Convert container attributes to labels
-      componentType: dropdown
+      componentType: checkbox
       componentProps:
-        values:
-          - "Yes"
-          - "No"
         required: false
       initialValue: Yes
     - name: QRYN_ADD_EXPORTER_NAME
       displayName: Add exporter name to labels
-      componentType: dropdown
+      componentType: checkbox
       componentProps:
-        values:
-          - "Yes"
-          - "No"
         required: false
       initialValue: Yes
diff --git a/destinations/data/qryn.yaml b/destinations/data/qryn.yaml
index 0397d4d61..93ac8135c 100644
--- a/destinations/data/qryn.yaml
+++ b/destinations/data/qryn.yaml
@@ -33,19 +33,13 @@ spec:
         type: text
     - name: QRYN_OSS_RESOURCE_TO_TELEMETRY_CONVERSION
       displayName: Convert container attributes to labels
-      componentType: dropdown
+      componentType: checkbox
       componentProps:
-        values:
-          - "Yes"
-          - "No"
         required: false
       initialValue: Yes
     - name: QRYN_OSS_ADD_EXPORTER_NAME
       displayName: Add exporter name to labels
-      componentType: dropdown
+      componentType: checkbox
       componentProps:
-        values:
-          - "Yes"
-          - "No"
         required: false
       initialValue: Yes
diff --git a/docs/backends/clickhouse.mdx b/docs/backends/clickhouse.mdx
index fb6e91e8c..cf89a980d 100644
--- a/docs/backends/clickhouse.mdx
+++ b/docs/backends/clickhouse.mdx
@@ -35,8 +35,8 @@ The benefit of this option is that you can see the value fast, without needing t
 The downside is that the schema may not be optimized for your specific use case, and may make changes more complicated.
 
 To use it:
-- Odigos UI - When adding a new ClickHouse destination, select the `Create` Option under the `Create Scheme` field.
-- Destination K8s Manifest - Set the `CLICKHOUSE_CREATE_SCHEME` setting to value `Create`.
+- Odigos UI - When adding a new ClickHouse destination, select the `Create Scheme` checkbox field.
+- Destination K8s Manifest - Set the `CLICKHOUSE_CREATE_SCHEME` setting to value `true`.
 
 The schema which will be used by default can be found [here](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter/clickhouseexporter/example/default_ddl).
 
@@ -54,8 +54,8 @@ This option is not recommended for production workloads:
 With this option, you are responsible for creating and managing the schema yourself.
 
 To use it:
-- Odigos UI - In `Create Scheme` field, select the the `Skip` Option.
-- Destination K8s Manifest - Set the `CLICKHOUSE_CREATE_SCHEME` setting to value `Skip`.
+- Odigos UI - Unselect the `Create Scheme` checkbox field.
+- Destination K8s Manifest - Set the `CLICKHOUSE_CREATE_SCHEME` setting to value `false`.
 
 The benefit of this option is that you have full control over the schema, and can optimize it for your specific use case.
 
@@ -103,7 +103,7 @@ These are optional, keep empty if your ClickHouse server does not require authen
 
 ### Schema
 
-- Create Schema - Set to `Skip` if you manage your own schema, or `Create` to have Odigos create the schema for you. See [Create Schema](#create-schema) for more details.
+- Create Schema - Set to `false` if you manage your own schema, or `true` to have Odigos create the schema for you. See [Create Schema](#create-schema) for more details.
 - Database Name (Required) - The name of the Clickhouse Database where the telemetry data will be stored. The default is `otel`. The database will not be created if it does not exist, so make sure you have created it beforehand.
 - Table Names - Allows you to customize the names of the tables where the telemetry data will be stored. The default is `otel_traces` for traces and `otel_metrics` for metrics.
 
@@ -140,7 +140,7 @@ metadata:
   namespace: odigos-system
 spec:
   data:
-    CLICKHOUSE_CREATE_SCHEME:
+    CLICKHOUSE_CREATE_SCHEME:
     # CLICKHOUSE_USERNAME:
     # Note: The commented fields above are optional.
     CLICKHOUSE_DATABASE_NAME:
diff --git a/docs/backends/gigapipe.mdx b/docs/backends/gigapipe.mdx
index fb9f9d203..ec9215049 100644
--- a/docs/backends/gigapipe.mdx
+++ b/docs/backends/gigapipe.mdx
@@ -56,8 +56,8 @@ metadata:
 spec:
   data:
     QRYN_API_KEY:
-    # QRYN_RESOURCE_TO_TELEMETRY_CONVERSION:
-    # QRYN_ADD_EXPORTER_NAME:
+    # QRYN_RESOURCE_TO_TELEMETRY_CONVERSION:
+    # QRYN_ADD_EXPORTER_NAME:
     # Note: The commented fields above are optional.
     QRYN_URL:
   destinationName: qryn
diff --git a/docs/backends/qryn.mdx b/docs/backends/qryn.mdx
index 1a7a31080..6e80ce270 100644
--- a/docs/backends/qryn.mdx
+++ b/docs/backends/qryn.mdx
@@ -49,8 +49,8 @@ spec:
   data:
     QRYN_OSS_URL:
     # QRYN_OSS_USERNAME:
-    # QRYN_OSS_RESOURCE_TO_TELEMETRY_CONVERSION:
-    # QRYN_OSS_ADD_EXPORTER_NAME:
+    # QRYN_OSS_RESOURCE_TO_TELEMETRY_CONVERSION:
+    # QRYN_OSS_ADD_EXPORTER_NAME:
     # Note: The commented fields above are optional.
   destinationName: qryn-oss
   # Uncomment the secretRef below if you are using the optional Secret.
diff --git a/frontend/webapp/containers/main/actions/action-drawer/index.tsx b/frontend/webapp/containers/main/actions/action-drawer/index.tsx
index 22edc6c51..b24cb2803 100644
--- a/frontend/webapp/containers/main/actions/action-drawer/index.tsx
+++ b/frontend/webapp/containers/main/actions/action-drawer/index.tsx
@@ -64,8 +64,6 @@ export const ActionDrawer: React.FC = () => {
       ACTION_OPTIONS.find(({ id }) => id === 'attributes')?.items?.find(({ type }) => type === item.type) ||
       ACTION_OPTIONS.find(({ id }) => id === 'sampler')?.items?.find(({ type }) => type === item.type);
 
-    if (!found) return undefined;
-
     loadFormWithDrawerItem(selectedItem);
 
     return found;
diff --git a/frontend/webapp/containers/main/destinations/destination-drawer/build-card.ts b/frontend/webapp/containers/main/destinations/destination-drawer/build-card.ts
index 1ad9d095b..c21c3b28b 100644
--- a/frontend/webapp/containers/main/destinations/destination-drawer/build-card.ts
+++ b/frontend/webapp/containers/main/destinations/destination-drawer/build-card.ts
@@ -7,7 +7,7 @@ const buildMonitorsList = (exportedSignals: ExportedSignals): string =>
     .filter((key) => exportedSignals[key])
     .join(', ');
 
-const buildCard = (destination: ActualDestination, destinationTypeDetails: DestinationDetailsResponse['destinationTypeDetails']) => {
+const buildCard = (destination: ActualDestination, destinationTypeDetails?: DestinationDetailsResponse['destinationTypeDetails']) => {
   const { exportedSignals, destinationType, fields } = destination;
 
   const arr: DataCardRow[] = [
diff --git a/frontend/webapp/containers/main/destinations/destination-drawer/index.tsx b/frontend/webapp/containers/main/destinations/destination-drawer/index.tsx
index f2276978b..24f8385d1 100644
--- a/frontend/webapp/containers/main/destinations/destination-drawer/index.tsx
+++ b/frontend/webapp/containers/main/destinations/destination-drawer/index.tsx
@@ -56,7 +56,7 @@ export const DestinationDrawer: React.FC = () => {
   const [isFormDirty, setIsFormDirty] = useState(false);
 
   const cardData = useMemo(() => {
-    if (!selectedItem || !destinationTypeDetails) return [];
+    if (!selectedItem) return [];
 
     const { item } = selectedItem as { item: ActualDestination };
     const arr = buildCard(item, destinationTypeDetails);
@@ -64,16 +64,14 @@ export const DestinationDrawer: React.FC = () => {
     return arr;
   }, [selectedItem, destinationTypeDetails]);
 
-  const thisDestination = useMemo(() => {
+  const thisDestinationType = useMemo(() => {
     if (!destinationTypes.length || !selectedItem || !isEditing) {
       resetFormData();
       return undefined;
     }
 
     const { item } = selectedItem as { item: ActualDestination };
-    const found = destinationTypes.map(({ items }) => items.filter(({ type }) => type === item.destinationType.type)).filter((arr) => !!arr.length)[0][0];
-
-    if (!found) return undefined;
+    const found = destinationTypes.map(({ items }) => items.filter(({ type }) => type === item.destinationType.type)).filter((arr) => !!arr.length)?.[0]?.[0];
 
     loadFormWithDrawerItem(selectedItem);
 
     return found;
@@ -119,7 +117,7 @@ export const DestinationDrawer: React.FC = () => {
diff --git a/frontend/webapp/containers/main/instrumentation-rules/rule-drawer/index.tsx b/frontend/webapp/containers/main/instrumentation-rules/rule-drawer/index.tsx
 = () => {
     const { item } = selectedItem as { item: InstrumentationRuleSpecMapped };
     const found = RULE_OPTIONS.find(({ type }) => type === item.type);
 
-    if (!found) return undefined;
-
     loadFormWithDrawerItem(selectedItem);
 
     return found;
@@ -73,7 +71,7 @@ export const RuleDrawer: React.FC = () => {
   const { id, item } = selectedItem as { id: string; item: InstrumentationRuleSpecMapped };
 
   const handleEdit = (bool?: boolean) => {
-    if (item.type === InstrumentationRuleType.UNKNOWN_TYPE) {
+    if (item.type === InstrumentationRuleType.UNKNOWN_TYPE && (bool || bool === undefined)) {
       addNotification({ type: NOTIFICATION_TYPE.WARNING, title: FORM_ALERTS.FORBIDDEN, message: FORM_ALERTS.CANNOT_EDIT_RULE, crdType: OVERVIEW_ENTITY_TYPES.RULE, target: id });
     } else {
       setIsEditing(typeof bool === 'boolean' ? bool : true);
diff --git a/frontend/webapp/containers/main/overview/multi-source-control/index.tsx b/frontend/webapp/containers/main/overview/multi-source-control/index.tsx
index ed4861550..4181dda72 100644
--- a/frontend/webapp/containers/main/overview/multi-source-control/index.tsx
+++ b/frontend/webapp/containers/main/overview/multi-source-control/index.tsx
@@ -32,7 +32,7 @@ export const MultiSourceControl = () => {
   });
 
   const { sources, deleteSources } = useSourceCRUD();
-  const { configuredSources, setConfiguredSources } = useAppStore((state) => state);
+  const { configuredSources, setConfiguredSources } = useAppStore();
   const [isWarnModalOpen, setIsWarnModalOpen] = useState(false);
 
   const totalSelected = useMemo(() => {
diff --git a/frontend/webapp/hooks/compute-platform/useComputePlatform.ts b/frontend/webapp/hooks/compute-platform/useComputePlatform.ts
index 0253168f6..da943fe0c 100644
--- a/frontend/webapp/hooks/compute-platform/useComputePlatform.ts
+++ b/frontend/webapp/hooks/compute-platform/useComputePlatform.ts
@@ -45,6 +45,27 @@ export const useComputePlatform = (): UseComputePlatformHook => {
           return { ...item, type };
         }),
+        destinations: data.computePlatform.destinations.map((item) => {
+          // Replace deprecated string values, with boolean values
+          const fields =
+            item.destinationType.type === 'clickhouse'
+              ? item.fields.replace('"CLICKHOUSE_CREATE_SCHEME":"Create"', '"CLICKHOUSE_CREATE_SCHEME":"true"').replace('"CLICKHOUSE_CREATE_SCHEME":"Skip"', '"CLICKHOUSE_CREATE_SCHEME":"false"')
+              : item.destinationType.type === 'qryn'
+              ? item.fields
+                  .replace('"QRYN_ADD_EXPORTER_NAME":"Yes"', '"QRYN_ADD_EXPORTER_NAME":"true"')
+                  .replace('"QRYN_ADD_EXPORTER_NAME":"No"', '"QRYN_ADD_EXPORTER_NAME":"false"')
+                  .replace('"QRYN_RESOURCE_TO_TELEMETRY_CONVERSION":"Yes"', '"QRYN_RESOURCE_TO_TELEMETRY_CONVERSION":"true"')
+                  .replace('"QRYN_RESOURCE_TO_TELEMETRY_CONVERSION":"No"', '"QRYN_RESOURCE_TO_TELEMETRY_CONVERSION":"false"')
+              : item.destinationType.type === 'qryn-oss'
+              ? item.fields
+                  .replace('"QRYN_OSS_ADD_EXPORTER_NAME":"Yes"', '"QRYN_OSS_ADD_EXPORTER_NAME":"true"')
+                  .replace('"QRYN_OSS_ADD_EXPORTER_NAME":"No"', '"QRYN_OSS_ADD_EXPORTER_NAME":"false"')
+                  .replace('"QRYN_OSS_RESOURCE_TO_TELEMETRY_CONVERSION":"Yes"', '"QRYN_OSS_RESOURCE_TO_TELEMETRY_CONVERSION":"true"')
+                  .replace('"QRYN_OSS_RESOURCE_TO_TELEMETRY_CONVERSION":"No"', '"QRYN_OSS_RESOURCE_TO_TELEMETRY_CONVERSION":"false"')
+              : item.fields;
+
+          return { ...item, fields };
+        }),
       },
     };
   }, [data]);
diff --git a/frontend/webapp/hooks/destinations/useDestinationCRUD.ts b/frontend/webapp/hooks/destinations/useDestinationCRUD.ts
index cc52de26d..4870b6617 100644
--- a/frontend/webapp/hooks/destinations/useDestinationCRUD.ts
+++ b/frontend/webapp/hooks/destinations/useDestinationCRUD.ts
@@ -75,8 +75,14 @@ export const useDestinationCRUD = (params?: Params) => {
     loading: cState.loading || uState.loading || dState.loading,
     destinations: data?.computePlatform.destinations || [],
 
-    createDestination: (destination: DestinationInput) => createDestination({ variables: { destination: { ...destination, fields: destination.fields.filter(({ value }) => value !== undefined) } } }),
-    updateDestination: (id: string, destination: DestinationInput) => updateDestination({ variables: { id, destination } }),
-    deleteDestination: (id: string) => deleteDestination({ variables: { id } }),
+    createDestination: (destination: DestinationInput) => {
+      createDestination({ variables: { destination: { ...destination, fields: destination.fields.filter(({ value }) => value !== undefined) } } });
+    },
+    updateDestination: (id: string, destination: DestinationInput) => {
+      updateDestination({ variables: { id, destination: { ...destination, fields: destination.fields.filter(({ value }) => value !== undefined) } } });
+    },
+    deleteDestination: (id: string) => {
+      deleteDestination({ variables: { id } });
+    },
   };
 };
diff --git a/frontend/webapp/hooks/sources/useSourceCRUD.ts b/frontend/webapp/hooks/sources/useSourceCRUD.ts
index 82f30bdf7..9a2bdbdfc 100644
--- a/frontend/webapp/hooks/sources/useSourceCRUD.ts
+++ b/frontend/webapp/hooks/sources/useSourceCRUD.ts
@@ -1,6 +1,7 @@
+import { useCallback } from 'react';
 import { useMutation } from '@apollo/client';
-import { useNotificationStore } from '@/store';
 import { ACTION, getSseTargetFromId } from '@/utils';
+import { useAppStore, useNotificationStore } from '@/store';
 import { PERSIST_SOURCE, UPDATE_K8S_ACTUAL_SOURCE } from '@/graphql';
 import { useComputePlatform, useNamespace } from '../compute-platform';
 import { OVERVIEW_ENTITY_TYPES, type WorkloadId, type PatchSourceRequestInput, type K8sActualSource, NOTIFICATION_TYPE } from '@/types';
@@ -12,10 +13,24 @@ interface Params {
 
 export const useSourceCRUD = (params?: Params) => {
   const removeNotifications = useNotificationStore((store) => store.removeNotifications);
+  const { configuredSources, setConfiguredSources } = useAppStore();
+
   const { persistNamespace } = useNamespace();
   const { data, refetch } = useComputePlatform();
   const { addNotification } = useNotificationStore();
 
+  const startPolling = useCallback(async () => {
+    let retries = 0;
+    const maxRetries = 5;
+    const retryInterval = 1 * 1000; // time in milliseconds
+
+    while (retries < maxRetries) {
+      await new Promise((resolve) => setTimeout(resolve, retryInterval));
+      refetch();
+      retries++;
+    }
+  }, [refetch]);
+
   const notifyUser = (type: NOTIFICATION_TYPE, title: string, message: string, id?: WorkloadId) => {
     addNotification({
       type,
@@ -33,7 +48,7 @@ export const useSourceCRUD = (params?: Params) => {
 
   const handleComplete = (title: string, message: string, id?: WorkloadId) => {
     notifyUser(NOTIFICATION_TYPE.SUCCESS, title, message, id);
-    refetch();
+    startPolling();
     params?.onSuccess?.(title);
   };
 
@@ -57,6 +72,7 @@ export const useSourceCRUD = (params?: Params) => {
       } else {
         const id = { kind, name, namespace };
         if (!selected) removeNotifications(getSseTargetFromId(id, OVERVIEW_ENTITY_TYPES.SOURCE));
+        if (!selected) setConfiguredSources({ ...configuredSources, [namespace]: configuredSources[namespace].filter((source) => source.name !== name) });
         handleComplete(action, `source "${name}" was ${action.toLowerCase()}d ${fromOrIn} "${namespace}"`, selected ? id : undefined);
       }
     },
diff --git a/frontend/webapp/reuseable-components/tab-list/index.tsx b/frontend/webapp/reuseable-components/tab-list/index.tsx
index 9ef07d3db..ddb196370 100644
--- a/frontend/webapp/reuseable-components/tab-list/index.tsx
+++ b/frontend/webapp/reuseable-components/tab-list/index.tsx
@@ -12,7 +12,7 @@ interface TabProps {
   icon: SVG;
   selected: boolean;
   disabled?: boolean;
-  onClick: () => void;
+  onClick?: () => void;
 }
 
 // Define types for the TabList component props
@@ -21,18 +21,18 @@ interface TabListProps {
 }
 
 // Styled-components for Tab and TabList
-const TabContainer = styled.div<{ $selected: TabProps['selected']; $disabled: TabProps['disabled'] }>`
+const TabContainer = styled.div<{ $selected: TabProps['selected']; $disabled: TabProps['disabled']; $noClick: boolean }>`
   display: flex;
   align-items: center;
   padding: 10px 12px;
   border-radius: 32px;
-  cursor: ${({ $disabled }) => ($disabled ? 'not-allowed' : 'pointer')};
-  background-color: ${({ $selected, theme }) => ($selected ? theme.colors.majestic_blue + hexPercentValues['024'] : theme.colors.card)};
+  cursor: ${({ $noClick, $disabled }) => ($noClick ? 'unset' : $disabled ? 'not-allowed' : 'pointer')};
+  background-color: ${({ $noClick, $selected, theme }) => ($noClick ? 'transparent' : $selected ? theme.colors.majestic_blue + hexPercentValues['024'] : theme.colors.card)};
  opacity: ${({ $disabled }) => ($disabled ? 0.5 : 1)};
   transition: background-color 0.3s, color 0.3s;
 
   &:hover {
-    background-color: ${({ $disabled, theme }) => ($disabled ? 'none' : theme.colors.majestic_blue + hexPercentValues['024'])};
+    background-color: ${({ $noClick, $disabled, theme }) => ($noClick || $disabled ? 'none' : theme.colors.majestic_blue + hexPercentValues['024'])};
   }
 
   svg {
@@ -45,11 +45,10 @@ const TabListContainer = styled.div`
   gap: 8px;
 `;
 
-// Tab component
 const Tab: React.FC<TabProps> = ({ title, tooltip, icon: Icon, selected, disabled, onClick }) => {
   return (
-    <TabContainer $selected={selected} $disabled={disabled} onClick={onClick}>
+    <TabContainer $selected={selected} $disabled={disabled} $noClick={!onClick} onClick={onClick}>
       {title}
@@ -62,7 +61,6 @@ const TABS = [
     title: 'Overview',
     icon: OverviewIcon,
     selected: true,
-    onClick: () => {},
   },
   // {
  //   title: 'Service Map',
@@ -82,7 +80,7 @@ const TABS = [
   // },
 ];
 
-const TabList: React.FC<TabListProps> = ({ tabs = TABS }) => {
+export const TabList: React.FC<TabListProps> = ({ tabs = TABS }) => {
   return (
     {tabs.map((tab) => (
@@ -91,5 +89,3 @@ const TabList: React.FC<TabListProps> = ({ tabs = TABS }) => {
   );
 };
-
-export { TabList };
diff --git a/helm/odigos/templates/autoscaler/clusterrole.yaml b/helm/odigos/templates/autoscaler/clusterrole.yaml
index c1f33edb9..014f2c6b1 100644
--- a/helm/odigos/templates/autoscaler/clusterrole.yaml
+++ b/helm/odigos/templates/autoscaler/clusterrole.yaml
@@ -3,120 +3,11 @@ kind: ClusterRole
 metadata:
   name: odigos-autoscaler
 rules:
-  - apiGroups:
-      - ""
-    resources:
-      - services
-    verbs:
-      - get
-      - list
-      - watch
-  - apiGroups:
-      - ""
-    resources:
-      - configmaps
-    verbs:
-      - create
-      - delete
-      - get
-      - list
-      - patch
-      - update
-      - watch
-  - apiGroups:
-      - apps
-    resources:
-      - daemonsets
-      - deployments
-    verbs:
-      - get
-      - list
-      - watch
-  - apiGroups:
-      - odigos.io
-    resources:
-      - instrumentedapplications
-      - collectorsgroups
-      - odigosconfigurations
-      - destinations
-      - processors
-    verbs:
-      - create
-      - delete
-      - get
-      - list
-      - patch
-      - update
-      - watch
-  - apiGroups:
-      - odigos.io
-    resources:
-      - collectorsgroups/finalizers
-      - instrumentedapplications/finalizers
-      - destinations/finalizers
-    verbs:
-      - update
-  - apiGroups:
-      - odigos.io
-    resources:
-      - collectorsgroups/status
-      - instrumentedapplications/status
-      - destinations/status
-    verbs:
-      - get
-      - patch
-      - update
-  - apiGroups:
-      - actions.odigos.io
-    resources:
-      - addclusterinfos
-      - deleteattributes
-      - renameattributes
-      - probabilisticsamplers
-      - latencysamplers
-      - errorsamplers
-      - piimaskings
-    verbs:
-      - watch
-      - get
-      - list
-  - apiGroups:
-      - actions.odigos.io
-    resources:
-      - addclusterinfos/status
-      - deleteattributes/status
-      - renameattributes/status
-      - probabilisticsamplers/status
-      - latencysamplers/status
-      - errorsamplers/status
-      - piimaskings/status
-    verbs:
-      - get
-      - patch
-      - update
-  - apiGroups:
-      - ""
-    resources:
-      - configmaps
-      - services
-    verbs:
-      - get
-      - list
-      - watch
-  - apiGroups:
-      - apps
-    resources:
-      - daemonsets
-      - deployments
-    verbs:
-      - get
-      - list
-      - watch
   - apiGroups:
       - odigos.io
     resources:
-      - odigosconfigurations
+      - instrumentationconfigs
     verbs:
       - get
       - list
-      - watch
+      - watch
\ No newline at end of file
diff --git a/helm/odigos/templates/autoscaler/role.yaml b/helm/odigos/templates/autoscaler/role.yaml
index c8f864849..623646420 100644
--- a/helm/odigos/templates/autoscaler/role.yaml
+++ b/helm/odigos/templates/autoscaler/role.yaml
@@ -8,6 +8,17 @@ rules:
       - ""
     resources:
       - configmaps
+    verbs:
+      - create
+      - delete
+      - get
+      - list
+      - patch
+      - update
+      - watch
+  - apiGroups:
+      - ""
+    resources:
       - services
     verbs:
       - create
@@ -22,7 +33,6 @@ rules:
       - apps
     resources:
       - daemonsets
-      - deployments
     verbs:
       - create
      - delete
@@ -36,6 +46,26 @@ rules:
       - apps
     resources:
       - daemonsets/status
+    verbs:
+      - get
+      - patch
+      - update
+  - apiGroups:
+      - apps
+    resources:
+      - deployments
+    verbs:
+      - create
+      - delete
+      - deletecollection
+      - get
+      - list
+      - patch
+      - update
+      - watch
apiGroups: + - apps + resources: - deployments/status verbs: - get @@ -54,7 +84,78 @@ rules: - "" resources: - secrets + verbs: + - list + - watch + - get + - apiGroups: + - odigos.io + resources: + - destinations verbs: - get - list - watch + - apiGroups: + - odigos.io + resources: + - destinations/status + verbs: + - get + - patch + - update + - apiGroups: + - odigos.io + resources: + - processors + verbs: + - get + - list + - watch + - patch + - create + - update + - apiGroups: + - actions.odigos.io + resources: + - addclusterinfos + - deleteattributes + - renameattributes + - probabilisticsamplers + - piimaskings + - latencysamplers + - errorsamplers + verbs: + - watch + - get + - list + - apiGroups: + - actions.odigos.io + resources: + - addclusterinfos/status + - deleteattributes/status + - renameattributes/status + - probabilisticsamplers/status + - piimaskings/status + - latencysamplers/status + - errorsamplers/status + verbs: + - get + - patch + - update + - apiGroups: + - odigos.io + resources: + - collectorsgroups + verbs: + - get + - list + - watch + - apiGroups: + - odigos.io + resources: + - collectorsgroups/status + verbs: + - get + - patch + - update diff --git a/helm/odigos/templates/scheduler/role.yaml b/helm/odigos/templates/scheduler/role.yaml index ca14e3135..8199720ad 100644 --- a/helm/odigos/templates/scheduler/role.yaml +++ b/helm/odigos/templates/scheduler/role.yaml @@ -2,6 +2,7 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: name: odigos-scheduler + namespace: {{ .Release.Namespace }} rules: - apiGroups: - "" diff --git a/instrumentation/manager.go b/instrumentation/manager.go index 6da5792ee..f1444ea20 100644 --- a/instrumentation/manager.go +++ b/instrumentation/manager.go @@ -23,16 +23,16 @@ var ( // The manager will apply the configuration to all instrumentations that match the config group. type ConfigUpdate[configGroup ConfigGroup] map[configGroup]Config -type instrumentationDetails[processGroup ProcessGroup, configGroup ConfigGroup] struct { +type instrumentationDetails[processDetails ProcessDetails, configGroup ConfigGroup] struct { // we want to track the instrumentation even if it failed to load, to be able to report the error // and clean up the reporter resources once the process exits. // hence, this might be nil if the instrumentation failed to load. inst Instrumentation - pg processGroup + pd processDetails cg configGroup } -type ManagerOptions[processGroup ProcessGroup, configGroup ConfigGroup] struct { +type ManagerOptions[processDetails ProcessDetails, configGroup ConfigGroup] struct { Logger logr.Logger // Factories is a map of OTel distributions to their corresponding instrumentation factories. @@ -46,7 +46,7 @@ type ManagerOptions[processGroup ProcessGroup, configGroup ConfigGroup] struct { // based on the process event. // // The handler is also used to report the instrumentation lifecycle events. - Handler *Handler[processGroup, configGroup] + Handler *Handler[processDetails, configGroup] // DetectorOptions is a list of options to configure the process detector. // @@ -69,27 +69,27 @@ type Manager interface { Run(ctx context.Context) error } -type manager[processGroup ProcessGroup, configGroup ConfigGroup] struct { +type manager[processDetails ProcessDetails, configGroup ConfigGroup] struct { // channel for receiving process events, // used to detect new processes and process exits, and handle their instrumentation accordingly. 
 	procEvents <-chan detector.ProcessEvent
 	detector   detector.Detector
-	handler    *Handler[processGroup, configGroup]
+	handler    *Handler[processDetails, configGroup]
 	factories  map[OtelDistribution]Factory
 	logger     logr.Logger

 	// all the created instrumentations by pid,
 	// this map is not concurrent safe, so it should be accessed only from the main event loop
-	detailsByPid map[int]*instrumentationDetails[processGroup, configGroup]
+	detailsByPid map[int]*instrumentationDetails[processDetails, configGroup]

 	// instrumentations by workload, and aggregated by pid
 	// this map is not concurrent safe, so it should be accessed only from the main event loop
-	detailsByWorkload map[configGroup]map[int]*instrumentationDetails[processGroup, configGroup]
+	detailsByWorkload map[configGroup]map[int]*instrumentationDetails[processDetails, configGroup]

 	configUpdates <-chan ConfigUpdate[configGroup]
 }

-func NewManager[processGroup ProcessGroup, configGroup ConfigGroup](options ManagerOptions[processGroup, configGroup]) (Manager, error) {
+func NewManager[processDetails ProcessDetails, configGroup ConfigGroup](options ManagerOptions[processDetails, configGroup]) (Manager, error) {
 	handler := options.Handler
 	if handler == nil {
 		return nil, errors.New("handler is required for ebpf instrumentation manager")
@@ -99,7 +99,7 @@ func NewManager[processGroup ProcessGroup, configGroup ConfigGroup](options Mana
 		return nil, errors.New("reporter is required for ebpf instrumentation manager")
 	}

-	if handler.ProcessGroupResolver == nil {
+	if handler.ProcessDetailsResolver == nil {
 		return nil, errors.New("details resolver is required for ebpf instrumentation manager")
 	}

@@ -126,19 +126,19 @@ func NewManager[processGroup ProcessGroup, configGroup ConfigGroup](options Mana
 		return nil, fmt.Errorf("failed to create process detector: %w", err)
 	}

-	return &manager[processGroup, configGroup]{
+	return &manager[processDetails, configGroup]{
 		procEvents:        procEvents,
 		detector:          detector,
 		handler:           handler,
 		factories:         options.Factories,
 		logger:            logger.WithName("ebpf-instrumentation-manager"),
-		detailsByPid:      make(map[int]*instrumentationDetails[processGroup, configGroup]),
-		detailsByWorkload: map[configGroup]map[int]*instrumentationDetails[processGroup, configGroup]{},
+		detailsByPid:      make(map[int]*instrumentationDetails[processDetails, configGroup]),
+		detailsByWorkload: map[configGroup]map[int]*instrumentationDetails[processDetails, configGroup]{},
 		configUpdates:     options.ConfigUpdates,
 	}, nil
 }

-func (m *manager[ProcessGroup, ConfigGroup]) runEventLoop(ctx context.Context) {
+func (m *manager[ProcessDetails, ConfigGroup]) runEventLoop(ctx context.Context) {
 	// main event loop for handling instrumentations
 	for {
 		select {
@@ -182,7 +182,7 @@ func (m *manager[ProcessGroup, ConfigGroup]) runEventLoop(ctx context.Context) {
 		}
 	}
 }

-func (m *manager[ProcessGroup, ConfigGroup]) Run(ctx context.Context) error {
+func (m *manager[ProcessDetails, ConfigGroup]) Run(ctx context.Context) error {
 	g, errCtx := errgroup.WithContext(ctx)

 	g.Go(func() error {
@@ -198,14 +198,14 @@ func (m *manager[ProcessGroup, ConfigGroup]) Run(ctx context.Context) error {
 	return err
 }

-func (m *manager[ProcessGroup, ConfigGroup]) cleanInstrumentation(ctx context.Context, pid int) {
+func (m *manager[ProcessDetails, ConfigGroup]) cleanInstrumentation(ctx context.Context, pid int) {
 	details, found := m.detailsByPid[pid]
 	if !found {
 		m.logger.V(3).Info("no instrumentation found for exiting pid, nothing to clean", "pid", pid)
 		return
 	}

-	m.logger.Info("cleaning instrumentation resources", "pid", pid, "process group details", details.pg)
+	m.logger.Info("cleaning instrumentation resources", "pid", pid, "process group details", details.pd)

 	if details.inst != nil {
 		err := details.inst.Close(ctx)
@@ -214,7 +214,7 @@ func (m *manager[ProcessGroup, ConfigGroup]) cleanInstrumentation(ctx context.Co
 		}
 	}

-	err := m.handler.Reporter.OnExit(ctx, pid, details.pg)
+	err := m.handler.Reporter.OnExit(ctx, pid, details.pd)
 	if err != nil {
 		m.logger.Error(err, "failed to report instrumentation exit")
 	}
@@ -222,7 +222,7 @@ func (m *manager[ProcessGroup, ConfigGroup]) cleanInstrumentation(ctx context.Co
 	m.stopTrackInstrumentation(pid)
 }

-func (m *manager[ProcessGroup, ConfigGroup]) handleProcessExecEvent(ctx context.Context, e detector.ProcessEvent) error {
+func (m *manager[ProcessDetails, ConfigGroup]) handleProcessExecEvent(ctx context.Context, e detector.ProcessEvent) error {
 	if details, found := m.detailsByPid[e.PID]; found && details.inst != nil {
 		// this can happen if we have multiple exec events for the same pid (chain loading)
 		// TODO: better handle this?
@@ -232,17 +232,17 @@ func (m *manager[ProcessGroup, ConfigGroup]) handleProcessExecEvent(ctx context.
 		return nil
 	}

-	pg, err := m.handler.ProcessGroupResolver.Resolve(ctx, e)
+	pd, err := m.handler.ProcessDetailsResolver.Resolve(ctx, e)
 	if err != nil {
 		return errors.Join(err, errFailedToGetDetails)
 	}

-	otelDisto, err := m.handler.DistributionMatcher.Distribution(ctx, pg)
+	otelDisto, err := m.handler.DistributionMatcher.Distribution(ctx, pd)
 	if err != nil {
 		return errors.Join(err, errFailedToGetDistribution)
 	}

-	configGroup, err := m.handler.ConfigGroupResolver.Resolve(ctx, pg, otelDisto)
+	configGroup, err := m.handler.ConfigGroupResolver.Resolve(ctx, pd, otelDisto)
 	if err != nil {
 		return errors.Join(err, errFailedToGetConfigGroup)
 	}
@@ -253,7 +253,7 @@ func (m *manager[ProcessGroup, ConfigGroup]) handleProcessExecEvent(ctx context.
 	}

 	// Fetch initial settings for the instrumentation
-	settings, err := m.handler.SettingsGetter.Settings(ctx, pg, otelDisto)
+	settings, err := m.handler.SettingsGetter.Settings(ctx, pd, otelDisto)
 	if err != nil {
 		// for k8s instrumentation config CR will be queried to get the settings
 		// we should always have config for this event.
@@ -269,35 +269,35 @@ func (m *manager[ProcessGroup, ConfigGroup]) handleProcessExecEvent(ctx context.
 	inst, err := factory.CreateInstrumentation(ctx, e.PID, settings)
 	if err != nil {
 		m.logger.Error(err, "failed to initialize instrumentation", "language", otelDisto.Language, "sdk", otelDisto.OtelSdk)
-		err = m.handler.Reporter.OnInit(ctx, e.PID, err, pg)
+		err = m.handler.Reporter.OnInit(ctx, e.PID, err, pd)
 		// TODO: should we return here the initialize error? or the handler error? or both?
 		return err
 	}

 	loadErr := inst.Load(ctx)
-	reporterErr := m.handler.Reporter.OnLoad(ctx, e.PID, loadErr, pg)
+	reporterErr := m.handler.Reporter.OnLoad(ctx, e.PID, loadErr, pd)
 	if reporterErr != nil {
-		m.logger.Error(reporterErr, "failed to report instrumentation load", "loaded", loadErr == nil, "pid", e.PID, "process group details", pg)
+		m.logger.Error(reporterErr, "failed to report instrumentation load", "loaded", loadErr == nil, "pid", e.PID, "process group details", pd)
 	}

 	if loadErr != nil {
 		// we need to track the instrumentation even if the load failed.
 		// consider a reporter which writes a persistent record for a failed/successful load
 		// we need to notify the reporter once that PID exits to clean up the resources - hence we track it.
 		// saving the inst as nil marking the instrumentation failed to load, and is not valid to run/configure/close.
-		m.startTrackInstrumentation(e.PID, nil, pg, configGroup)
+		m.startTrackInstrumentation(e.PID, nil, pd, configGroup)
 		m.logger.Error(err, "failed to load instrumentation", "language", otelDisto.Language, "sdk", otelDisto.OtelSdk)
 		// TODO: should we return here the load error? or the instance write error? or both?
 		return err
 	}

-	m.startTrackInstrumentation(e.PID, inst, pg, configGroup)
-	m.logger.Info("instrumentation loaded", "pid", e.PID, "process group details", pg)
+	m.startTrackInstrumentation(e.PID, inst, pd, configGroup)
+	m.logger.Info("instrumentation loaded", "pid", e.PID, "process group details", pd)

 	go func() {
 		err := inst.Run(ctx)
 		if err != nil && !errors.Is(err, context.Canceled) {
-			reporterErr := m.handler.Reporter.OnRun(ctx, e.PID, err, pg)
+			reporterErr := m.handler.Reporter.OnRun(ctx, e.PID, err, pd)
 			if reporterErr != nil {
 				m.logger.Error(reporterErr, "failed to report instrumentation run")
 			}
@@ -308,23 +308,23 @@ func (m *manager[ProcessGroup, ConfigGroup]) handleProcessExecEvent(ctx context.
 	return nil
 }

-func (m *manager[ProcessGroup, ConfigGroup]) startTrackInstrumentation(pid int, inst Instrumentation, processGroup ProcessGroup, configGroup ConfigGroup) {
-	instDetails := &instrumentationDetails[ProcessGroup, ConfigGroup]{
+func (m *manager[ProcessDetails, ConfigGroup]) startTrackInstrumentation(pid int, inst Instrumentation, processDetails ProcessDetails, configGroup ConfigGroup) {
+	instDetails := &instrumentationDetails[ProcessDetails, ConfigGroup]{
 		inst: inst,
-		pg:   processGroup,
+		pd:   processDetails,
 		cg:   configGroup,
 	}
 	m.detailsByPid[pid] = instDetails
 	if _, found := m.detailsByWorkload[configGroup]; !found {
 		// first instrumentation for this workload
-		m.detailsByWorkload[configGroup] = map[int]*instrumentationDetails[ProcessGroup, ConfigGroup]{pid: instDetails}
+		m.detailsByWorkload[configGroup] = map[int]*instrumentationDetails[ProcessDetails, ConfigGroup]{pid: instDetails}
 	} else {
 		m.detailsByWorkload[configGroup][pid] = instDetails
 	}
 }

-func (m *manager[ProcessGroup, ConfigGroup]) stopTrackInstrumentation(pid int) {
+func (m *manager[ProcessDetails, ConfigGroup]) stopTrackInstrumentation(pid int) {
 	details, ok := m.detailsByPid[pid]
 	if !ok {
 		return
@@ -339,7 +339,7 @@ func (m *manager[ProcessGroup, ConfigGroup]) stopTrackInstrumentation(pid int) {
 	}
 }

-func (m *manager[ProcessGroup, ConfigGroup]) applyInstrumentationConfigurationForSDK(ctx context.Context, configGroup ConfigGroup, config Config) error {
+func (m *manager[ProcessDetails, ConfigGroup]) applyInstrumentationConfigurationForSDK(ctx context.Context, configGroup ConfigGroup, config Config) error {
 	var err error

 	configGroupInstrumentations, ok := m.detailsByWorkload[configGroup]
@@ -351,7 +351,7 @@ func (m *manager[ProcessGroup, ConfigGroup]) applyInstrumentationConfigurationFo
 		if instDetails.inst == nil {
 			continue
 		}
-		m.logger.Info("applying configuration to instrumentation", "process group details", instDetails.pg, "configGroup", configGroup)
+		m.logger.Info("applying configuration to instrumentation", "process group details", instDetails.pd, "configGroup", configGroup)
 		applyErr := instDetails.inst.ApplyConfig(ctx, config)
 		err = errors.Join(err, applyErr)
 	}
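Note: the rename is mechanical, but it touches the manager's two lookup maps, which are the core of its bookkeeping. A minimal, self-contained sketch of that dual-index pattern (`entry` and `tracker` are hypothetical stand-ins for `instrumentationDetails` and the manager's maps, not the odigos API): every tracked instrumentation is reachable by PID for exec/exit events, and by its comparable config group so config updates can fan out per group.

```go
package main

import "fmt"

// entry stands in for instrumentationDetails: the platform details and the
// config group the instrumentation belongs to.
type entry[PD fmt.Stringer, CG comparable] struct {
	pd PD
	cg CG
}

type tracker[PD fmt.Stringer, CG comparable] struct {
	byPid      map[int]*entry[PD, CG]        // exec/exit events arrive keyed by PID
	byWorkload map[CG]map[int]*entry[PD, CG] // config updates arrive keyed by config group
}

func newTracker[PD fmt.Stringer, CG comparable]() *tracker[PD, CG] {
	return &tracker[PD, CG]{
		byPid:      make(map[int]*entry[PD, CG]),
		byWorkload: make(map[CG]map[int]*entry[PD, CG]),
	}
}

// start mirrors startTrackInstrumentation: insert into both indexes,
// creating the per-group map on the first PID for that group.
func (t *tracker[PD, CG]) start(pid int, pd PD, cg CG) {
	e := &entry[PD, CG]{pd: pd, cg: cg}
	t.byPid[pid] = e
	if _, found := t.byWorkload[cg]; !found {
		t.byWorkload[cg] = map[int]*entry[PD, CG]{pid: e}
	} else {
		t.byWorkload[cg][pid] = e
	}
}

// stop mirrors stopTrackInstrumentation: remove from both indexes.
// (Whether the emptied per-group map is also deleted is elided in the diff.)
func (t *tracker[PD, CG]) stop(pid int) {
	e, ok := t.byPid[pid]
	if !ok {
		return
	}
	delete(t.byWorkload[e.cg], pid)
	delete(t.byPid, pid)
}

type details string

func (d details) String() string { return string(d) }

func main() {
	t := newTracker[details, string]()
	t.start(101, details("pod-a/container-1"), "deployment-a")
	t.start(102, details("pod-b/container-1"), "deployment-a")
	fmt.Println(len(t.byWorkload["deployment-a"])) // 2
	t.stop(101)
	fmt.Println(len(t.byWorkload["deployment-a"])) // 1
}
```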
diff --git a/instrumentation/types.go b/instrumentation/types.go
index 9b734e592..509e394af 100644
--- a/instrumentation/types.go
+++ b/instrumentation/types.go
@@ -17,13 +17,18 @@ type OtelDistribution struct {
 	OtelSdk common.OtelSdk
 }

-// ProcessGroup represents a group of processes that are managed together by the hosting platform.
+// ProcessDetails is used to convert the common process details reported by the detector to details relevant to the hosting platform.
+//
+// ProcessDetails can contain details that associate a process with a group of processes that are managed together by the hosting platform.
 // It may include different information depending on the platform (Kubernetes, VM, etc).
 //
 // For example consider an app which is launched by a bash script, the script launches a python process.
 // The process may create different child processes, and the bash script may launch multiple python processes.
 // In this case, the process group may include the bash script, the python process, and the child processes.
-type ProcessGroup interface {
+//
+// Another category of information that may be included relates to language and runtime information which can be used to
+// determine the OTel distribution to use.
+type ProcessDetails interface {
 	fmt.Stringer
 }

@@ -34,60 +39,60 @@ type ConfigGroup interface {
 	comparable
 }

-// ProcessGroupResolver is used to resolve the process group of a process.
-type ProcessGroupResolver[processGroup ProcessGroup] interface {
+// ProcessDetailsResolver is used to resolve the process group of a process.
+type ProcessDetailsResolver[processDetails ProcessDetails] interface {
 	// Resolve will classify the process into a process group.
 	// Those process group details may be used for future calls when reporting the status of the instrumentation.
 	// or for resolving the configuration group of the process.
-	Resolve(context.Context, detector.ProcessEvent) (processGroup, error)
+	Resolve(context.Context, detector.ProcessEvent) (processDetails, error)
 }

 // ConfigGroupResolver is used to resolve the configuration group of a process.
-type ConfigGroupResolver[processGroup ProcessGroup, configGroup ConfigGroup] interface {
+type ConfigGroupResolver[processDetails ProcessDetails, configGroup ConfigGroup] interface {
 	// Resolve will classify the process into a configuration group.
 	// The Otel Distribution is resolved in the time of calling this function, and may be used
 	// to determine the configuration group.
-	Resolve(context.Context, processGroup, OtelDistribution) (configGroup, error)
+	Resolve(context.Context, processDetails, OtelDistribution) (configGroup, error)
 }

 // Reporter is used to report the status of the instrumentation.
 // It is called at different stages of the instrumentation lifecycle.
-type Reporter[processGroup ProcessGroup] interface {
+type Reporter[processDetails ProcessDetails] interface {
 	// OnInit is called when the instrumentation is initialized.
 	// The error parameter will be nil if the instrumentation was initialized successfully.
-	OnInit(ctx context.Context, pid int, err error, pg processGroup) error
+	OnInit(ctx context.Context, pid int, err error, pg processDetails) error

 	// OnLoad is called after an instrumentation is loaded successfully or failed to load.
 	// The error parameter will be nil if the instrumentation was loaded successfully.
-	OnLoad(ctx context.Context, pid int, err error, pg processGroup) error
+	OnLoad(ctx context.Context, pid int, err error, pg processDetails) error

 	// OnRun is called after the instrumentation stops running.
 	// An error may report a fatal error during the instrumentation run, or a closing error
 	// which happened during the closing of the instrumentation.
-	OnRun(ctx context.Context, pid int, err error, pg processGroup) error
+	OnRun(ctx context.Context, pid int, err error, pg processDetails) error

 	// OnExit is called when the instrumented process exits, and the instrumentation has already been stopped.
 	// For a reported which persists the instrumentation state, this is the time to clean up the state.
-	OnExit(ctx context.Context, pid int, pg processGroup) error
+	OnExit(ctx context.Context, pid int, pg processDetails) error
 }

 // DistributionMatcher is used to match a process to an Otel Distribution.
-type DistributionMatcher[processGroup ProcessGroup] interface {
+type DistributionMatcher[processDetails ProcessDetails] interface {
 	// Distribution will match a process to an Otel Distribution.
-	Distribution(context.Context, processGroup) (OtelDistribution, error)
+	Distribution(context.Context, processDetails) (OtelDistribution, error)
 }

 // SettingsGetter is used to fetch the initial settings of an instrumentation.
-type SettingsGetter[processGroup ProcessGroup] interface {
+type SettingsGetter[processDetails ProcessDetails] interface {
 	// GetSettings will fetch the initial settings of an instrumentation.
-	Settings(context.Context, processGroup, OtelDistribution) (Settings, error)
+	Settings(context.Context, processDetails, OtelDistribution) (Settings, error)
 }

 // Handler is used to classify, report and configure instrumentations.
-type Handler[processGroup ProcessGroup, configGroup comparable] struct {
-	ProcessGroupResolver ProcessGroupResolver[processGroup]
-	ConfigGroupResolver  ConfigGroupResolver[processGroup, configGroup]
-	Reporter             Reporter[processGroup]
-	DistributionMatcher  DistributionMatcher[processGroup]
-	SettingsGetter       SettingsGetter[processGroup]
+type Handler[processDetails ProcessDetails, configGroup comparable] struct {
+	ProcessDetailsResolver ProcessDetailsResolver[processDetails]
+	ConfigGroupResolver    ConfigGroupResolver[processDetails, configGroup]
+	Reporter               Reporter[processDetails]
+	DistributionMatcher    DistributionMatcher[processDetails]
+	SettingsGetter         SettingsGetter[processDetails]
 }
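Since `ProcessDetails` still only embeds `fmt.Stringer`, any hosting platform can plug in its own type. A hedged sketch of what that could look like for a hypothetical plain-VM platform (`vmProcessDetails`, `vmDetailsResolver`, and `procEvent` are illustrative names, not part of this PR; `procEvent` stands in for `detector.ProcessEvent`):

```go
package main

import (
	"context"
	"fmt"
)

// procEvent is a stand-in for detector.ProcessEvent.
type procEvent struct {
	PID     int
	ExePath string
}

// vmProcessDetails satisfies the ProcessDetails contract (fmt.Stringer) and
// carries both grouping info (the service) and a runtime hint that a
// DistributionMatcher could use to pick an OTel distribution.
type vmProcessDetails struct {
	serviceName string // e.g. derived from a systemd unit name
	exePath     string
	runtime     string // language/runtime hint
}

func (d vmProcessDetails) String() string {
	return fmt.Sprintf("Service: %s, Exe: %s, Runtime: %s", d.serviceName, d.exePath, d.runtime)
}

// vmDetailsResolver has the same shape as ProcessDetailsResolver.Resolve:
// classify a raw process event into platform-specific details.
type vmDetailsResolver struct{}

func (r *vmDetailsResolver) Resolve(_ context.Context, e procEvent) (vmProcessDetails, error) {
	return vmProcessDetails{
		serviceName: fmt.Sprintf("svc-for-pid-%d", e.PID),
		exePath:     e.ExePath,
		runtime:     "go",
	}, nil
}

func main() {
	d, err := (&vmDetailsResolver{}).Resolve(context.Background(), procEvent{PID: 42, ExePath: "/usr/bin/app"})
	if err != nil {
		panic(err)
	}
	fmt.Println(d)
}
```

The Kubernetes implementation below is exactly this shape, with pod, container, and workload information filling the details.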
diff --git a/odiglet/pkg/ebpf/common.go b/odiglet/pkg/ebpf/common.go
index b39a390d7..887580823 100644
--- a/odiglet/pkg/ebpf/common.go
+++ b/odiglet/pkg/ebpf/common.go
@@ -10,7 +10,7 @@ import (
 // NewManager creates a new instrumentation manager for eBPF which is configured to work with Kubernetes.
 func NewManager(client client.Client, logger logr.Logger, factories map[instrumentation.OtelDistribution]instrumentation.Factory, configUpdates <-chan instrumentation.ConfigUpdate[K8sConfigGroup]) (instrumentation.Manager, error) {
-	managerOpts := instrumentation.ManagerOptions[K8sProcessGroup, K8sConfigGroup]{
+	managerOpts := instrumentation.ManagerOptions[K8sProcessDetails, K8sConfigGroup]{
 		Logger:    logger,
 		Factories: factories,
 		Handler:   newHandler(client),
@@ -26,11 +26,11 @@ func NewManager(client client.Client, logger logr.Logger, factories map[instrume
 	return manager, nil
 }

-func newHandler(client client.Client) *instrumentation.Handler[K8sProcessGroup, K8sConfigGroup] {
+func newHandler(client client.Client) *instrumentation.Handler[K8sProcessDetails, K8sConfigGroup] {
 	reporter := &k8sReporter{
 		client: client,
 	}
-	processGroupResolver := &k8sDetailsResolver{
+	processDetailsResolver := &k8sDetailsResolver{
 		client: client,
 	}
 	configGroupResolver := &k8sConfigGroupResolver{}
@@ -38,11 +38,11 @@ func newHandler(client client.Client) *instrumentation.Handler[K8sProcessGroup,
 		client: client,
 	}
 	distributionMatcher := &podDeviceDistributionMatcher{}
-	return &instrumentation.Handler[K8sProcessGroup, K8sConfigGroup]{
-		ProcessGroupResolver: processGroupResolver,
-		ConfigGroupResolver:  configGroupResolver,
-		Reporter:             reporter,
-		DistributionMatcher:  distributionMatcher,
-		SettingsGetter:       settingsGetter,
+	return &instrumentation.Handler[K8sProcessDetails, K8sConfigGroup]{
+		ProcessDetailsResolver: processDetailsResolver,
+		ConfigGroupResolver:    configGroupResolver,
+		Reporter:               reporter,
+		DistributionMatcher:    distributionMatcher,
+		SettingsGetter:         settingsGetter,
 	}
 }
diff --git a/odiglet/pkg/ebpf/distribution_matcher.go b/odiglet/pkg/ebpf/distribution_matcher.go
index 624e31e63..5b7aed12a 100644
--- a/odiglet/pkg/ebpf/distribution_matcher.go
+++ b/odiglet/pkg/ebpf/distribution_matcher.go
@@ -10,7 +10,7 @@ import (
 type podDeviceDistributionMatcher struct{}

-func (dm *podDeviceDistributionMatcher) Distribution(ctx context.Context, e K8sProcessGroup) (instrumentation.OtelDistribution, error) {
+func (dm *podDeviceDistributionMatcher) Distribution(ctx context.Context, e K8sProcessDetails) (instrumentation.OtelDistribution, error) {
 	// get the language and sdk for this process event
 	// based on the pod spec and the container name from the process event
 	// TODO: We should have all the required information in the process event
diff --git a/odiglet/pkg/ebpf/reporter.go b/odiglet/pkg/ebpf/reporter.go
index ec52647e6..7cae2f7e4 100644
--- a/odiglet/pkg/ebpf/reporter.go
+++ b/odiglet/pkg/ebpf/reporter.go
@@ -16,13 +16,13 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/client"
 )

-type K8sProcessGroup struct {
+type K8sProcessDetails struct {
 	pod           *corev1.Pod
 	containerName string
 	pw            *workload.PodWorkload
 }

-func (kd K8sProcessGroup) String() string {
+func (kd K8sProcessDetails) String() string {
 	return fmt.Sprintf("Pod: %s.%s, Container: %s, Workload: %s",
 		kd.pod.Name, kd.pod.Namespace,
 		kd.containerName,
@@ -30,7 +30,7 @@ func (kd K8sProcessGroup) String() string {
 	)
 }

-var _ instrumentation.ProcessGroup = K8sProcessGroup{}
+var _ instrumentation.ProcessDetails = K8sProcessDetails{}

 type k8sReporter struct {
 	client client.Client
@@ -41,7 +41,7 @@ type K8sConfigGroup struct {
 	Lang common.ProgrammingLanguage
 }

-var _ instrumentation.Reporter[K8sProcessGroup] = &k8sReporter{}
+var _ instrumentation.Reporter[K8sProcessDetails] = &k8sReporter{}

 type errRequiredEnvVarNotFound struct {
 	envVarName string
@@ -75,7 +75,7 @@ const (
 	InstrumentationUnhealthy InstrumentationHealth = false
 )

-func (r *k8sReporter) OnInit(ctx context.Context, pid int, err error, e K8sProcessGroup) error {
+func (r *k8sReporter) OnInit(ctx context.Context, pid int, err error, e K8sProcessDetails) error {
 	if err == nil {
 		// currently we don't report on successful initialization
 		return nil
@@ -84,7 +84,7 @@ func (r *k8sReporter) OnInit(ctx context.Context, pid int, err error, e K8sProce
 	return r.updateInstrumentationInstanceStatus(ctx, e, pid, InstrumentationUnhealthy, FailedToInitialize, err.Error())
 }

-func (r *k8sReporter) OnLoad(ctx context.Context, pid int, err error, e K8sProcessGroup) error {
+func (r *k8sReporter) OnLoad(ctx context.Context, pid int, err error, e K8sProcessDetails) error {
 	if err != nil {
 		return r.updateInstrumentationInstanceStatus(ctx, e, pid, InstrumentationUnhealthy, FailedToLoad, err.Error())
 	}
@@ -93,7 +93,7 @@ func (r *k8sReporter) OnLoad(ctx context.Context, pid int, err error, e K8sProce
 	return r.updateInstrumentationInstanceStatus(ctx, e, pid, InstrumentationHealthy, LoadedSuccessfully, msg)
 }

-func (r *k8sReporter) OnRun(ctx context.Context, pid int, err error, e K8sProcessGroup) error {
+func (r *k8sReporter) OnRun(ctx context.Context, pid int, err error, e K8sProcessDetails) error {
 	if err == nil {
 		// finished running successfully
 		return nil
@@ -102,7 +102,7 @@ func (r *k8sReporter) OnRun(ctx context.Context, pid int, err error, e K8sProces
 	return r.updateInstrumentationInstanceStatus(ctx, e, pid, InstrumentationUnhealthy, FailedToRun, err.Error())
 }

-func (r *k8sReporter) OnExit(ctx context.Context, pid int, e K8sProcessGroup) error {
+func (r *k8sReporter) OnExit(ctx context.Context, pid int, e K8sProcessDetails) error {
 	if err := r.client.Delete(ctx, &odigosv1.InstrumentationInstance{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      instance.InstrumentationInstanceName(e.pod.Name, pid),
@@ -114,7 +114,7 @@ func (r *k8sReporter) OnExit(ctx context.Context, pid int, e K8sProcessGroup) er
 	return nil
 }

-func (r *k8sReporter) updateInstrumentationInstanceStatus(ctx context.Context, ke K8sProcessGroup, pid int, health InstrumentationHealth, reason InstrumentationStatusReason, msg string) error {
+func (r *k8sReporter) updateInstrumentationInstanceStatus(ctx context.Context, ke K8sProcessDetails, pid int, health InstrumentationHealth, reason InstrumentationStatusReason, msg string) error {
 	instrumentedAppName := workload.CalculateWorkloadRuntimeObjectName(ke.pw.Name, ke.pw.Kind)
 	healthy := bool(health)
 	return instance.UpdateInstrumentationInstanceStatus(ctx, ke.pod, ke.containerName, r.client, instrumentedAppName, pid, r.client.Scheme(),
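The k8sReporter persists lifecycle state as InstrumentationInstance objects and deletes them on exit. To see the hook order without any Kubernetes machinery, here is a hypothetical Reporter-shaped implementation that only logs (`logReporter` and `podDetails` are illustrative names, with the process details reduced to any `fmt.Stringer`):

```go
package main

import (
	"context"
	"fmt"
	"log"
)

type logReporter struct{}

// OnInit: called when the instrumentation is initialized; err is nil on success.
func (r *logReporter) OnInit(_ context.Context, pid int, err error, pd fmt.Stringer) error {
	log.Printf("init: pid=%d details=%s err=%v", pid, pd, err)
	return nil
}

// OnLoad: called after a load attempt; err is nil if loading succeeded.
func (r *logReporter) OnLoad(_ context.Context, pid int, err error, pd fmt.Stringer) error {
	log.Printf("load: pid=%d details=%s err=%v", pid, pd, err)
	return nil
}

// OnRun: called after the instrumentation stops running; err reports a fatal run error.
func (r *logReporter) OnRun(_ context.Context, pid int, err error, pd fmt.Stringer) error {
	log.Printf("run ended: pid=%d details=%s err=%v", pid, pd, err)
	return nil
}

// OnExit: the process exited; a persistent reporter would clean up its record
// here, the way k8sReporter deletes the InstrumentationInstance object.
func (r *logReporter) OnExit(_ context.Context, pid int, pd fmt.Stringer) error {
	log.Printf("exit: pid=%d details=%s", pid, pd)
	return nil
}

type podDetails string

func (p podDetails) String() string { return string(p) }

func main() {
	r := &logReporter{}
	ctx := context.Background()
	pd := podDetails("Pod: demo.default, Container: app")
	_ = r.OnLoad(ctx, 42, nil, pd)
	_ = r.OnExit(ctx, 42, pd)
	fmt.Println("done")
}
```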
diff --git a/odiglet/pkg/ebpf/resolvers.go b/odiglet/pkg/ebpf/resolvers.go
index efb654f9e..92f1a6372 100644
--- a/odiglet/pkg/ebpf/resolvers.go
+++ b/odiglet/pkg/ebpf/resolvers.go
@@ -16,23 +16,23 @@ type k8sDetailsResolver struct {
 	client client.Client
 }

-func (dr *k8sDetailsResolver) Resolve(ctx context.Context, event detector.ProcessEvent) (K8sProcessGroup, error) {
+func (dr *k8sDetailsResolver) Resolve(ctx context.Context, event detector.ProcessEvent) (K8sProcessDetails, error) {
 	pod, err := dr.podFromProcEvent(ctx, event)
 	if err != nil {
-		return K8sProcessGroup{}, err
+		return K8sProcessDetails{}, err
 	}

 	containerName, found := containerNameFromProcEvent(event)
 	if !found {
-		return K8sProcessGroup{}, errContainerNameNotReported
+		return K8sProcessDetails{}, errContainerNameNotReported
 	}

 	podWorkload, err := workload.PodWorkloadObjectOrError(ctx, pod)
 	if err != nil {
-		return K8sProcessGroup{}, fmt.Errorf("failed to find workload object from pod manifest owners references: %w", err)
+		return K8sProcessDetails{}, fmt.Errorf("failed to find workload object from pod manifest owners references: %w", err)
 	}

-	return K8sProcessGroup{
+	return K8sProcessDetails{
 		pod:           pod,
 		containerName: containerName,
 		pw:            podWorkload,
@@ -68,7 +68,7 @@ func containerNameFromProcEvent(event detector.ProcessEvent) (string, bool) {
 type k8sConfigGroupResolver struct{}

-func (cr *k8sConfigGroupResolver) Resolve(ctx context.Context, d K8sProcessGroup, dist instrumentation.OtelDistribution) (K8sConfigGroup, error) {
+func (cr *k8sConfigGroupResolver) Resolve(ctx context.Context, d K8sProcessDetails, dist instrumentation.OtelDistribution) (K8sConfigGroup, error) {
 	if d.pw == nil {
 		return K8sConfigGroup{}, fmt.Errorf("podWorkload is not provided, cannot resolve config group")
 	}
diff --git a/odiglet/pkg/ebpf/settings_getter.go b/odiglet/pkg/ebpf/settings_getter.go
index 40c420006..a10a216ed 100644
--- a/odiglet/pkg/ebpf/settings_getter.go
+++ b/odiglet/pkg/ebpf/settings_getter.go
@@ -15,9 +15,9 @@ type k8sSettingsGetter struct {
 	client client.Client
 }

-var _ instrumentation.SettingsGetter[K8sProcessGroup] = &k8sSettingsGetter{}
+var _ instrumentation.SettingsGetter[K8sProcessDetails] = &k8sSettingsGetter{}

-func (ksg *k8sSettingsGetter) Settings(ctx context.Context, kd K8sProcessGroup, dist instrumentation.OtelDistribution) (instrumentation.Settings, error) {
+func (ksg *k8sSettingsGetter) Settings(ctx context.Context, kd K8sProcessDetails, dist instrumentation.OtelDistribution) (instrumentation.Settings, error) {
 	sdkConfig, serviceName, err := ksg.instrumentationSDKConfig(ctx, kd, dist)
 	if err != nil {
 		return instrumentation.Settings{}, err
@@ -35,7 +35,7 @@ func (ksg *k8sSettingsGetter) Settings(ctx context.Context, kd K8sProcessGroup,
 	}, nil
 }

-func (ksg *k8sSettingsGetter) instrumentationSDKConfig(ctx context.Context, kd K8sProcessGroup, dist instrumentation.OtelDistribution) (*odigosv1.SdkConfig, string, error) {
+func (ksg *k8sSettingsGetter) instrumentationSDKConfig(ctx context.Context, kd K8sProcessDetails, dist instrumentation.OtelDistribution) (*odigosv1.SdkConfig, string, error) {
 	instrumentationConfig := odigosv1.InstrumentationConfig{}
 	instrumentationConfigKey := client.ObjectKey{
 		Namespace: kd.pw.Namespace,
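The settings getter resolves the workload's InstrumentationConfig CR by namespaced key, which is the same lookup pattern `instrumentationSDKConfig` begins above. A hedged sketch of that fetch with a controller-runtime client (`fetchInstrumentationConfig` is a hypothetical helper, not part of this PR; the real name derivation uses `workload.CalculateWorkloadRuntimeObjectName`):

```go
package sketch

import (
	"context"
	"fmt"

	odigosv1 "github.com/odigos-io/odigos/api/odigos/v1alpha1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// fetchInstrumentationConfig fetches the InstrumentationConfig CR for a
// workload by its namespaced name. runtimeObjectName is assumed to be the
// value produced by workload.CalculateWorkloadRuntimeObjectName.
func fetchInstrumentationConfig(ctx context.Context, c client.Client, namespace, runtimeObjectName string) (*odigosv1.InstrumentationConfig, error) {
	ic := &odigosv1.InstrumentationConfig{}
	key := client.ObjectKey{
		Namespace: namespace,
		Name:      runtimeObjectName,
	}
	if err := c.Get(ctx, key, ic); err != nil {
		return nil, fmt.Errorf("failed to get InstrumentationConfig %s/%s: %w", namespace, runtimeObjectName, err)
	}
	return ic, nil
}
```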